A utility to run diskimage-builder undercloud elements on a running host
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2368 lines
94KB

  1. # Copyright 2015 Red Hat Inc.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License"); you may
  4. # not use this file except in compliance with the License. You may obtain
  5. # a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  11. # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  12. # License for the specific language governing permissions and limitations
  13. # under the License.
  14. from __future__ import print_function
  15. import copy
  16. import errno
  17. import getpass
  18. import glob
  19. import hashlib
  20. import json
  21. import logging
  22. import netaddr
  23. import os
  24. import platform
  25. import re
  26. import socket
  27. import subprocess
  28. import sys
  29. import tempfile
  30. import time
  31. import uuid
  32. from ironicclient import client as ir_client
  33. from keystoneauth1 import session
  34. from keystoneauth1 import exceptions as ks_exceptions
  35. from keystoneclient import discover
  36. import keystoneauth1.identity.generic as ks_auth
  37. from mistralclient.api import client as mistralclient
  38. from mistralclient.api import base as mistralclient_exc
  39. from novaclient import client as novaclient
  40. from novaclient import exceptions
  41. import os_client_config
  42. from oslo_config import cfg
  43. from oslo_utils import netutils
  44. import psutil
  45. import pystache
  46. import six
  47. from swiftclient import client as swiftclient
  48. from instack_undercloud import validator
  49. # Making these values properties on a class allows us to delay their lookup,
  50. # which makes testing code that interacts with these files much easier.
  51. # NOTE(bnemec): The unit tests rely on these paths being in ~. If they are
  52. # ever moved the tests may need to be updated to avoid overwriting real files.
  53. class Paths(object):
  54. @property
  55. def CONF_PATH(self):
  56. return os.path.expanduser('~/undercloud.conf')
  57. # NOTE(bnemec): Deprecated
  58. @property
  59. def ANSWERS_PATH(self):
  60. return os.path.expanduser('~/instack.answers')
  61. @property
  62. def PASSWORD_PATH(self):
  63. return os.path.expanduser('~/undercloud-passwords.conf')
  64. @property
  65. def LOG_FILE(self):
  66. return os.path.expanduser('~/.instack/install-undercloud.log')
  67. @property
  68. def WORKBOOK_PATH(self):
  69. return '/usr/share/openstack-tripleo-common/workbooks'
# Singleton accessor for the well-known installer file locations.
PATHS = Paths()

DEFAULT_LOG_LEVEL = logging.DEBUG
DEFAULT_LOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s'
# Resource class assigned to newly enrolled baremetal nodes.
DEFAULT_NODE_RESOURCE_CLASS = 'baremetal'
# Module-level logger; populated by _configure_logging().
LOG = None
CONF = cfg.CONF

# Banner printed after a successful install/upgrade; formatted with the
# keys referenced in the template below.
COMPLETION_MESSAGE = """
#############################################################################
Undercloud %(undercloud_operation)s complete.
The file containing this installation's passwords is at
%(password_path)s.
There is also a stackrc file at %(stackrc_path)s.
These files are needed to interact with the OpenStack services, and should be
secured.
#############################################################################
"""
# Banner printed when the install/upgrade fails.
FAILURE_MESSAGE = """
#############################################################################
Undercloud %(undercloud_operation)s failed.
Reason: %(exception)s
See the previous output for details about what went wrong. The full install
log can be found at %(log_file)s.
#############################################################################
"""
# We need 8 GB, leave a little room for variation in what 8 GB means on
# different platforms.
REQUIRED_MB = 7680
# Control plane network name
PHYSICAL_NETWORK = 'ctlplane'
# Default value for the "subnets" option below.
SUBNETS_DEFAULT = ['ctlplane-subnet']
# Deprecated options
# Aliases mapping the old flat DEFAULT-group option names to the new
# per-subnet options, so existing configuration files keep working.
_deprecated_opt_network_gateway = [cfg.DeprecatedOpt(
    'network_gateway', group='DEFAULT')]
_deprecated_opt_network_cidr = [cfg.DeprecatedOpt(
    'network_cidr', group='DEFAULT')]
_deprecated_opt_dhcp_start = [cfg.DeprecatedOpt(
    'dhcp_start', group='DEFAULT')]
_deprecated_opt_dhcp_end = [cfg.DeprecatedOpt('dhcp_end', group='DEFAULT')]
_deprecated_opt_inspection_iprange = [cfg.DeprecatedOpt(
    'inspection_iprange', group='DEFAULT')]
# When adding new options to the lists below, make sure to regenerate the
# sample config by running "tox -e genconfig" in the project root.
# Option definitions for the DEFAULT group of undercloud.conf.
_opts = [
    cfg.StrOpt('undercloud_hostname',
               help=('Fully qualified hostname (including domain) to set on '
                     'the Undercloud. If left unset, the '
                     'current hostname will be used, but the user is '
                     'responsible for configuring all system hostname '
                     'settings appropriately. If set, the undercloud install '
                     'will configure all system hostname settings.'),
               ),
    cfg.StrOpt('local_ip',
               default='192.168.24.1/24',
               help=('IP information for the interface on the Undercloud '
                     'that will be handling the PXE boots and DHCP for '
                     'Overcloud instances. The IP portion of the value will '
                     'be assigned to the network interface defined by '
                     'local_interface, with the netmask defined by the '
                     'prefix portion of the value.')
               ),
    cfg.StrOpt('undercloud_public_host',
               deprecated_name='undercloud_public_vip',
               default='192.168.24.2',
               help=('Virtual IP or DNS address to use for the public '
                     'endpoints of Undercloud services. Only used with SSL.')
               ),
    cfg.StrOpt('undercloud_admin_host',
               deprecated_name='undercloud_admin_vip',
               default='192.168.24.3',
               help=('Virtual IP or DNS address to use for the admin '
                     'endpoints of Undercloud services. Only used with SSL.')
               ),
    cfg.ListOpt('undercloud_nameservers',
                default=[],
                help=('DNS nameserver(s) to use for the undercloud node.'),
                ),
    cfg.ListOpt('undercloud_ntp_servers',
                default=[],
                help=('List of ntp servers to use.')),
    cfg.StrOpt('overcloud_domain_name',
               default='localdomain',
               help=('DNS domain name to use when deploying the overcloud. '
                     'The overcloud parameter "CloudDomain" must be set to a '
                     'matching value.')
               ),
    cfg.ListOpt('subnets',
                default=SUBNETS_DEFAULT,
                help=('List of routed network subnets for provisioning '
                      'and introspection. Comma separated list of names/tags. '
                      'For each network a section/group needs to be added to '
                      'the configuration file with these parameters set: '
                      'cidr, dhcp_start, dhcp_end, inspection_iprange, '
                      'gateway and masquerade_network.'
                      '\n\n'
                      'Example:\n\n'
                      'subnets = subnet1,subnet2\n'
                      '\n'
                      'An example section/group in config file:\n'
                      '\n'
                      '[subnet1]\n'
                      'cidr = 192.168.10.0/24\n'
                      'dhcp_start = 192.168.10.100\n'
                      'dhcp_end = 192.168.10.200\n'
                      'inspection_iprange = 192.168.10.20,192.168.10.90\n'
                      'gateway = 192.168.10.254\n'
                      'masquerade_network = True'
                      '\n'
                      '[subnet2]\n'
                      '. . .\n')),
    cfg.StrOpt('local_subnet',
               default=SUBNETS_DEFAULT[0],
               help=('Name of the local subnet, where the PXE boot and DHCP '
                     'interfaces for overcloud instances is located. The IP '
                     'address of the local_ip/local_interface should reside '
                     'in this subnet.')),
    cfg.StrOpt('undercloud_service_certificate',
               default='',
               help=('Certificate file to use for OpenStack service SSL '
                     'connections. Setting this enables SSL for the '
                     'OpenStack API endpoints, leaving it unset disables SSL.')
               ),
    cfg.BoolOpt('generate_service_certificate',
                default=False,
                help=('When set to True, an SSL certificate will be generated '
                      'as part of the undercloud install and this certificate '
                      'will be used in place of the value for '
                      'undercloud_service_certificate. The resulting '
                      'certificate will be written to '
                      '/etc/pki/tls/certs/undercloud-[undercloud_public_host].'
                      'pem. This certificate is signed by CA selected by the '
                      '"certificate_generation_ca" option.')
                ),
    cfg.StrOpt('certificate_generation_ca',
               default='local',
               help=('The certmonger nickname of the CA from which the '
                     'certificate will be requested. This is used only if '
                     'the generate_service_certificate option is set. '
                     'Note that if the "local" CA is selected the '
                     'certmonger\'s local CA certificate will be extracted to '
                     '/etc/pki/ca-trust/source/anchors/cm-local-ca.pem and '
                     'subsequently added to the trust chain.')
               ),
    cfg.StrOpt('service_principal',
               default='',
               help=('The kerberos principal for the service that will use '
                     'the certificate. This is only needed if your CA '
                     'requires a kerberos principal. e.g. with FreeIPA.')
               ),
    cfg.StrOpt('local_interface',
               default='eth1',
               help=('Network interface on the Undercloud that will be '
                     'handling the PXE boots and DHCP for Overcloud '
                     'instances.')
               ),
    cfg.IntOpt('local_mtu',
               default=1500,
               help=('MTU to use for the local_interface.')
               ),
    cfg.StrOpt('masquerade_network',
               default='192.168.24.0/24',
               deprecated_for_removal=True,
               deprecated_reason=('With support for routed networks, '
                                  'masquerading of the provisioning networks '
                                  'is moved to a boolean option for each '
                                  'subnet.'),
               help=('Network that will be masqueraded for external access, '
                     'if required. This should be the subnet used for PXE '
                     'booting.')
               ),
    cfg.StrOpt('hieradata_override',
               default='',
               help=('Path to hieradata override file. If set, the file will '
                     'be copied under /etc/puppet/hieradata and set as the '
                     'first file in the hiera hierarchy. This can be used '
                     'to custom configure services beyond what '
                     'undercloud.conf provides')
               ),
    cfg.StrOpt('net_config_override',
               default='',
               help=('Path to network config override template. If set, this '
                     'template will be used to configure the networking via '
                     'os-net-config. Must be in json format. '
                     'Templated tags can be used within the '
                     'template, see '
                     'instack-undercloud/elements/undercloud-stack-config/'
                     'net-config.json.template for example tags')
               ),
    cfg.StrOpt('inspection_interface',
               default='br-ctlplane',
               deprecated_name='discovery_interface',
               help=('Network interface on which inspection dnsmasq will '
                     'listen. If in doubt, use the default value.')
               ),
    cfg.BoolOpt('inspection_extras',
                default=True,
                help=('Whether to enable extra hardware collection during '
                      'the inspection process. Requires python-hardware or '
                      'python-hardware-detect package on the introspection '
                      'image.')),
    cfg.BoolOpt('inspection_runbench',
                default=False,
                deprecated_name='discovery_runbench',
                help=('Whether to run benchmarks when inspecting nodes. '
                      'Requires inspection_extras set to True.')
                ),
    cfg.BoolOpt('enable_node_discovery',
                default=False,
                help=('Makes ironic-inspector enroll any unknown node that '
                      'PXE-boots introspection ramdisk in Ironic. By default, '
                      'the "fake" driver is used for new nodes (it is '
                      'automatically enabled when this option is set to True).'
                      ' Set discovery_default_driver to override. '
                      'Introspection rules can also be used to specify driver '
                      'information for newly enrolled nodes.')
                ),
    cfg.StrOpt('discovery_default_driver',
               default='ipmi',
               help=('The default driver or hardware type to use for newly '
                     'discovered nodes (requires enable_node_discovery set to '
                     'True). It is automatically added to enabled_drivers '
                     'or enabled_hardware_types accordingly.')
               ),
    cfg.BoolOpt('undercloud_debug',
                default=True,
                help=('Whether to enable the debug log level for Undercloud '
                      'OpenStack services.')
                ),
    cfg.BoolOpt('undercloud_update_packages',
                default=True,
                help=('Whether to update packages during the Undercloud '
                      'install.')
                ),
    cfg.BoolOpt('enable_tempest',
                default=True,
                help=('Whether to install Tempest in the Undercloud.')
                ),
    cfg.BoolOpt('enable_telemetry',
                default=False,
                help=('Whether to install Telemetry services '
                      '(ceilometer, gnocchi, aodh, panko ) in the Undercloud.')
                ),
    cfg.BoolOpt('enable_ui',
                default=True,
                help=('Whether to install the TripleO UI.')
                ),
    cfg.BoolOpt('enable_validations',
                default=True,
                help=('Whether to install requirements to run the TripleO '
                      'validations.')
                ),
    cfg.BoolOpt('enable_cinder',
                default=False,
                help=('Whether to install the Volume service. It is not '
                      'currently used in the undercloud.')),
    cfg.BoolOpt('enable_novajoin',
                default=False,
                help=('Whether to install novajoin metadata service in '
                      'the Undercloud.')
                ),
    cfg.BoolOpt('enable_container_images_build',
                default=True,
                help=('Whether to enable docker container images to be build '
                      'on the undercloud.')
                ),
    cfg.StrOpt('ipa_otp',
               default='',
               help=('One Time Password to register Undercloud node with '
                     'an IPA server. '
                     'Required when enable_novajoin = True.')
               ),
    cfg.BoolOpt('ipxe_enabled',
                default=True,
                help=('Whether to use iPXE for deploy and inspection.'),
                deprecated_name='ipxe_deploy',
                ),
    cfg.IntOpt('scheduler_max_attempts',
               default=30, min=1,
               help=('Maximum number of attempts the scheduler will make '
                     'when deploying the instance. You should keep it '
                     'greater or equal to the number of bare metal nodes '
                     'you expect to deploy at once to work around '
                     'potential race condition when scheduling.')),
    cfg.BoolOpt('clean_nodes',
                default=False,
                help=('Whether to clean overcloud nodes (wipe the hard drive) '
                      'between deployments and after the introspection.')),
    cfg.ListOpt('enabled_drivers',
                default=['pxe_ipmitool', 'pxe_drac', 'pxe_ilo'],
                help=('List of enabled bare metal drivers.'),
                deprecated_for_removal=True,
                deprecated_reason=('Please switch to hardware types and '
                                   'the enabled_hardware_types option.')),
    cfg.ListOpt('enabled_hardware_types',
                default=['ipmi', 'redfish', 'ilo', 'idrac'],
                help=('List of enabled bare metal hardware types (next '
                      'generation drivers).')),
    cfg.StrOpt('docker_registry_mirror',
               default='',
               help=('An optional docker \'registry-mirror\' that will be'
                     'configured in /etc/docker/daemon.json.')
               ),
    cfg.ListOpt('additional_architectures',
                default=[],
                help=('List of additional architectures enabled in your cloud '
                      'environment. The list of supported values is: %s'
                      % ' '.join(validator.SUPPORTED_ARCHITECTURES))),
    cfg.BoolOpt('enable_routed_networks',
                default=False,
                help=('Enable support for routed ctlplane networks.')),
]
# Routed subnets
# Options registered once per subnet named in the "subnets" option; see
# _load_subnets_config_groups() for the registration.
_subnets_opts = [
    cfg.StrOpt('cidr',
               default='192.168.24.0/24',
               deprecated_opts=_deprecated_opt_network_cidr,
               help=('Network CIDR for the Neutron-managed subnet for '
                     'Overcloud instances.')),
    cfg.StrOpt('dhcp_start',
               default='192.168.24.5',
               deprecated_opts=_deprecated_opt_dhcp_start,
               help=('Start of DHCP allocation range for PXE and DHCP of '
                     'Overcloud instances on this network.')),
    cfg.StrOpt('dhcp_end',
               default='192.168.24.24',
               deprecated_opts=_deprecated_opt_dhcp_end,
               help=('End of DHCP allocation range for PXE and DHCP of '
                     'Overcloud instances on this network.')),
    cfg.StrOpt('inspection_iprange',
               default='192.168.24.100,192.168.24.120',
               deprecated_opts=_deprecated_opt_inspection_iprange,
               help=('Temporary IP range that will be given to nodes on this '
                     'network during the inspection process. Should not '
                     'overlap with the range defined by dhcp_start and '
                     'dhcp_end, but should be in the same ip subnet.')),
    cfg.StrOpt('gateway',
               default='192.168.24.1',
               deprecated_opts=_deprecated_opt_network_gateway,
               help=('Network gateway for the Neutron-managed network for '
                     'Overcloud instances on this network.')),
    cfg.BoolOpt('masquerade',
                default=False,
                help=('The network will be masqueraded for external access.')),
]
# Passwords, tokens, hashes
# Options in the [auth] group. Any value left unset here is generated
# during the install and persisted in PATHS.PASSWORD_PATH.
_auth_opts = [
    cfg.StrOpt('undercloud_db_password',
               help=('Password used for MySQL root user. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_admin_token',
               help=('Keystone admin token. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_admin_password',
               help=('Keystone admin password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_glance_password',
               help=('Glance service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_heat_encryption_key',
               help=('Heat db encryption key(must be 16, 24, or 32 characters.'
                     ' If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_heat_password',
               help=('Heat service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_heat_cfn_password',
               help=('Heat cfn service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_neutron_password',
               help=('Neutron service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_nova_password',
               help=('Nova service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_ironic_password',
               help=('Ironic service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_aodh_password',
               help=('Aodh service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_gnocchi_password',
               help=('Gnocchi service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_ceilometer_password',
               help=('Ceilometer service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_panko_password',
               help=('Panko service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_ceilometer_metering_secret',
               help=('Ceilometer metering secret. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_ceilometer_snmpd_user',
               default='ro_snmp_user',
               help=('Ceilometer snmpd read-only user. If this value is '
                     'changed from the default, the new value must be passed '
                     'in the overcloud environment as the parameter '
                     'SnmpdReadonlyUserName. This value must be between '
                     '1 and 32 characters long.')
               ),
    cfg.StrOpt('undercloud_ceilometer_snmpd_password',
               help=('Ceilometer snmpd password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_swift_password',
               help=('Swift service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_mistral_password',
               help=('Mistral service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_rabbit_cookie',
               help=('Rabbitmq cookie. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_rabbit_password',
               help=('Rabbitmq password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_rabbit_username',
               help=('Rabbitmq username. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_heat_stack_domain_admin_password',
               help=('Heat stack domain admin password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_swift_hash_suffix',
               help=('Swift hash suffix. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_haproxy_stats_password',
               help=('HAProxy stats password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_zaqar_password',
               help=('Zaqar password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_horizon_secret_key',
               help=('Horizon secret key. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_cinder_password',
               help=('Cinder service password. '
                     'If left unset, one will be automatically generated.')
               ),
    cfg.StrOpt('undercloud_novajoin_password',
               help=('Novajoin vendordata plugin service password. '
                     'If left unset, one will be automatically generated.')
               ),
]
# Register the static option groups; the per-subnet groups are registered
# later, once CONF.subnets is known.
CONF.register_opts(_opts)
CONF.register_opts(_auth_opts, group='auth')
  538. def _load_subnets_config_groups():
  539. for group in CONF.subnets:
  540. g = cfg.OptGroup(name=group, title=group)
  541. CONF.register_opts(_subnets_opts, group=g)
  542. def list_opts():
  543. return [(None, copy.deepcopy(_opts)),
  544. (SUBNETS_DEFAULT[0], copy.deepcopy(_subnets_opts)),
  545. ('auth', copy.deepcopy(_auth_opts)),
  546. ]
  547. def _configure_logging(level, filename):
  548. """Does the initial logging configuration
  549. This should only ever be called once. If further changes to the logging
  550. config are needed they should be made directly on the LOG object.
  551. :param level: The desired logging level
  552. :param filename: The log file. Set to None to disable file logging.
  553. """
  554. try:
  555. os.makedirs(os.path.dirname(PATHS.LOG_FILE))
  556. except OSError as e:
  557. if e.errno != errno.EEXIST:
  558. raise
  559. logging.basicConfig(filename=filename,
  560. format=DEFAULT_LOG_FORMAT,
  561. level=level)
  562. global LOG
  563. LOG = logging.getLogger(__name__)
  564. if os.environ.get('OS_LOG_CAPTURE') != '1':
  565. handler = logging.StreamHandler()
  566. formatter = logging.Formatter(DEFAULT_LOG_FORMAT)
  567. handler.setFormatter(formatter)
  568. LOG.addHandler(handler)
  569. def _load_config():
  570. conf_params = []
  571. if os.path.isfile(PATHS.PASSWORD_PATH):
  572. conf_params += ['--config-file', PATHS.PASSWORD_PATH]
  573. if os.path.isfile(PATHS.CONF_PATH):
  574. conf_params += ['--config-file', PATHS.CONF_PATH]
  575. else:
  576. LOG.warning('%s does not exist. Using defaults.' % PATHS.CONF_PATH)
  577. CONF(conf_params)
  578. def _run_command(args, env=None, name=None):
  579. """Run the command defined by args and return its output
  580. :param args: List of arguments for the command to be run.
  581. :param env: Dict defining the environment variables. Pass None to use
  582. the current environment.
  583. :param name: User-friendly name for the command being run. A value of
  584. None will cause args[0] to be used.
  585. """
  586. if name is None:
  587. name = args[0]
  588. if env is None:
  589. env = os.environ
  590. env = env.copy()
  591. # When running a localized python script, we need to tell it that we're
  592. # using utf-8 for stdout, otherwise it can't tell because of the pipe.
  593. env['PYTHONIOENCODING'] = 'utf8'
  594. try:
  595. return subprocess.check_output(args,
  596. stderr=subprocess.STDOUT,
  597. env=env).decode('utf-8')
  598. except subprocess.CalledProcessError as e:
  599. LOG.error('%s failed: %s', name, e.output)
  600. raise
  601. def _run_live_command(args, env=None, name=None):
  602. """Run the command defined by args and log its output
  603. Takes the same arguments as _run_command, but runs the process
  604. asynchronously so the output can be logged while the process is still
  605. running.
  606. """
  607. if name is None:
  608. name = args[0]
  609. if env is None:
  610. env = os.environ
  611. env = env.copy()
  612. # When running a localized python script, we need to tell it that we're
  613. # using utf-8 for stdout, otherwise it can't tell because of the pipe.
  614. env['PYTHONIOENCODING'] = 'utf8'
  615. process = subprocess.Popen(args, env=env,
  616. stdout=subprocess.PIPE,
  617. stderr=subprocess.STDOUT)
  618. while True:
  619. line = process.stdout.readline().decode('utf-8')
  620. if line:
  621. LOG.info(line.rstrip())
  622. if line == '' and process.poll() is not None:
  623. break
  624. if process.returncode != 0:
  625. raise RuntimeError('%s failed. See log for details.' % name)
def _check_hostname():
    """Check system hostname configuration

    Rabbit and Puppet require pretty specific hostname configuration. This
    function ensures that the system hostname settings are valid before
    continuing with the installation.

    :raises RuntimeError: if the static and transient hostnames differ, or
        if the configured hostname is not fully qualified and cannot be
        added to /etc/hosts.
    """
    # If the user explicitly configured a hostname, set it first so the
    # checks below run against the desired value.
    if CONF.undercloud_hostname is not None:
        args = ['sudo', 'hostnamectl', 'set-hostname',
                CONF.undercloud_hostname]
        _run_command(args, name='hostnamectl')

    LOG.info('Checking for a FQDN hostname...')
    args = ['sudo', 'hostnamectl', '--static']
    detected_static_hostname = _run_command(args, name='hostnamectl').rstrip()
    LOG.info('Static hostname detected as %s', detected_static_hostname)
    args = ['sudo', 'hostnamectl', '--transient']
    detected_transient_hostname = _run_command(args,
                                               name='hostnamectl').rstrip()
    LOG.info('Transient hostname detected as %s', detected_transient_hostname)
    if detected_static_hostname != detected_transient_hostname:
        LOG.error('Static hostname "%s" does not match transient hostname '
                  '"%s".', detected_static_hostname,
                  detected_transient_hostname)
        LOG.error('Use hostnamectl to set matching hostnames.')
        raise RuntimeError('Static and transient hostnames do not match')
    # Make sure the static hostname is resolvable via /etc/hosts; if no
    # non-comment line already lists it, append it to the 127.0.0.1 entry.
    with open('/etc/hosts') as hosts_file:
        for line in hosts_file:
            if (not line.lstrip().startswith('#') and
                    detected_static_hostname in line.split()):
                break
        else:
            short_hostname = detected_static_hostname.split('.')[0]
            if short_hostname == detected_static_hostname:
                raise RuntimeError('Configured hostname is not fully '
                                   'qualified.')
            sed_cmd = ('sed -i "s/127.0.0.1\(\s*\)/127.0.0.1\\1%s %s /" '
                       '/etc/hosts' %
                       (detected_static_hostname, short_hostname))
            args = ['sudo', '/bin/bash', '-c', sed_cmd]
            _run_command(args, name='hostname-to-etc-hosts')
            LOG.info('Added hostname %s to /etc/hosts',
                     detected_static_hostname)
  667. def _check_memory():
  668. """Check system memory
  669. The undercloud will not run properly in less than 8 GB of memory.
  670. This function verifies that at least that much is available before
  671. proceeding with install.
  672. """
  673. mem = psutil.virtual_memory()
  674. swap = psutil.swap_memory()
  675. total_mb = (mem.total + swap.total) / 1024 / 1024
  676. if total_mb < REQUIRED_MB:
  677. LOG.error('At least %d MB of memory is required for undercloud '
  678. 'installation. A minimum of 8 GB is recommended. '
  679. 'Only detected %d MB' % (REQUIRED_MB, total_mb))
  680. raise RuntimeError('Insufficient memory available')
  681. def _check_ipv6_enabled():
  682. """Test if IPv6 is enabled
  683. If /proc/net/if_inet6 exist ipv6 sysctl settings are available.
  684. """
  685. return os.path.isfile('/proc/net/if_inet6')
  686. def _wrap_ipv6(ip):
  687. """Wrap a IP address in square brackets if IPv6
  688. """
  689. if netutils.is_valid_ipv6(ip):
  690. return "[%s]" % ip
  691. return ip
  692. def _check_sysctl():
  693. """Check sysctl option availability
  694. The undercloud will not install properly if some of the expected sysctl
  695. values are not available to be set.
  696. """
  697. options = ['net.ipv4.ip_forward', 'net.ipv4.ip_nonlocal_bind']
  698. if _check_ipv6_enabled():
  699. options.append('net.ipv6.ip_nonlocal_bind')
  700. not_available = []
  701. for option in options:
  702. path = '/proc/sys/{opt}'.format(opt=option.replace('.', '/'))
  703. if not os.path.isfile(path):
  704. not_available.append(option)
  705. if not_available:
  706. LOG.error('Required sysctl options are not available. Check '
  707. 'that your kernel is up to date. Missing: '
  708. '{options}'.format(options=", ".join(not_available)))
  709. raise RuntimeError('Missing sysctl options')
  710. def _cidr_overlaps(a, b):
  711. return a.first <= b.last and b.first <= a.last
def _validate_network():
    """Validate the network-related configuration in CONF.

    Validates the global options together with the local subnet's options,
    then validates each configured subnet and checks that no two subnet
    CIDRs overlap.

    :raises validator.FailedValidation: on the first invalid setting found.
    """
    def error_handler(message):
        LOG.error('Undercloud configuration validation failed: %s', message)
        raise validator.FailedValidation(message)

    # More than one subnet requires routed-network support to be enabled.
    if (len(CONF.subnets) > 1 and not CONF.enable_routed_networks):
        message = ('Multiple subnets specified: %s but routed networks are '
                   'not enabled.' % CONF.subnets)
        error_handler(message)

    params = {opt.name: CONF[opt.name] for opt in _opts}
    # Get parameters of "local_subnet", pass to validator to ensure parameters
    # such as "local_ip", "undercloud_public_host" and "undercloud_admin_host"
    # are valid
    local_subnet_opts = CONF.get(CONF.local_subnet)
    params.update({opt.name: local_subnet_opts[opt.name]
                   for opt in _subnets_opts})
    validator.validate_config(params, error_handler)

    # Validate subnet parameters
    subnet_cidrs = []
    for subnet in CONF.subnets:
        subnet_opts = CONF.get(subnet)
        params = {opt.name: subnet_opts[opt.name] for opt in _subnets_opts}
        # Reject any CIDR that overlaps one seen in an earlier subnet.
        if any(_cidr_overlaps(x, netaddr.IPNetwork(subnet_opts.cidr))
               for x in subnet_cidrs):
            message = ('CIDR of %s, %s, overlaps with another subnet.' %
                       (subnet, subnet_opts.cidr))
            error_handler(message)
        subnet_cidrs.append(netaddr.IPNetwork(subnet_opts.cidr))
        validator.validate_subnet(subnet, params, error_handler)
  740. def _validate_no_ip_change():
  741. """Disallow provisioning interface IP changes
  742. Changing the provisioning network IP causes a number of issues, so we
  743. need to disallow it early in the install before configurations start to
  744. be changed.
  745. """
  746. os_net_config_file = '/etc/os-net-config/config.json'
  747. # Nothing to do if we haven't already installed
  748. if not os.path.isfile(
  749. os.path.expanduser(os_net_config_file)):
  750. return
  751. with open(os_net_config_file) as f:
  752. network_config = json.loads(f.read())
  753. try:
  754. ctlplane = [i for i in network_config.get('network_config', [])
  755. if i['name'] == 'br-ctlplane'][0]
  756. except IndexError:
  757. # Nothing to check if br-ctlplane wasn't configured
  758. return
  759. existing_ip = ctlplane['addresses'][0]['ip_netmask']
  760. if existing_ip != CONF.local_ip:
  761. message = ('Changing the local_ip is not allowed. Existing IP: '
  762. '%s, Configured IP: %s') % (existing_ip,
  763. CONF.local_ip)
  764. LOG.error(message)
  765. raise validator.FailedValidation(message)
  766. def _validate_passwords_file():
  767. """Disallow updates if the passwords file is missing
  768. If the undercloud was already deployed, the passwords file needs to be
  769. present so passwords that can't be changed are persisted. If the file
  770. is missing it will break the undercloud, so we should fail-fast and let
  771. the user know about the problem.
  772. """
  773. if (os.path.isfile(os.path.expanduser('~/stackrc')) and
  774. not os.path.isfile(PATHS.PASSWORD_PATH)):
  775. message = ('The %s file is missing. This will cause all service '
  776. 'passwords to change and break the existing undercloud. ' %
  777. PATHS.PASSWORD_PATH)
  778. raise validator.FailedValidation(message)
  779. def _validate_architecure_options():
  780. def error_handler(message):
  781. LOG.error('Undercloud configuration validation failed: %s', message)
  782. raise validator.FailedValidation(message)
  783. params = {opt.name: CONF[opt.name] for opt in _opts}
  784. validator._validate_additional_architectures(params, error_handler)
  785. validator._validate_ppc64le_exclusive_opts(params, error_handler)
  786. def _validate_configuration():
  787. try:
  788. _check_hostname()
  789. _check_memory()
  790. _check_sysctl()
  791. _validate_network()
  792. _validate_no_ip_change()
  793. _validate_passwords_file()
  794. _validate_architecure_options()
  795. except RuntimeError as e:
  796. LOG.error('An error occurred during configuration validation, '
  797. 'please check your host configuration and try again. '
  798. 'Error message: {error}'.format(error=e))
  799. sys.exit(1)
  800. def _generate_password(length=40):
  801. """Create a random password
  802. Copied from rdomanager-oscplugin. This should eventually live in
  803. tripleo-common.
  804. """
  805. uuid_str = six.text_type(uuid.uuid4()).encode("UTF-8")
  806. return hashlib.sha1(uuid_str).hexdigest()[:length]
  807. def _get_service_endpoints(name, format_str, public, internal, admin=None,
  808. public_proto='http', internal_proto='http'):
  809. endpoints = {}
  810. upper_name = name.upper().replace('-', '_')
  811. public_port_key = 'port'
  812. if not admin:
  813. admin = internal
  814. if public_proto in ['https', 'wss']:
  815. public_port_key = 'ssl_port'
  816. endpoints['UNDERCLOUD_ENDPOINT_%s_PUBLIC' % upper_name] = (
  817. format_str % (public_proto, _wrap_ipv6(public['host']),
  818. public[public_port_key]))
  819. endpoints['UNDERCLOUD_ENDPOINT_%s_INTERNAL' % upper_name] = (
  820. format_str % (internal_proto, _wrap_ipv6(internal['host']),
  821. internal['port']))
  822. endpoints['UNDERCLOUD_ENDPOINT_%s_ADMIN' % upper_name] = (
  823. format_str % (internal_proto, _wrap_ipv6(admin['host']),
  824. admin['port']))
  825. return endpoints
def _generate_endpoints(instack_env):
    """Populate instack_env with the UNDERCLOUD_ENDPOINT_* variables.

    Builds public/internal/admin endpoint URLs for every undercloud
    service. When a service certificate is configured or generated, the
    public endpoints switch to https/wss on the public host while
    internal traffic stays on plain http/ws.

    :param instack_env: environment dict, updated in place.
    """
    local_host = instack_env['LOCAL_IP']
    public_host = local_host
    public_proto = 'http'
    internal_host = local_host
    internal_proto = 'http'
    zaqar_ws_public_proto = 'ws'
    zaqar_ws_internal_proto = 'ws'
    # TLS termination applies only to the public side.
    if (CONF.undercloud_service_certificate or
            CONF.generate_service_certificate):
        public_host = CONF.undercloud_public_host
        internal_host = CONF.undercloud_admin_host
        public_proto = 'https'
        zaqar_ws_public_proto = 'wss'
    endpoints = {}
    # Each entry: (name, URL format string, public endpoint dict,
    # internal endpoint dict[, admin endpoint dict]); the dicts use the
    # 'host'/'port'/'ssl_port' keys read by _get_service_endpoints.
    endpoint_list = [
        ('heat',
         '%s://%s:%d/v1/%%(tenant_id)s',
         {'host': public_host, 'port': 8004, 'ssl_port': 13004},
         {'host': internal_host, 'port': 8004}),
        ('heat-cfn',
         '%s://%s:%d/v1/%%(tenant_id)s',
         {'host': public_host, 'port': 8000, 'ssl_port': 13800},
         {'host': internal_host, 'port': 8000}),
        ('heat-ui-proxy',
         '%s://%s:%d',
         {'host': public_host, 'port': 8004, 'ssl_port': 13004},
         {'host': internal_host, 'port': 8004}),
        ('heat-ui-config',
         '%s://%s:%d/heat/v1/%%(project_id)s',
         {'host': public_host, 'port': 3000, 'ssl_port': 443},
         {'host': internal_host, 'port': 3000}),
        ('neutron',
         '%s://%s:%d',
         {'host': public_host, 'port': 9696, 'ssl_port': 13696},
         {'host': internal_host, 'port': 9696}),
        ('glance',
         '%s://%s:%d',
         {'host': public_host, 'port': 9292, 'ssl_port': 13292},
         {'host': internal_host, 'port': 9292}),
        ('nova',
         '%s://%s:%d/v2.1',
         {'host': public_host, 'port': 8774, 'ssl_port': 13774},
         {'host': internal_host, 'port': 8774}),
        ('placement',
         '%s://%s:%d/placement',
         {'host': public_host, 'port': 8778, 'ssl_port': 13778},
         {'host': internal_host, 'port': 8778}),
        # Keystone is the only service with a distinct admin endpoint
        # (the v2-era admin port 35357).
        ('keystone',
         '%s://%s:%d',
         {'host': public_host, 'port': 5000, 'ssl_port': 13000},
         {'host': internal_host, 'port': 5000},
         {'host': internal_host, 'port': 35357}),
        ('keystone-ui-config',
         '%s://%s:%d/keystone/v3',
         {'host': public_host, 'port': 3000, 'ssl_port': 443},
         {'host': internal_host, 'port': 3000},
         {'host': internal_host, 'port': 35357}),
        ('swift',
         '%s://%s:%d/v1/AUTH_%%(tenant_id)s',
         {'host': public_host, 'port': 8080, 'ssl_port': 13808},
         {'host': internal_host, 'port': 8080}),
        ('swift-ui-proxy',
         '%s://%s:%d',
         {'host': public_host, 'port': 8080, 'ssl_port': 13808},
         {'host': internal_host, 'port': 8080}),
        ('swift-ui-config',
         '%s://%s:%d/swift/v1/AUTH_%%(project_id)s',
         {'host': public_host, 'port': 3000, 'ssl_port': 443},
         {'host': internal_host, 'port': 3000}),
        ('ironic',
         '%s://%s:%d',
         {'host': public_host, 'port': 6385, 'ssl_port': 13385},
         {'host': internal_host, 'port': 6385}),
        ('ironic-ui-config',
         '%s://%s:%d/ironic',
         {'host': public_host, 'port': 3000, 'ssl_port': 443},
         {'host': internal_host, 'port': 3000}),
        ('ironic_inspector',
         '%s://%s:%d',
         {'host': public_host, 'port': 5050, 'ssl_port': 13050},
         {'host': internal_host, 'port': 5050}),
        ('ironic_inspector-ui-config',
         '%s://%s:%d/ironic-inspector',
         {'host': public_host, 'port': 3000, 'ssl_port': 443},
         {'host': internal_host, 'port': 3000}),
        ('aodh',
         '%s://%s:%d',
         {'host': public_host, 'port': 8042, 'ssl_port': 13042},
         {'host': internal_host, 'port': 8042}),
        ('gnocchi',
         '%s://%s:%d',
         {'host': public_host, 'port': 8041, 'ssl_port': 13041},
         {'host': internal_host, 'port': 8041}),
        ('panko',
         '%s://%s:%d',
         {'host': public_host, 'port': 8977, 'ssl_port': 13977},
         {'host': internal_host, 'port': 8977}),
        ('mistral',
         '%s://%s:%d/v2',
         {'host': public_host, 'port': 8989, 'ssl_port': 13989},
         {'host': internal_host, 'port': 8989}),
        ('mistral-ui-proxy',
         '%s://%s:%d',
         {'host': public_host, 'port': 8989, 'ssl_port': 13989},
         {'host': internal_host, 'port': 8989}),
        ('mistral-ui-config',
         '%s://%s:%d/mistral/v2',
         {'host': public_host, 'port': 3000, 'ssl_port': 443},
         {'host': internal_host, 'port': 3000}),
        ('zaqar',
         '%s://%s:%d',
         {'host': public_host, 'port': 8888, 'ssl_port': 13888},
         {'host': internal_host, 'port': 8888}),
        ('cinder',
         '%s://%s:%d/v1/%%(tenant_id)s',
         {'host': public_host, 'port': 8776, 'ssl_port': 13776},
         {'host': internal_host, 'port': 8776}),
        ('cinder_v2',
         '%s://%s:%d/v2/%%(tenant_id)s',
         {'host': public_host, 'port': 8776, 'ssl_port': 13776},
         {'host': internal_host, 'port': 8776}),
        ('cinder_v3',
         '%s://%s:%d/v3/%%(tenant_id)s',
         {'host': public_host, 'port': 8776, 'ssl_port': 13776},
         {'host': internal_host, 'port': 8776}),
    ]
    for endpoint_data in endpoint_list:
        endpoints.update(
            _get_service_endpoints(*endpoint_data,
                                   public_proto=public_proto,
                                   internal_proto=internal_proto))
    # Zaqar's websocket endpoint
    # NOTE(jaosorior): Zaqar's websocket endpoint doesn't support being proxied
    # on a different port. If that's done it will ignore the handshake and
    # won't work.
    endpoints.update(_get_service_endpoints(
        'zaqar-websocket',
        '%s://%s:%d',
        {'host': public_host, 'port': 9000, 'ssl_port': 9000},
        {'host': internal_host, 'port': 9000},
        public_proto=zaqar_ws_public_proto,
        internal_proto=zaqar_ws_internal_proto))
    # NOTE(review): the 'zaqar_ws_public_proto' keys inside the two dicts
    # below are not among the keys _get_service_endpoints reads - they
    # appear to be inert; confirm before relying on them.
    endpoints.update(_get_service_endpoints(
        'zaqar-ui-proxy',
        '%s://%s:%d',
        {'host': public_host, 'port': 9000, 'ssl_port': 443,
         'zaqar_ws_public_proto': 'ws'},
        {'host': internal_host, 'port': 9000},
        public_proto=zaqar_ws_public_proto,
        internal_proto=zaqar_ws_internal_proto))
    endpoints.update(_get_service_endpoints(
        'zaqar-ui-config',
        '%s://%s:%d/zaqar',
        {'host': public_host, 'port': 3000, 'ssl_port': 443,
         'zaqar_ws_public_proto': 'wss'},
        {'host': internal_host, 'port': 3000},
        public_proto=zaqar_ws_public_proto,
        internal_proto=zaqar_ws_internal_proto))
    # The swift admin endpoint has a different format from the others
    endpoints['UNDERCLOUD_ENDPOINT_SWIFT_ADMIN'] = (
        '%s://%s:%s' % (internal_proto, internal_host, 8080))
    instack_env.update(endpoints)
  989. def _write_password_file(instack_env):
  990. with open(PATHS.PASSWORD_PATH, 'w') as password_file:
  991. password_file.write('[auth]\n')
  992. for opt in _auth_opts:
  993. env_name = opt.name.upper()
  994. value = CONF.auth[opt.name]
  995. if not value:
  996. # Heat requires this encryption key to be a specific length
  997. if env_name == 'UNDERCLOUD_HEAT_ENCRYPTION_KEY':
  998. value = _generate_password(32)
  999. else:
  1000. value = _generate_password()
  1001. LOG.info('Generated new password for %s', opt.name)
  1002. instack_env[env_name] = value
  1003. password_file.write('%s=%s\n' % (opt.name, value))
  1004. os.chmod(PATHS.PASSWORD_PATH, 0o600)
def _member_role_exists():
    """Restore the admin user's _member_ role assignment if the role exists.

    No-op when the deprecated _member_ role is absent.
    """
    # This is a workaround for puppet removing the deprecated _member_
    # role on upgrade - if it exists we must restore role assignments
    # or trusts stored in the undercloud heat will break
    user, password, project, auth_url = _get_auth_values()
    auth_kwargs = {
        'auth_url': auth_url,
        'username': user,
        'password': password,
        'project_name': project,
        'project_domain_name': 'Default',
        'user_domain_name': 'Default',
    }
    auth_plugin = ks_auth.Password(**auth_kwargs)
    sess = session.Session(auth=auth_plugin)
    # Discover which keystone version is deployed; v2.0 and v3 expose
    # different client managers and role-assignment calls below.
    disc = discover.Discover(session=sess)
    c = disc.create_client()
    try:
        member_role = [r for r in c.roles.list() if r.name == '_member_'][0]
    except IndexError:
        # Do nothing if there is no _member_ role
        return
    # v2.0 calls projects "tenants".
    if c.version == 'v2.0':
        client_projects = c.tenants
    else:
        client_projects = c.projects
    admin_project = [t for t in client_projects.list() if t.name == 'admin'][0]
    admin_user = [u for u in c.users.list() if u.name == 'admin'][0]
    if c.version == 'v2.0':
        try:
            c.roles.add_user_role(admin_user, member_role, admin_project.id)
            LOG.info('Added _member_ role to admin user')
        except ks_exceptions.http.Conflict:
            # They already had the role
            pass
    else:
        # v3 uses grant() instead of add_user_role().
        try:
            c.roles.grant(member_role,
                          user=admin_user,
                          project=admin_project.id)
            LOG.info('Added _member_ role to admin user')
        except ks_exceptions.http.Conflict:
            # They already had the role
            pass
class InstackEnvironment(dict):
    """An environment to pass to Puppet with some safety checks.

    Keeps lists of variables we add to the operating system environment,
    and ensures that we don't set anything not defined there.
    """

    INSTACK_KEYS = {'HOSTNAME', 'ELEMENTS_PATH', 'NODE_DIST', 'JSONFILE',
                    'REG_METHOD', 'REG_HALT_UNREGISTER', 'PUBLIC_INTERFACE_IP'}
    """The variables instack and/or used elements can read."""

    DYNAMIC_KEYS = {'INSPECTION_COLLECTORS', 'INSPECTION_KERNEL_ARGS',
                    'INSPECTION_NODE_NOT_FOUND_HOOK',
                    'TRIPLEO_INSTALL_USER', 'TRIPLEO_UNDERCLOUD_CONF_FILE',
                    'TRIPLEO_UNDERCLOUD_PASSWORD_FILE',
                    'ENABLED_BOOT_INTERFACES', 'ENABLED_POWER_INTERFACES',
                    'ENABLED_RAID_INTERFACES', 'ENABLED_VENDOR_INTERFACES',
                    'ENABLED_MANAGEMENT_INTERFACES', 'SYSCTL_SETTINGS',
                    'LOCAL_IP_WRAPPED', 'ENABLE_ARCHITECTURE_PPC64LE',
                    'INSPECTION_SUBNETS', 'SUBNETS_CIDR_NAT_RULES',
                    'SUBNETS_STATIC_ROUTES', 'MASQUERADE_NETWORKS'}
    """The variables we calculate in _generate_environment call."""

    PUPPET_KEYS = DYNAMIC_KEYS | {opt.name.upper() for _, group in list_opts()
                                  for opt in group}
    """Keys we pass for formatting the resulting hieradata."""

    SET_ALLOWED_KEYS = DYNAMIC_KEYS | INSTACK_KEYS | PUPPET_KEYS
    """Keys which we allow to add/change in this environment."""

    def __init__(self):
        # Seed from the current process environment. NOTE(review): dict's
        # initializer does not go through __setitem__, so pre-existing
        # environment variables bypass the allowed-keys check.
        super(InstackEnvironment, self).__init__(os.environ)

    def __setitem__(self, key, value):
        # Reject anything that is not a known instack/puppet variable.
        if key not in self.SET_ALLOWED_KEYS:
            raise KeyError('Key %s is not allowed for an InstackEnvironment' %
                           key)
        return super(InstackEnvironment, self).__setitem__(key, value)
  1080. def _make_list(values):
  1081. """Generate a list suitable to pass to templates."""
  1082. return '[%s]' % ', '.join('"%s"' % item for item in values)
  1083. def _generate_sysctl_settings():
  1084. sysctl_settings = {}
  1085. sysctl_settings.update({"net.ipv4.ip_nonlocal_bind": {"value": 1}})
  1086. if _check_ipv6_enabled():
  1087. sysctl_settings.update({"net.ipv6.ip_nonlocal_bind": {"value": 1}})
  1088. return json.dumps(sysctl_settings)
  1089. def _is_classic_driver(name):
  1090. """Poor man's way to detect if something is a driver or a hardware type.
  1091. To be removed when we remove support for classic drivers.
  1092. """
  1093. return (name == 'fake' or
  1094. name.startswith('fake_') or
  1095. name.startswith('pxe_') or
  1096. name.startswith('agent_') or
  1097. name.startswith('iscsi_'))
def _process_drivers_and_hardware_types(instack_env):
    """Populate the environment with ironic driver information."""
    # Ensure correct rendering of the list and uniqueness of the items
    enabled_drivers = set(CONF.enabled_drivers)
    enabled_hardware_types = set(CONF.enabled_hardware_types)
    if CONF.enable_node_discovery:
        # The default driver for discovered nodes must itself be enabled,
        # either as a classic driver or as a hardware type.
        if _is_classic_driver(CONF.discovery_default_driver):
            if CONF.discovery_default_driver not in enabled_drivers:
                enabled_drivers.add(CONF.discovery_default_driver)
        else:
            if CONF.discovery_default_driver not in enabled_hardware_types:
                enabled_hardware_types.add(CONF.discovery_default_driver)
        instack_env['INSPECTION_NODE_NOT_FOUND_HOOK'] = 'enroll'
    else:
        instack_env['INSPECTION_NODE_NOT_FOUND_HOOK'] = ''
    # In most cases power and management interfaces are called the same, so we
    # use one variable for them.
    mgmt_interfaces = {'fake', 'ipmitool'}
    # TODO(dtantsur): can we somehow avoid hardcoding hardware types here?
    for hw_type in ('redfish', 'idrac', 'ilo', 'irmc', 'staging-ovirt'):
        if hw_type in enabled_hardware_types:
            mgmt_interfaces.add(hw_type)
    for (hw_type, iface) in [('cisco-ucs-managed', 'ucsm'),
                             ('cisco-ucs-standalone', 'cimc')]:
        if hw_type in enabled_hardware_types:
            mgmt_interfaces.add(iface)
    # Two hardware types use non-default boot interfaces.
    boot_interfaces = {'pxe'}
    for hw_type in ('ilo', 'irmc'):
        if hw_type in enabled_hardware_types:
            boot_interfaces.add('%s-pxe' % hw_type)
    raid_interfaces = {'no-raid'}
    if 'idrac' in enabled_hardware_types:
        raid_interfaces.add('idrac')
    vendor_interfaces = {'no-vendor'}
    for (hw_type, iface) in [('ipmi', 'ipmitool'),
                             ('idrac', 'idrac')]:
        if hw_type in enabled_hardware_types:
            vendor_interfaces.add(iface)
    instack_env['ENABLED_DRIVERS'] = _make_list(enabled_drivers)
    instack_env['ENABLED_HARDWARE_TYPES'] = _make_list(enabled_hardware_types)
    instack_env['ENABLED_BOOT_INTERFACES'] = _make_list(boot_interfaces)
    instack_env['ENABLED_MANAGEMENT_INTERFACES'] = _make_list(mgmt_interfaces)
    instack_env['ENABLED_RAID_INTERFACES'] = _make_list(raid_interfaces)
    instack_env['ENABLED_VENDOR_INTERFACES'] = _make_list(vendor_interfaces)
    # The snmp hardware type uses fake management and snmp power
    # NOTE: 'snmp' is added *after* ENABLED_MANAGEMENT_INTERFACES was
    # rendered above, so it deliberately appears only in the power
    # interface list. Do not reorder these statements.
    if 'snmp' in enabled_hardware_types:
        mgmt_interfaces.add('snmp')
    instack_env['ENABLED_POWER_INTERFACES'] = _make_list(mgmt_interfaces)
  1147. def _generate_masquerade_networks():
  1148. env_list = []
  1149. for subnet in CONF.subnets:
  1150. s = CONF.get(subnet)
  1151. if s.masquerade:
  1152. env_list.append(s.cidr)
  1153. # NOTE(hjensas): Remove once deprecated masquerade_network option is gone
  1154. if CONF.masquerade_network and (CONF.masquerade_network not in env_list):
  1155. env_list.append(CONF.masquerade_network)
  1156. return json.dumps(env_list)
  1157. def _generate_inspection_subnets():
  1158. env_list = []
  1159. for subnet in CONF.subnets:
  1160. env_dict = {}
  1161. s = CONF.get(subnet)
  1162. env_dict['tag'] = subnet
  1163. env_dict['ip_range'] = s.inspection_iprange
  1164. env_dict['netmask'] = str(netaddr.IPNetwork(s.cidr).netmask)
  1165. env_dict['gateway'] = s.gateway
  1166. env_list.append(env_dict)
  1167. return json.dumps(env_list)
  1168. def _generate_subnets_static_routes():
  1169. env_list = []
  1170. local_router = CONF.get(CONF.local_subnet).gateway
  1171. for subnet in CONF.subnets:
  1172. if subnet == str(CONF.local_subnet):
  1173. continue
  1174. s = CONF.get(subnet)
  1175. env_list.append({'ip_netmask': s.cidr,
  1176. 'next_hop': local_router})
  1177. return json.dumps(env_list)
  1178. def _generate_subnets_cidr_nat_rules():
  1179. env_list = []
  1180. for subnet in CONF.subnets:
  1181. s = CONF.get(subnet)
  1182. data_format = '"140 {direction} {name} cidr nat": ' \
  1183. '{{"chain": "FORWARD", "{direction}": "{cidr}", ' \
  1184. '"proto": "all", "action": "accept"}}'
  1185. env_list.append(data_format.format(
  1186. name=subnet, direction='destination', cidr=s.cidr))
  1187. env_list.append(data_format.format(
  1188. name=subnet, direction='source', cidr=s.cidr))
  1189. # Whitespace after newline required for indentation in templated yaml
  1190. return '\n '.join(env_list)
def _generate_environment(instack_root):
    """Generate an environment dict for instack

    The returned dict will have the necessary values for use as the env
    parameter when calling instack via the subprocess module.

    :param instack_root: The path containing the instack-undercloud elements
        and json files.
    :raises RuntimeError: on unsupported distros or invalid option
        combinations.
    """
    instack_env = InstackEnvironment()
    # Rabbit uses HOSTNAME, so we need to make sure it's right
    instack_env['HOSTNAME'] = CONF.undercloud_hostname or socket.gethostname()
    # Find the paths we need, preferring the system-wide installed
    # locations and falling back to the source tree.
    json_file_dir = '/usr/share/instack-undercloud/json-files'
    if not os.path.isdir(json_file_dir):
        json_file_dir = os.path.join(instack_root, 'json-files')
    instack_undercloud_elements = '/usr/share/instack-undercloud'
    if not os.path.isdir(instack_undercloud_elements):
        instack_undercloud_elements = os.path.join(instack_root, 'elements')
    tripleo_puppet_elements = '/usr/share/tripleo-puppet-elements'
    if not os.path.isdir(tripleo_puppet_elements):
        tripleo_puppet_elements = os.path.join(os.getcwd(),
                                               'tripleo-puppet-elements',
                                               'elements')
    # A user-provided ELEMENTS_PATH wins over the computed one.
    if 'ELEMENTS_PATH' in os.environ:
        instack_env['ELEMENTS_PATH'] = os.environ['ELEMENTS_PATH']
    else:
        instack_env['ELEMENTS_PATH'] = (
            '%s:%s:'
            '/usr/share/tripleo-image-elements:'
            '/usr/share/diskimage-builder/elements'
        ) % (tripleo_puppet_elements, instack_undercloud_elements)
    # Distro-specific values
    distro = platform.linux_distribution()[0]
    if distro.startswith('Red Hat Enterprise Linux'):
        instack_env['NODE_DIST'] = os.environ.get('NODE_DIST') or 'rhel7'
        instack_env['JSONFILE'] = (
            os.environ.get('JSONFILE') or
            os.path.join(json_file_dir, 'rhel-7-undercloud-packages.json')
        )
        instack_env['REG_METHOD'] = 'disable'
        instack_env['REG_HALT_UNREGISTER'] = '1'
    elif distro.startswith('CentOS'):
        instack_env['NODE_DIST'] = os.environ.get('NODE_DIST') or 'centos7'
        instack_env['JSONFILE'] = (
            os.environ.get('JSONFILE') or
            os.path.join(json_file_dir, 'centos-7-undercloud-packages.json')
        )
    elif distro.startswith('Fedora'):
        instack_env['NODE_DIST'] = os.environ.get('NODE_DIST') or 'fedora'
        raise RuntimeError('Fedora is not currently supported')
    else:
        raise RuntimeError('%s is not supported' % distro)
    # Export ENABLE_ARCHITECTURE_<ARCH>=True for each extra architecture.
    if CONF['additional_architectures']:
        for arch in CONF['additional_architectures']:
            env_name = ('enable_architecture_%s' % arch).upper()
            instack_env[env_name] = six.text_type(True)
    # Convert conf opts to env values
    for opt in _opts:
        env_name = opt.name.upper()
        instack_env[env_name] = six.text_type(CONF[opt.name])
    # Opts that needs extra processing
    if CONF.inspection_runbench and not CONF.inspection_extras:
        raise RuntimeError('inspection_extras must be enabled for '
                           'inspection_runbench to work')
    if CONF.inspection_extras:
        instack_env['INSPECTION_COLLECTORS'] = ('default,extra-hardware,'
                                                'numa-topology,logs')
    else:
        instack_env['INSPECTION_COLLECTORS'] = 'default,logs'
    # Build the kernel command line for the inspection ramdisk.
    inspection_kernel_args = []
    if CONF.undercloud_debug:
        inspection_kernel_args.append('ipa-debug=1')
    if CONF.inspection_runbench:
        inspection_kernel_args.append('ipa-inspection-benchmarks=cpu,mem,disk')
    if CONF.inspection_extras:
        inspection_kernel_args.append('ipa-inspection-dhcp-all-interfaces=1')
        inspection_kernel_args.append('ipa-collect-lldp=1')
    instack_env['INSPECTION_KERNEL_ARGS'] = ' '.join(inspection_kernel_args)
    _process_drivers_and_hardware_types(instack_env)
    instack_env['INSPECTION_SUBNETS'] = _generate_inspection_subnets()
    instack_env['SUBNETS_CIDR_NAT_RULES'] = _generate_subnets_cidr_nat_rules()
    instack_env['MASQUERADE_NETWORKS'] = _generate_masquerade_networks()
    instack_env['SUBNETS_STATIC_ROUTES'] = _generate_subnets_static_routes()
    instack_env['SYSCTL_SETTINGS'] = _generate_sysctl_settings()
    if CONF.docker_registry_mirror:
        instack_env['DOCKER_REGISTRY_MIRROR'] = CONF.docker_registry_mirror
    # Keep the CIDR-qualified value for the public interface, then strip
    # the prefix length from LOCAL_IP itself.
    instack_env['PUBLIC_INTERFACE_IP'] = instack_env['LOCAL_IP']
    instack_env['LOCAL_IP'] = instack_env['LOCAL_IP'].split('/')[0]
    instack_env['LOCAL_IP_WRAPPED'] = _wrap_ipv6(instack_env['LOCAL_IP'])
    # We're not in a chroot so this doesn't make sense, and it causes weird
    # errors if it's set.
    if instack_env.get('DIB_YUM_REPO_CONF'):
        del instack_env['DIB_YUM_REPO_CONF']
    instack_env['TRIPLEO_INSTALL_USER'] = getpass.getuser()
    instack_env['TRIPLEO_UNDERCLOUD_CONF_FILE'] = PATHS.CONF_PATH
    instack_env['TRIPLEO_UNDERCLOUD_PASSWORD_FILE'] = PATHS.PASSWORD_PATH
    # Mustache conditional logic requires ENABLE_NOVAJOIN to be undefined
    # when novajoin is not enabled.
    if instack_env['ENABLE_NOVAJOIN'].lower() == 'false':
        del instack_env['ENABLE_NOVAJOIN']
    _generate_endpoints(instack_env)
    _write_password_file(instack_env)
    if CONF.generate_service_certificate:
        public_host = CONF.undercloud_public_host
        instack_env['UNDERCLOUD_SERVICE_CERTIFICATE'] = (
            '/etc/pki/tls/certs/undercloud-%s.pem' % public_host)
    elif instack_env['UNDERCLOUD_SERVICE_CERTIFICATE']:
        # A relative certificate path is resolved against the home
        # directory, except when a same-named file exists in the current
        # working directory - then that file is used with a warning.
        raw_value = instack_env['UNDERCLOUD_SERVICE_CERTIFICATE']
        abs_cert = os.path.abspath(raw_value)
        if abs_cert != raw_value:
            home_dir = os.path.expanduser('~')
            if os.getcwd() != home_dir and os.path.exists(abs_cert):
                LOG.warning('Using undercloud_service_certificate from '
                            'current directory, please use an absolute path '
                            'to remove ambiguity')
                instack_env['UNDERCLOUD_SERVICE_CERTIFICATE'] = abs_cert
            else:
                instack_env['UNDERCLOUD_SERVICE_CERTIFICATE'] = os.path.join(
                    home_dir, raw_value)
    return instack_env
  1310. def _get_template_path(template):
  1311. local_template_path = os.path.join(
  1312. os.path.dirname(__file__),
  1313. '..',
  1314. 'templates',
  1315. template)
  1316. installed_template_path = os.path.join(
  1317. '/usr/share/instack-undercloud/templates',
  1318. template)
  1319. if os.path.exists(local_template_path):
  1320. return local_template_path
  1321. else:
  1322. return installed_template_path
def _generate_init_data(instack_env):
    """Render and install the heat-cfntools cfn-init-data file.

    Renders config.json.template (with the net-config partial) against
    instack_env plus extra context values, optionally installs a hieradata
    override file, and moves the result into place via sudo.

    :param instack_env: environment dict used as the mustache context.
    :raises RuntimeError: when the configured hieradata_override file
        does not exist.
    """
    context = instack_env.copy()
    if CONF.hieradata_override:
        data_file = CONF.hieradata_override
        # The hiera entry name is the override file's basename without
        # its extension.
        hiera_entry = os.path.splitext(os.path.basename(data_file))[0]
        dst = os.path.join('/etc/puppet/hieradata',
                           os.path.basename(data_file))
        if os.path.abspath(CONF.hieradata_override) != data_file:
            # If we don't have an absolute path, compute it
            data_file = os.path.join(os.path.expanduser('~'), data_file)
        if not os.path.exists(data_file):
            raise RuntimeError(
                "Could not find hieradata_override file '%s'" % data_file)
        # The hieradata directory is root-owned, so copy via sudo.
        _run_command(['sudo', 'mkdir', '-p', '/etc/puppet/hieradata'])
        _run_command(['sudo', 'cp', data_file, dst])
        _run_command(['sudo', 'chmod', '0644', dst])
    else:
        hiera_entry = ''
    # The net-config partial comes from the user override when given,
    # otherwise from the bundled template.
    if CONF.net_config_override:
        net_config_json = open(CONF.net_config_override).read()
    else:
        net_config_json = \
            open(_get_template_path('net-config.json.template')).read()
    context['HIERADATA_OVERRIDE'] = hiera_entry
    context['UNDERCLOUD_NAMESERVERS'] = json.dumps(
        CONF.undercloud_nameservers)
    partials = {'net_config': net_config_json}
    renderer = pystache.Renderer(partials=partials)
    template = _get_template_path('config.json.template')
    with open(template) as f:
        config_json = renderer.render(f.read(), context)
    # Undo pystache's HTML escaping of double quotes so the output stays
    # valid JSON.
    config_json = config_json.replace('&quot;', '"')
    cfn_path = '/var/lib/heat-cfntools/cfn-init-data'
    # Write to a temp file first, then move into the root-owned
    # destination with sudo.
    tmp_json = tempfile.mkstemp()[1]
    with open(tmp_json, 'w') as f:
        print(config_json, file=f)
    if not os.path.exists(os.path.dirname(cfn_path)):
        _run_command(['sudo', 'mkdir', '-p', os.path.dirname(cfn_path)])
    _run_command(['sudo', 'mv', tmp_json, cfn_path])
    _run_command(['sudo', 'chmod', '0644', cfn_path])
  1363. def _run_instack(instack_env):
  1364. args = ['sudo', '-E', 'instack', '-p', instack_env['ELEMENTS_PATH'],
  1365. '-j', instack_env['JSONFILE'],
  1366. ]
  1367. LOG.info('Running instack')
  1368. _run_live_command(args, instack_env, 'instack')
  1369. LOG.info('Instack completed successfully')
  1370. def _run_yum_clean_all(instack_env):
  1371. args = ['sudo', 'yum', 'clean', 'all']
  1372. LOG.info('Running yum clean all')
  1373. _run_live_command(args, instack_env, 'yum-clean-all')
  1374. LOG.info('yum-clean-all completed successfully')
  1375. def _run_yum_update(instack_env):
  1376. args = ['sudo', 'yum', 'update', '-y']
  1377. LOG.info('Running yum update')
  1378. _run_live_command(args, instack_env, 'yum-update')
  1379. LOG.info('yum-update completed successfully')
  1380. def _get_ovs_interfaces():
  1381. interfaces = glob.glob('/etc/sysconfig/network-scripts/ifcfg-*')
  1382. pattern = "OVSIntPort"
  1383. ovs_interfaces = []
  1384. for interface in interfaces:
  1385. with open(interface, "r") as text:
  1386. for line in text:
  1387. if re.findall(pattern, line):
  1388. # FIXME (holser). It might be better to get interface from
  1389. # DEVICE rather than name of file.
  1390. ovs_interfaces.append(interface.split('-')[-1])
  1391. return ovs_interfaces
  1392. def _run_restore_ovs_interfaces(interfaces):
  1393. for interface in interfaces:
  1394. LOG.info('Running restart OVS interface %s', interface)
  1395. _run_command(['sudo', 'ifup', interface])
  1396. LOG.info('Restart OVS interface %s completed successfully', interface)
  1397. def _run_orc(instack_env):
  1398. args = ['sudo', 'os-refresh-config']
  1399. LOG.info('Running os-refresh-config')
  1400. _run_live_command(args, instack_env, 'os-refresh-config')
  1401. LOG.info('os-refresh-config completed successfully')
  1402. def _extract_from_stackrc(name):
  1403. """Extract authentication values from stackrc
  1404. :param name: The value to be extracted. For example: OS_USERNAME or
  1405. OS_AUTH_URL.
  1406. """
  1407. with open(os.path.expanduser('~/stackrc')) as f:
  1408. for line in f:
  1409. if name in line:
  1410. parts = line.split('=')
  1411. return parts[1].rstrip()
  1412. def _ensure_user_identity(id_path):
  1413. if not os.path.isfile(id_path):
  1414. args = ['ssh-keygen', '-t', 'rsa', '-N', '', '-f', id_path]
  1415. _run_command(args)
  1416. LOG.info('Generated new ssh key in ~/.ssh/id_rsa')
  1417. def _get_auth_values():
  1418. """Get auth values from stackrc
  1419. Returns the user, password, project and auth_url as read from stackrc,
  1420. in that order as a tuple.
  1421. """
  1422. user = _extract_from_stackrc('OS_USERNAME')
  1423. password = _run_command(['sudo', 'hiera', 'admin_password']).rstrip()
  1424. project = _extract_from_stackrc('OS_PROJECT_NAME')
  1425. auth_url = _extract_from_stackrc('OS_AUTH_URL')
  1426. return user, password, project, auth_url
  1427. def _configure_ssh_keys(nova):
  1428. """Configure default ssh keypair in Nova
  1429. Generates a new ssh key for the current user if one does not already
  1430. exist, then uploads that to Nova as the 'default' keypair.
  1431. """
  1432. id_path = os.path.expanduser('~/.ssh/id_rsa')
  1433. _ensure_user_identity(id_path)
  1434. try:
  1435. nova.keypairs.get('default')
  1436. except exceptions.NotFound:
  1437. with open(id_path + '.pub') as pubkey:
  1438. nova.keypairs.create('default', pubkey.read().rstrip())
  1439. def _ensure_ssh_selinux_permission():
  1440. ssh_path = os.path.expanduser('~/.ssh')
  1441. try:
  1442. enforcing = _run_command(['getenforce'])
  1443. if os.path.isdir(ssh_path):
  1444. if 'Enforcing' in enforcing:
  1445. file_perms = _run_command(
  1446. ['find', ssh_path, '-exec', 'ls', '-lZ', '{}', ';'])
  1447. wrong_perm = False
  1448. for line in file_perms.splitlines():
  1449. if 'ssh_home_t' not in line:
  1450. wrong_perm = True
  1451. break
  1452. if wrong_perm:
  1453. cmd = ['semanage',
  1454. 'fcontext', '-a', '-t', 'ssh_home_t',
  1455. "{}(/.*)?".format(ssh_path)]
  1456. _run_command(cmd)
  1457. _run_command(['restorecon', '-R', ssh_path])
  1458. except OSError as e:
  1459. if e.errno == os.errno.ENOENT:
  1460. LOG.debug("Not a SeLinux platform")
  1461. else:
  1462. raise
  1463. def _delete_default_flavors(nova):
  1464. """Delete the default flavors from Nova
  1465. The m1.tiny, m1.small, etc. flavors are not useful on an undercloud.
  1466. """
  1467. to_delete = ['m1.tiny', 'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge']
  1468. for f in nova.flavors.list():
  1469. if f.name in to_delete:
  1470. nova.flavors.delete(f.id)
def _ensure_flavor(nova, existing, name, profile=None):
    """Create or update a baremetal flavor bound to the default resource class

    :param nova: A nova client instance.
    :param existing: The already-existing flavor with this name, or None.
    :param name: Name of the flavor to ensure.
    :param profile: Optional value for the capabilities:profile key, only
        applied when the flavor is created.
    """
    rc_key_name = 'resources:CUSTOM_%s' % DEFAULT_NODE_RESOURCE_CLASS.upper()
    keys = {
        # First, make it request the default resource class
        rc_key_name: "1",
        # Then disable scheduling based on everything else
        "resources:DISK_GB": "0",
        "resources:MEMORY_MB": "0",
        "resources:VCPU": "0"
    }
    if existing is None:
        # 4096 MB RAM, 1 VCPU, 40 GB disk - nominal values; scheduling is
        # driven by the resource-class keys set below, not these numbers.
        flavor = nova.flavors.create(name, 4096, 1, 40)
        keys['capabilities:boot_option'] = 'local'
        if profile is not None:
            keys['capabilities:profile'] = profile
        flavor.set_keys(keys)
        message = 'Created flavor "%s" with profile "%s"'
        LOG.info(message, name, profile)
    else:
        LOG.info('Not creating flavor "%s" because it already exists.', name)
        # NOTE(dtantsur): it is critical to ensure that the flavors request
        # the correct resource class, otherwise scheduling will fail.
        old_keys = existing.get_keys()
        for key in old_keys:
            if key.startswith('resources:CUSTOM_') and key != rc_key_name:
                # A different custom resource class was set by the operator;
                # leave the flavor untouched rather than fight over it.
                LOG.warning('Not updating flavor %s, as it already has a '
                            'custom resource class %s. Make sure you have '
                            'enough nodes with this resource class.',
                            existing.name, key)
                return
        # Keep existing values
        keys.update(old_keys)
        existing.set_keys(keys)
        LOG.info('Flavor %s updated to use custom resource class %s',
                 name, DEFAULT_NODE_RESOURCE_CLASS)
  1506. def _ensure_node_resource_classes(ironic):
  1507. for node in ironic.node.list(limit=0, fields=['uuid', 'resource_class']):
  1508. if node.resource_class:
  1509. if node.resource_class != DEFAULT_NODE_RESOURCE_CLASS:
  1510. LOG.warning('Node %s is using a resource class %s instead '
  1511. 'of the default %s. Make sure you use the correct '
  1512. 'flavor for it.', node.uuid, node.resource_class,
  1513. DEFAULT_NODE_RESOURCE_CLASS)
  1514. continue
  1515. ironic.node.update(node.uuid,
  1516. [{'path': '/resource_class', 'op': 'add',
  1517. 'value': DEFAULT_NODE_RESOURCE_CLASS}])
  1518. LOG.info('Node %s resource class was set to %s',
  1519. node.uuid, DEFAULT_NODE_RESOURCE_CLASS)
  1520. def _copy_stackrc():
  1521. args = ['sudo', 'cp', '/root/stackrc', os.path.expanduser('~')]
  1522. try:
  1523. _run_command(args, name='Copy stackrc')
  1524. except subprocess.CalledProcessError:
  1525. LOG.info("/root/stackrc not found, this is OK on initial deploy")
  1526. args = ['sudo', 'chown', getpass.getuser() + ':',
  1527. os.path.expanduser('~/stackrc')]
  1528. _run_command(args, name='Chown stackrc')
  1529. def _clean_os_refresh_config():
  1530. orc_dirs = glob.glob('/usr/libexec/os-refresh-config/*')
  1531. args = ['sudo', 'rm', '-rf'] + orc_dirs
  1532. _run_command(args, name='Clean os-refresh-config')
  1533. def _clean_os_collect_config():
  1534. occ_dir = '/var/lib/os-collect-config'
  1535. args = ['sudo', 'rm', '-fr', occ_dir]
  1536. _run_command(args, name='Clean os-collect-config')
def _create_mistral_config_environment(instack_env, mistral):
    """Create or refresh the tripleo.undercloud-config Mistral environment

    :param instack_env: Dict of undercloud environment values; the
        ceilometer snmpd and db passwords are read from it.
    :param mistral: A mistral client instance.
    """
    # Store all the required passwords from the Undercloud
    # in a Mistral environment so they can be accessed
    # by the Mistral actions.
    config_data = {
        'undercloud_ceilometer_snmpd_password':
            instack_env['UNDERCLOUD_CEILOMETER_SNMPD_PASSWORD'],
        'undercloud_db_password':
            instack_env['UNDERCLOUD_DB_PASSWORD']
    }
    env_name = 'tripleo.undercloud-config'
    try:
        env_data = mistral.environments.get(env_name).variables
    except (ks_exceptions.NotFound, mistralclient_exc.APIException):
        # If the environment is not created, we need to
        # create it with the information in config_data
        mistral.environments.create(
            name=env_name,
            description='Undercloud configuration parameters',
            variables=json.dumps(config_data, sort_keys=True)
        )
        return
    # If we are upgrading from an environment without
    # variables defined in config_data, we need to update
    # the environment variables.
    for var, value in iter(config_data.items()):
        if var in env_data:
            if env_data[var] != config_data[var]:
                # Value in config_data is different
                # need to update
                env_data[var] = value
        else:
            # The value in config_data
            # is new, we need to add it
            env_data[var] = value
    # Here we update the current environment
    # with the variables updated
    mistral.environments.update(
        name=env_name,
        description='Undercloud configuration parameters',
        variables=json.dumps(env_data, sort_keys=True)
    )
def _wait_for_mistral_execution(timeout_at, mistral, execution, message='',
                                fail_on_error=False):
    """Poll a Mistral execution until it leaves RUNNING or the deadline hits

    :param timeout_at: Absolute deadline (time.time() based) to give up at.
    :param mistral: A mistral client instance.
    :param execution: The execution object to wait for.
    :param message: Text included in the error logged on failure.
    :param fail_on_error: When True, raise RuntimeError on failure/timeout
        instead of just logging.
    """
    while time.time() < timeout_at:
        exe = mistral.executions.get(execution.id)
        if exe.state == "RUNNING":
            # Still in progress - poll again in 5 seconds.
            time.sleep(5)
            continue
        if exe.state == "SUCCESS":
            return
        else:
            # The execution failed: gather output from any failed
            # "run_validation" action executions that started after this
            # execution, so the error log carries some context.
            exe_out = ""
            exe_created_at = time.strptime(exe.created_at,
                                           "%Y-%m-%d %H:%M:%S")
            ae_list = mistral.action_executions.list()
            for ae in ae_list:
                if ((ae.task_name == "run_validation") and
                        (ae.state == "ERROR") and
                        (time.strptime(ae.created_at, "%Y-%m-%d %H:%M:%S") >
                         exe_created_at)):
                    task = mistral.tasks.get(ae.task_execution_id)
                    task_res = task.to_dict().get('result')
                    exe_out = "%s %s" % (exe_out, task_res)
            error_message = "ERROR %s %s Mistral execution ID: %s" % (
                message, exe_out, execution.id)
            LOG.error(error_message)
            if fail_on_error:
                raise RuntimeError(error_message)
            return
    else:
        # while/else: only reached when the deadline expires while the
        # execution is still RUNNING (every other path returns above).
        exe = mistral.executions.get(execution.id)
        error_message = ("TIMEOUT waiting for execution %s to finish. "
                         "State: %s" % (exe.id, exe.state))
        LOG.error(error_message)
        if fail_on_error:
            raise RuntimeError(error_message)
  1614. def _get_session():
  1615. user, password, project, auth_url = _get_auth_values()
  1616. auth_kwargs = {
  1617. 'auth_url': auth_url,
  1618. 'username': user,
  1619. 'password': password,
  1620. 'project_name': project,
  1621. 'project_domain_name': 'Default',
  1622. 'user_domain_name': 'Default',
  1623. }
  1624. auth_plugin = ks_auth.Password(**auth_kwargs)
  1625. return session.Session(auth=auth_plugin)
  1626. def _run_validation_groups(groups=[], mistral_url='', timeout=540,
  1627. fail_on_error=False):
  1628. sess = _get_session()
  1629. mistral = mistralclient.client(mistral_url=mistral_url, session=sess)
  1630. LOG.info('Starting and waiting for validation groups %s ', groups)
  1631. execution = mistral.executions.create(
  1632. 'tripleo.validations.v1.run_groups',
  1633. workflow_input={'group_names': groups}
  1634. )
  1635. fail_message = ("error running the validation groups %s " % groups)
  1636. timeout_at = time.time() + timeout
  1637. _wait_for_mistral_execution(timeout_at, mistral, execution, fail_message,
  1638. fail_on_error)
  1639. def _create_default_plan(mistral, plans, timeout=360):
  1640. plan_name = 'overcloud'
  1641. queue_name = str(uuid.uuid4())
  1642. if plan_name in plans:
  1643. LOG.info('Not creating default plan "%s" because it already exists.',
  1644. plan_name)
  1645. return
  1646. execution = mistral.executions.create(
  1647. 'tripleo.plan_management.v1.create_deployment_plan',
  1648. workflow_input={
  1649. 'container': plan_name,
  1650. 'queue_name': queue_name,
  1651. 'use_default_templates': True,
  1652. }
  1653. )
  1654. timeout_at = time.time() + timeout
  1655. fail_message = ("error creating the default Deployment Plan %s "
  1656. "Check the create_default_deployment_plan execution "
  1657. "in Mistral with openstack workflow execution list " %
  1658. plan_name)
  1659. _wait_for_mistral_execution(timeout_at, mistral, execution, fail_message,
  1660. fail_on_error=True)
  1661. def _prepare_ssh_environment(mistral):
  1662. mistral.executions.create('tripleo.validations.v1.copy_ssh_key')
  1663. def _create_logging_cron(mistral):
  1664. LOG.info('Configuring an hourly cron trigger for tripleo-ui logging')
  1665. mistral.cron_triggers.create(
  1666. 'publish-ui-logs-hourly',
  1667. 'tripleo.plan_management.v1.publish_ui_logs_to_swift',
  1668. pattern='0 * * * *'
  1669. )
def _post_config_mistral(instack_env, mistral, swift):
    """Refresh the TripleO Mistral workbooks/workflows and seed state

    Deletes the previously installed tripleo workbooks and workflows,
    re-creates the workbooks shipped in PATHS.WORKBOOK_PATH, stores the
    undercloud config environment, creates the default plan and the
    logging cron trigger, and prepares ssh when validations are enabled.

    :param instack_env: Dict of undercloud environment values.
    :param mistral: A mistral client instance.
    :param swift: A swift client connection (used to list existing plans).
    """
    LOG.info('Configuring Mistral workbooks')
    for workbook in [w for w in mistral.workbooks.list()
                     if w.name.startswith('tripleo')]:
        mistral.workbooks.delete(workbook.name)
    managed_tag = 'tripleo-common-managed'
    all_workflows = mistral.workflows.list()
    workflow_tags = set()
    for workflow in all_workflows:
        workflow_tags.update(workflow.tags)
    # If at least one workflow is tagged, then we should only delete those.
    # Otherwise we should revert to the previous behaviour - this is required
    # for the initial upgrade.
    # TODO(d0ugal): From Q onwards we should only ever delete workflows with
    # the tripleo-common tag.
    if 'tripleo-common-managed' in workflow_tags:
        workflows_delete = [w.name for w in all_workflows
                            if managed_tag in w.tags]
    else:
        workflows_delete = [w.name for w in all_workflows
                            if w.name.startswith('tripleo')]
    # in order to delete workflows they should have no triggers associated
    for trigger in [t for t in mistral.cron_triggers.list()
                    if t.workflow_name in workflows_delete]:
        mistral.cron_triggers.delete(trigger.name)
    for workflow_name in workflows_delete:
        mistral.workflows.delete(workflow_name)
    for workbook in [f for f in os.listdir(PATHS.WORKBOOK_PATH)
                     if os.path.isfile(os.path.join(PATHS.WORKBOOK_PATH, f))]:
        mistral.workbooks.create(os.path.join(PATHS.WORKBOOK_PATH, workbook))
    LOG.info('Mistral workbooks configured successfully')
    plans = [container["name"] for container in swift.get_account()[1]]
    _create_mistral_config_environment(instack_env, mistral)
    _create_default_plan(mistral, plans)
    _create_logging_cron(mistral)
    if CONF.enable_validations:
        _prepare_ssh_environment(mistral)
  1707. def _migrate_to_convergence(heat):
  1708. """Migrate all active stacks to use the convergence engine
  1709. This appears to be a noop if the stack has already been migrated, so it
  1710. should be safe to run unconditionally.
  1711. :param heat: A heat client instance
  1712. """
  1713. for stack in heat.stacks.list():
  1714. LOG.info('Migrating stack "%s" to convergence engine', stack.id)
  1715. args = ['sudo', '-E', 'heat-manage', 'migrate_convergence_1', stack.id]
  1716. _run_command(args, name='heat-manage')
  1717. LOG.info('Finished migrating stack "%s"', stack.id)
def _post_config(instack_env, upgrade):
    """Perform the API-level configuration after puppet has run

    Copies stackrc, ensures the ctlplane network/segments/subnets, ssh
    keys, flavors and node resource classes, configures Mistral, and on
    upgrade migrates existing Heat stacks to the convergence engine.

    :param instack_env: Dict of undercloud environment values.
    :param upgrade: Whether this run is an upgrade.
    """
    _copy_stackrc()
    user, password, project, auth_url = _get_auth_values()
    sess = _get_session()
    nova = novaclient.Client(2, session=sess)
    ironic = ir_client.get_client(1, session=sess,
                                  os_ironic_api_version='1.21')
    sdk = os_client_config.make_sdk(auth_url=auth_url,
                                    project_name=project,
                                    username=user,
                                    password=password,
                                    project_domain_name='Default',
                                    user_domain_name='Default')
    network = _ensure_neutron_network(sdk)
    _config_neutron_segments_and_subnets(sdk, network.id)
    _configure_ssh_keys(nova)
    _ensure_ssh_selinux_permission()
    _delete_default_flavors(nova)
    _ensure_node_resource_classes(ironic)
    all_flavors = {f.name: f for f in nova.flavors.list()}
    for name, profile in [('baremetal', None),
                          ('control', 'control'),
                          ('compute', 'compute'),
                          ('ceph-storage', 'ceph-storage'),
                          ('block-storage', 'block-storage'),
                          ('swift-storage', 'swift-storage')]:
        _ensure_flavor(nova, all_flavors.get(name), name, profile)
    mistral_url = instack_env['UNDERCLOUD_ENDPOINT_MISTRAL_PUBLIC']
    mistral = mistralclient.client(
        mistral_url=mistral_url,
        session=sess)
    swift = swiftclient.Connection(
        authurl=auth_url,
        session=sess
    )
    _post_config_mistral(instack_env, mistral, swift)
    _member_role_exists()
    # NOTE(bnemec): We are turning on the convergence engine in Queens, so we
    # need to migrate all existing stacks on upgrade. This functionality can
    # be removed in Rocky as all stacks should have been migrated by then.
    if upgrade:
        heat = os_client_config.make_client('orchestration',
                                            auth_url=auth_url,
                                            username=user,
                                            password=password,
                                            project_name=project,
                                            project_domain_name='Default',
                                            user_domain_name='Default')
        _migrate_to_convergence(heat)
  1767. def _ensure_neutron_network(sdk):
  1768. try:
  1769. network = list(sdk.network.networks(name=PHYSICAL_NETWORK))
  1770. if not network:
  1771. network = sdk.network.create_network(
  1772. name=PHYSICAL_NETWORK, provider_network_type='flat',
  1773. provider_physical_network=PHYSICAL_NETWORK)
  1774. LOG.info("Network created %s", network)
  1775. # (hjensas) Delete the default segment, we create a new segment
  1776. # per subnet later.
  1777. segments = list(sdk.network.segments(network_id=network.id))
  1778. sdk.network.delete_segment(segments[0].id)
  1779. LOG.info("Default segment on network %s deleted.", network.name)
  1780. else:
  1781. LOG.info("Not creating %s network, because it already exists.",
  1782. PHYSICAL_NETWORK)
  1783. network = network[0]
  1784. except Exception as e:
  1785. LOG.info("Network create/update failed %s", e)
  1786. raise
  1787. return network
  1788. def _neutron_subnet_create(sdk, network_id, cidr, gateway, host_routes,
  1789. allocation_pool, name, segment_id):
  1790. try:
  1791. # DHCP_START contains a ":" then assume a IPv6 subnet
  1792. if ':' in allocation_pool[0]['start']:
  1793. host_routes = ''
  1794. subnet = sdk.network.create_subnet(
  1795. name=name,
  1796. cidr=cidr,
  1797. gateway_ip=gateway,
  1798. host_routes=host_routes,
  1799. enable_dhcp=True,
  1800. ip_version='6',
  1801. ipv6_address_mode='dhcpv6-stateless',
  1802. ipv6_ra_mode='dhcpv6-stateless',
  1803. allocation_pools=allocation_pool,
  1804. network_id=network_id,
  1805. segment_id=segment_id)
  1806. else:
  1807. subnet = sdk.network.create_subnet(
  1808. name=name,
  1809. cidr=cidr,
  1810. gateway_ip=gateway,
  1811. host_routes=host_routes,
  1812. enable_dhcp=True,
  1813. ip_version='4',
  1814. allocation_pools=allocation_pool,
  1815. network_id=network_id,
  1816. segment_id=segment_id)
  1817. LOG.info("Subnet created %s", subnet)
  1818. except Exception as e:
  1819. LOG.error("Create subnet %s failed: %s", name, e)
  1820. raise
  1821. return subnet
  1822. def _neutron_subnet_update(sdk, subnet_id, gateway, host_routes,
  1823. allocation_pool, name):
  1824. try:
  1825. # DHCP_START contains a ":" then assume a IPv6 subnet
  1826. if ':' in allocation_pool[0]['start']:
  1827. host_routes = ''
  1828. subnet = sdk.network.update_subnet(
  1829. subnet_id,
  1830. name=name,
  1831. gateway_ip=gateway,
  1832. host_routes=host_routes,
  1833. allocation_pools=allocation_pool)
  1834. LOG.info("Subnet updated %s", subnet)
  1835. except Exception as e:
  1836. LOG.error("Update subnet %s failed: %s", name, e)
  1837. raise
  1838. def _neutron_segment_create(sdk, name, network_id, phynet):
  1839. try:
  1840. segment = sdk.network.create_segment(
  1841. name=name,
  1842. network_id=network_id,
  1843. physical_network=phynet,
  1844. network_type='flat')
  1845. LOG.info("Neutron Segment created %s", segment)
  1846. except Exception as e:
  1847. LOG.info("Neutron Segment %s create failed %s", name, e)
  1848. raise
  1849. return segment
  1850. def _neutron_segment_update(sdk, segment_id, name):
  1851. try:
  1852. segment = sdk.network.update_segment(segment_id, name=name)
  1853. LOG.info("Neutron Segment updated %s", segment)
  1854. except Exception as e:
  1855. LOG.info("Neutron Segment %s update failed %s", name, e)
  1856. raise
  1857. def _ensure_neutron_router(sdk, name, subnet_id):
  1858. try:
  1859. router = sdk.network.create_router(name=name, admin_state_up='true')
  1860. sdk.network.add_interface_to_router(router.id, subnet_id=subnet_id)
  1861. except Exception as e:
  1862. LOG.error("Create router for subnet %s failed: %s", name, e)
  1863. raise
  1864. def _get_subnet(sdk, cidr, network_id):
  1865. try:
  1866. subnet = list(sdk.network.subnets(cidr=cidr, network_id=network_id))
  1867. except Exception:
  1868. raise
  1869. return False if not subnet else subnet[0]
  1870. def _get_segment(sdk, phy, network_id):
  1871. try:
  1872. segment = list(sdk.network.segments(physical_network=phy,
  1873. network_id=network_id))
  1874. except Exception:
  1875. raise
  1876. return False if not segment else segment[0]
def _config_neutron_segments_and_subnets(sdk, ctlplane_id):
    """Create/update the segments and subnets of the ctlplane network

    If the local subnet already exists without a segment association (a
    deployment that predates routed networks), only that subnet is updated
    and any additional subnets are ignored.  Otherwise one segment and one
    subnet are ensured per entry in CONF.subnets.

    :param sdk: An OpenStack SDK connection.
    :param ctlplane_id: ID of the ctlplane network.
    """
    s = CONF.get(CONF.local_subnet)
    subnet = _get_subnet(sdk, s.cidr, ctlplane_id)
    if subnet and not subnet.segment_id:
        LOG.warn("Local subnet %s already exists and is not associated with a "
                 "network segment. Any additional subnets will be ignored.",
                 CONF.local_subnet)
        host_routes = [{'destination': '169.254.169.254/32',
                        'nexthop': str(netaddr.IPNetwork(CONF.local_ip).ip)}]
        allocation_pool = [{'start': s.dhcp_start, 'end': s.dhcp_end}]
        _neutron_subnet_update(sdk, subnet.id, s.gateway, host_routes,
                               allocation_pool, CONF.local_subnet)
        # If the subnet is IPv6 we need to start a router so that router
        # advertisments are sent out for stateless IP addressing to work.
        if ':' in s.dhcp_start:
            _ensure_neutron_router(sdk, CONF.local_subnet, subnet.id)
    else:
        for name in CONF.subnets:
            s = CONF.get(name)
            # Each subnet maps to a physical network of the same name,
            # except the local subnet which uses the ctlplane physnet.
            phynet = name
            if name == CONF.local_subnet:
                phynet = PHYSICAL_NETWORK
            # Route the metadata IP via the local IP when it is on this
            # subnet, otherwise via the subnet gateway.
            metadata_nexthop = s.gateway
            if str(netaddr.IPNetwork(CONF.local_ip).ip) in s.cidr:
                metadata_nexthop = str(netaddr.IPNetwork(CONF.local_ip).ip)
            host_routes = [{'destination': '169.254.169.254/32',
                            'nexthop': metadata_nexthop}]
            allocation_pool = [{'start': s.dhcp_start, 'end': s.dhcp_end}]
            subnet = _get_subnet(sdk, s.cidr, ctlplane_id)
            segment = _get_segment(sdk, phynet, ctlplane_id)
            if name == CONF.local_subnet:
                if ((subnet and not segment) or
                        (subnet and segment and
                         subnet.segment_id != segment.id)):
                    LOG.error(
                        'The cidr: %s of the local subnet is already used in '
                        'subnet: %s associated with segment_id: %s.' %
                        (s.cidr, subnet.id, subnet.segment_id))
                    raise RuntimeError('Local subnet cidr already associated.')
            if subnet:
                _neutron_segment_update(sdk, subnet.segment_id, name)
                _neutron_subnet_update(sdk, subnet.id, s.gateway, host_routes,
                                       allocation_pool, name)
            else:
                if segment:
                    _neutron_segment_update(sdk, segment.id, name)
                else:
                    segment = _neutron_segment_create(sdk, name,
                                                      ctlplane_id, phynet)
                if CONF.enable_routed_networks:
                    subnet = _neutron_subnet_create(sdk, ctlplane_id, s.cidr,
                                                    s.gateway, host_routes,
                                                    allocation_pool, name,
                                                    segment.id)
                elif name == CONF.local_subnet:
                    # Create subnet with segment_id: None if routed networks
                    # is not enabled.
                    # TODO(hjensas): Deprecate option and remove this once
                    # tripleo-ui can support managing baremetal port
                    # attributes.
                    subnet = _neutron_subnet_create(sdk, ctlplane_id, s.cidr,
                                                    s.gateway, host_routes,
                                                    allocation_pool, name,
                                                    None)
                # If the subnet is IPv6 we need to start a router so that
                # router advertisments are sent out for stateless IP
                # addressing to work.
                if ':' in s.dhcp_start:
                    _ensure_neutron_router(sdk, name, subnet.id)
  1944. def _handle_upgrade_fact(upgrade=False):
  1945. """Create an upgrade fact for use in puppet
  1946. Since we don't run different puppets depending on if it's an upgrade or
  1947. not, we need to be able to pass a flag into puppet to let it know if
  1948. we're doing an upgrade. This is helpful when trying to handle state
  1949. transitions from an already installed undercloud. This function creates
  1950. a static fact named undercloud_upgrade only after the install has occurred.
  1951. When invoked with upgrade=True, the $::undercloud_upgrade fact should
  1952. be set to true.
  1953. :param upgrade: Boolean indicating if this is an upgrade action or not
  1954. """
  1955. fact_string = 'undercloud_upgrade={}'.format(upgrade)
  1956. fact_path = '/etc/facter/facts.d/undercloud_upgrade.txt'
  1957. if not os.path.exists(os.path.dirname(fact_path)) and upgrade:
  1958. _run_command(['sudo', 'mkdir', '-p', os.path.dirname(fact_path)])
  1959. # We only need to ensure the fact is correct when we've already installed
  1960. # the undercloud.
  1961. if os.path.exists(os.path.dirname(fact_path)):
  1962. tmp_fact = tempfile.mkstemp()[1]
  1963. with open(tmp_fact, 'w') as f:
  1964. f.write(fact_string.lower())
  1965. _run_command(['sudo', 'mv', tmp_fact, fact_path])
  1966. _run_command(['sudo', 'chmod', '0644', fact_path])
  1967. def _die_tuskar_die():
  1968. """Remove tuskar* packages
  1969. Make sure to remove tuskar https://bugs.launchpad.net/tripleo/+bug/1691744
  1970. # openstack-[tuskar, tuskar-ui, tuskar-ui-extras] & python-tuskarclient
  1971. """
  1972. try:
  1973. _run_command(['sudo', 'yum', 'remove', '-y', '*tuskar*'])
  1974. except subprocess.CalledProcessError as e:
  1975. LOG.error('Error with tuskar removal task %s - continuing', e.output)
def install(instack_root, upgrade=False):
    """Install the undercloud

    :param instack_root: The path containing the instack-undercloud elements
        and json files.
    :param upgrade: Whether this run is an upgrade of an existing
        undercloud rather than a fresh install.
    """
    undercloud_operation = "upgrade" if upgrade else "install"
    try:
        _configure_logging(DEFAULT_LOG_LEVEL, PATHS.LOG_FILE)
        LOG.info('Logging to %s', PATHS.LOG_FILE)
        _load_config()
        _load_subnets_config_groups()
        # Since 'subnets' parameter in opts is used to dynamically
        # add per network groups, re-load config.
        _load_config()
        _clean_os_refresh_config()
        _clean_os_collect_config()
        _validate_configuration()
        instack_env = _generate_environment(instack_root)
        _generate_init_data(instack_env)
        ovs_interfaces = _get_ovs_interfaces()
        if upgrade:
            # Even if we backport https://review.openstack.org/#/c/457478/
            # into stable branches of puppet-ironic, we still need a way
            # to handle existing deployments.
            # This task will fix ironic-dbsync.log ownership on existing
            # deployments during an upgrade. It can be removed after we
            # release Pike.
            _run_command(['sudo', '/usr/bin/chown', 'ironic:ironic',
                          '/var/log/ironic/ironic-dbsync.log'])
            _die_tuskar_die()
        if CONF.undercloud_update_packages:
            _run_yum_clean_all(instack_env)
            if ovs_interfaces:
                _run_restore_ovs_interfaces(ovs_interfaces)
            _run_yum_update(instack_env)
        _handle_upgrade_fact(upgrade)
        _run_instack(instack_env)
        _run_orc(instack_env)
        # FIXME (holser). The RC of issue is in OVS flow restore. Once
        # 'systemctl reload openvswitch' is fixed ovs port restoration can be
        # removed.
        if ovs_interfaces:
            _run_restore_ovs_interfaces(ovs_interfaces)
        _post_config(instack_env, upgrade)
        _run_command(['sudo', 'rm', '-f', '/tmp/svc-map-services'], None, 'rm')
        if upgrade and CONF.enable_validations:  # Run post-upgrade validations
            mistral_url = instack_env['UNDERCLOUD_ENDPOINT_MISTRAL_PUBLIC']
            _run_validation_groups(["post-upgrade"], mistral_url)
    except Exception as e:
        LOG.debug("An exception occurred", exc_info=True)
        LOG.error(FAILURE_MESSAGE,
                  {'undercloud_operation': undercloud_operation,
                   'exception': six.text_type(e),
                   'log_file': PATHS.LOG_FILE})
        if CONF.undercloud_debug:
            raise
        sys.exit(1)
    else:
        LOG.info(COMPLETION_MESSAGE,
                 {'undercloud_operation': undercloud_operation,
                  'password_path': PATHS.PASSWORD_PATH,
                  'stackrc_path': os.path.expanduser('~/stackrc')})
def pre_upgrade():
    """Prepare the undercloud for a major upgrade

    Stops the OpenStack services, completes the nova online data
    migrations, installs the ansible-pacemaker module and updates all
    system packages.
    """
    _configure_logging(DEFAULT_LOG_LEVEL, PATHS.LOG_FILE)
    args = ['sudo', 'systemctl', 'stop', 'openstack-*', 'neutron-*',
            'openvswitch', 'httpd']
    LOG.info('Stopping OpenStack and related services')
    _run_live_command(args, name='systemctl stop')
    LOG.info('Services stopped successfully')
    # Ensure nova data migrations are complete before upgrading packages
    LOG.info('Running Nova online data migration')
    _run_command(['sudo', '-E', '/usr/bin/nova-manage', 'db',
                  'online_data_migrations'])
    LOG.info('Nova online data migration completed')
    args = ['sudo', 'yum', 'install', '-y', 'ansible-pacemaker']
    LOG.info('Installing Ansible Pacemaker module')
    _run_live_command(args, name='install ansible')
    LOG.info('Ansible pacemaker install completed successfully')
    args = ['sudo', 'yum', 'update', '-y']
    LOG.info('Updating full system')
    _run_live_command(args, name='yum update')
    LOG.info('Update completed successfully')