diff --git a/distributedcloud/dccommon/consts.py b/distributedcloud/dccommon/consts.py index 85d9f52b9..fde8aa10f 100644 --- a/distributedcloud/dccommon/consts.py +++ b/distributedcloud/dccommon/consts.py @@ -210,3 +210,9 @@ ANSIBLE_SUBCLOUD_INSTALL_PLAYBOOK = \ '/usr/share/ansible/stx-ansible/playbooks/install.yml' ENROLL_INIT_SEED_ISO_NAME = 'seed.iso' + +ANSIBLE_SUBCLOUD_ENROLL_PLAYBOOK = \ + "/usr/share/ansible/stx-ansible/playbooks/enroll_subcloud.yml" + +# Sysinv client default timeout +SYSINV_CLIENT_REST_DEFAULT_TIMEOUT = 600 diff --git a/distributedcloud/dccommon/drivers/openstack/sysinv_v1.py b/distributedcloud/dccommon/drivers/openstack/sysinv_v1.py index d5da9ebc6..617828583 100644 --- a/distributedcloud/dccommon/drivers/openstack/sysinv_v1.py +++ b/distributedcloud/dccommon/drivers/openstack/sysinv_v1.py @@ -100,8 +100,6 @@ KUBE_ROOTCA_UPDATING_HOST_TRUSTNEWCA_FAILED = 'updating-host-trust-new-ca-failed # by dcmanager upgrade orchestration for the load import operations. HOST_FS_NAME_SCRATCH = 'scratch' -SYSINV_CLIENT_REST_DEFAULT_TIMEOUT = 600 - def make_sysinv_patch(update_dict): patch = [] @@ -122,7 +120,7 @@ class SysinvClient(base.DriverBase): """Sysinv V1 driver.""" def __init__(self, region, session, - timeout=SYSINV_CLIENT_REST_DEFAULT_TIMEOUT, + timeout=consts.SYSINV_CLIENT_REST_DEFAULT_TIMEOUT, endpoint_type=consts.KS_ENDPOINT_ADMIN, endpoint=None): try: diff --git a/distributedcloud/dccommon/subcloud_enrollment.py b/distributedcloud/dccommon/subcloud_enrollment.py index 07852ec4d..7c78fbf4d 100644 --- a/distributedcloud/dccommon/subcloud_enrollment.py +++ b/distributedcloud/dccommon/subcloud_enrollment.py @@ -69,12 +69,12 @@ class SubcloudEnrollmentInit(object): network_cloud_config = [ { 'type': 'physical', - 'name': iso_values['bootstrap_interface'], + 'name': iso_values['install_values']['bootstrap_interface'], 'subnets': [ { 'type': 'static', 'address': iso_values['external_oam_floating_address'], - 'netmask': iso_values['network_mask'], + 'netmask': iso_values['install_values']['network_mask'], 'gateway': iso_values['external_oam_gateway_address'], } ] diff --git a/distributedcloud/dcmanager/api/controllers/v1/phased_subcloud_deploy.py b/distributedcloud/dcmanager/api/controllers/v1/phased_subcloud_deploy.py index 8950ef836..22294fb54 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/phased_subcloud_deploy.py +++ b/distributedcloud/dcmanager/api/controllers/v1/phased_subcloud_deploy.py @@ -58,6 +58,7 @@ SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS = ( SUBCLOUD_ENROLL_GET_FILE_CONTENTS = ( consts.BOOTSTRAP_VALUES, + consts.INSTALL_VALUES ) SUBCLOUD_CONFIG_GET_FILE_CONTENTS = ( @@ -102,7 +103,7 @@ VALID_STATES_FOR_DEPLOY_ENROLL = ( consts.DEPLOY_STATE_ENROLL_FAILED, consts.DEPLOY_STATE_ENROLLED, consts.DEPLOY_STATE_PRE_ENROLL, - consts.DEPLOY_STATE_ENROLLING, + consts.DEPLOY_STATE_INIT_ENROLL_FAILED ) FILES_FOR_RESUME_INSTALL = \ @@ -531,21 +532,28 @@ class PhasedSubcloudDeployController(object): payload = psd_common.get_request_data( request, subcloud, SUBCLOUD_ENROLL_GET_FILE_CONTENTS) - psd_common.validate_enroll_parameter(payload, request) - # Try to load the existing override values override_file = psd_common.get_config_file_path(subcloud.name) if os.path.exists(override_file): if not has_bootstrap_values: psd_common.populate_payload_with_pre_existing_data( - payload, subcloud, SUBCLOUD_BOOTSTRAP_GET_FILE_CONTENTS) + payload, subcloud, SUBCLOUD_ENROLL_GET_FILE_CONTENTS) elif not has_bootstrap_values: msg = ("Required bootstrap-values file was not provided 
and it was " f"not previously available at {override_file}") pecan.abort(400, _(msg)) + psd_common.validate_enroll_parameter(payload) + payload['software_version'] = subcloud.software_version + # Use bootstrap file verification + psd_common.pre_deploy_bootstrap(context, payload, subcloud, + has_bootstrap_values) + + self.dcmanager_rpc_client.subcloud_deploy_enroll( + context, subcloud.id, payload) + pecan.abort(400, "subcloud deploy enrollment is not " "available yet") diff --git a/distributedcloud/dcmanager/api/controllers/v1/subclouds.py b/distributedcloud/dcmanager/api/controllers/v1/subclouds.py index 7ba03e7f7..b5399b7e7 100644 --- a/distributedcloud/dcmanager/api/controllers/v1/subclouds.py +++ b/distributedcloud/dcmanager/api/controllers/v1/subclouds.py @@ -536,7 +536,8 @@ class SubcloudsController(object): psd_common.validate_secondary_parameter(payload, request) - psd_common.validate_enroll_parameter(payload, request) + if payload.get('enroll'): + psd_common.validate_enroll_parameter(payload) # Compares to match both supplied and bootstrap name param # of the subcloud if migrate is on diff --git a/distributedcloud/dcmanager/audit/subcloud_audit_manager.py b/distributedcloud/dcmanager/audit/subcloud_audit_manager.py index 83822ed1d..8bdc603b3 100644 --- a/distributedcloud/dcmanager/audit/subcloud_audit_manager.py +++ b/distributedcloud/dcmanager/audit/subcloud_audit_manager.py @@ -24,7 +24,6 @@ from oslo_log import log as logging from tsconfig.tsconfig import CONFIG_PATH from dccommon import consts as dccommon_consts -from dccommon.drivers.openstack import sysinv_v1 from dcmanager.audit import firmware_audit from dcmanager.audit import kube_rootca_update_audit from dcmanager.audit import kubernetes_audit @@ -448,7 +447,7 @@ class SubcloudAuditManager(manager.Manager): # an extra audit interval. 
            last_audit_fixup_threshold = current_time - datetime.timedelta(
                seconds=(
-                    sysinv_v1.SYSINV_CLIENT_REST_DEFAULT_TIMEOUT
+                    dccommon_consts.SYSINV_CLIENT_REST_DEFAULT_TIMEOUT
                    + CONF.scheduler.subcloud_audit_interval
                )
            )
diff --git a/distributedcloud/dcmanager/common/consts.py b/distributedcloud/dcmanager/common/consts.py
index e4d1302a5..658f9873c 100644
--- a/distributedcloud/dcmanager/common/consts.py
+++ b/distributedcloud/dcmanager/common/consts.py
@@ -233,7 +233,9 @@ DEPLOY_STATE_PRE_REHOME = 'pre-rehome'
 DEPLOY_STATE_PRE_ENROLL = 'pre-enroll'
 DEPLOY_STATE_PRE_ENROLL_FAILED = 'pre-enroll-failed'
 DEPLOY_STATE_PRE_ENROLL_COMPLETE = 'pre-enroll-complete'
-DEPLOY_STATE_INIT_ENROLL = 'init-enroll'
+DEPLOY_STATE_PRE_INIT_ENROLL = 'pre-init-enroll'
+DEPLOY_STATE_PRE_INIT_ENROLL_FAILED = 'pre-init-enroll-failed'
+DEPLOY_STATE_INITIATING_ENROLL = 'initiating-enroll'
 DEPLOY_STATE_INIT_ENROLL_FAILED = 'init-enroll-failed'
 DEPLOY_STATE_INIT_ENROLL_COMPLETE = 'init-enroll-complete'
 # If any of the following rehoming or secondary statuses
diff --git a/distributedcloud/dcmanager/common/phased_subcloud_deploy.py b/distributedcloud/dcmanager/common/phased_subcloud_deploy.py
index 019af52c6..0b2036457 100644
--- a/distributedcloud/dcmanager/common/phased_subcloud_deploy.py
+++ b/distributedcloud/dcmanager/common/phased_subcloud_deploy.py
@@ -162,11 +162,15 @@ def validate_migrate_parameter(payload, request):
                            'not allowed'))
 
 
-def validate_enroll_parameter(payload, request):
-    enroll_str = payload.get('enroll')
-    if enroll_str and enroll_str not in ["true", "false"]:
-        pecan.abort(400, _('The enroll option is invalid, '
-                           'valid options are true and false.'))
+def validate_enroll_parameter(payload):
+    install_values = payload.get('install_values')
+    if not install_values:
+        pecan.abort(400, _("Install values are necessary for "
+                           "subcloud enrollment"))
+
+    # Default the bmc_password in the payload from the install values
+    if not payload.get('bmc_password'):
+        payload.update({'bmc_password': install_values.get('bmc_password')})
 
 
 def validate_secondary_parameter(payload, request):
diff --git a/distributedcloud/dcmanager/common/utils.py b/distributedcloud/dcmanager/common/utils.py
index fe007dd9e..afba60c7d 100644
--- a/distributedcloud/dcmanager/common/utils.py
+++ b/distributedcloud/dcmanager/common/utils.py
@@ -36,6 +36,7 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_serialization import base64
 import pecan
+import requests
 import six.moves
 import tsconfig.tsconfig as tsc
 import yaml
@@ -677,6 +678,9 @@ def is_subcloud_name_format_valid(name):
     return False
 
 
+# TODO(glyraper): Replace get_region_from_subcloud_address()
+# with get_region_name once all the subclouds support
+# '/v1/isystems/region_id' API
 def get_region_from_subcloud_address(payload):
     """Retrieves the current region from the subcloud being migrated
 
@@ -763,6 +767,24 @@ def get_region_from_subcloud_address(payload):
     return (subcloud_region, err_cause)
 
 
+def get_region_name(endpoint,
+                    timeout=dccommon_consts.SYSINV_CLIENT_REST_DEFAULT_TIMEOUT):
+    url = endpoint + '/v1/isystems/region_id'
+    response = requests.get(url, timeout=timeout)
+
+    if response.status_code == 200:
+        data = response.json()
+        if 'region_name' not in data:
+            raise exceptions.NotFound
+
+        region_name = data['region_name']
+        return region_name
+    else:
+        msg = f'GET region_name from {url} FAILED WITH RC {response.status_code}'
+        LOG.error(msg)
+        raise exceptions.ServiceUnavailable
+
+
 def find_ansible_error_msg(subcloud_name, log_file, stage=None):
     """Find errors into
ansible logs. diff --git a/distributedcloud/dcmanager/db/api.py b/distributedcloud/dcmanager/db/api.py index 328e8f30e..d5e417d6f 100644 --- a/distributedcloud/dcmanager/db/api.py +++ b/distributedcloud/dcmanager/db/api.py @@ -201,7 +201,7 @@ def subcloud_update( group_id=None, data_install=None, data_upgrade=None, first_identity_sync_complete=None, systemcontroller_gateway_ip=None, peer_group_id=None, rehome_data=None, rehomed=None, - prestage_status=None, prestage_versions=None + prestage_status=None, prestage_versions=None, region_name=None ): """Update a subcloud or raise if it does not exist.""" return IMPL.subcloud_update( @@ -211,7 +211,7 @@ def subcloud_update( audit_fail_count, deploy_status, backup_status, backup_datetime, error_description, openstack_installed, group_id, data_install, data_upgrade, first_identity_sync_complete, systemcontroller_gateway_ip, peer_group_id, - rehome_data, rehomed, prestage_status, prestage_versions + rehome_data, rehomed, prestage_status, prestage_versions, region_name ) diff --git a/distributedcloud/dcmanager/db/sqlalchemy/api.py b/distributedcloud/dcmanager/db/sqlalchemy/api.py index a36ff71b6..f917e9166 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/api.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/api.py @@ -443,7 +443,8 @@ def subcloud_update(context, subcloud_id, management_state=None, systemcontroller_gateway_ip=None, peer_group_id=None, rehome_data=None, rehomed=None, - prestage_status=None, prestage_versions=None): + prestage_status=None, prestage_versions=None, + region_name=None): with write_session() as session: subcloud_ref = subcloud_get(context, subcloud_id) if management_state is not None: @@ -502,6 +503,8 @@ def subcloud_update(context, subcloud_id, management_state=None, subcloud_ref.prestage_status = prestage_status if prestage_versions is not None: subcloud_ref.prestage_versions = prestage_versions + if region_name is not None: + subcloud_ref.region_name = region_name subcloud_ref.save(session) return subcloud_ref diff --git a/distributedcloud/dcmanager/manager/service.py b/distributedcloud/dcmanager/manager/service.py index dd75f14b7..8e47db7c4 100644 --- a/distributedcloud/dcmanager/manager/service.py +++ b/distributedcloud/dcmanager/manager/service.py @@ -284,6 +284,14 @@ class DCManagerService(service.Service): return self.subcloud_manager.subcloud_deploy_install( context, subcloud_id, payload, initial_deployment) + @run_in_thread + @request_context + def subcloud_deploy_enroll(self, context, subcloud_id, payload): + # Enroll a subcloud + LOG.info(f'Handling subcloud_deploy_enroll request for: {subcloud_id}') + return self.subcloud_manager.subcloud_deploy_enroll( + context, subcloud_id, payload) + @request_context def subcloud_deploy_complete(self, context, subcloud_id): # Complete the subcloud deployment diff --git a/distributedcloud/dcmanager/manager/subcloud_manager.py b/distributedcloud/dcmanager/manager/subcloud_manager.py index 116f2793d..47b2237d5 100644 --- a/distributedcloud/dcmanager/manager/subcloud_manager.py +++ b/distributedcloud/dcmanager/manager/subcloud_manager.py @@ -48,6 +48,7 @@ from dccommon.drivers.openstack.sysinv_v1 import SysinvClient from dccommon.exceptions import PlaybookExecutionFailed from dccommon.exceptions import SubcloudNotFound from dccommon import kubeoperator +from dccommon.subcloud_enrollment import SubcloudEnrollmentInit from dccommon.subcloud_install import SubcloudInstall from dccommon.utils import AnsiblePlaybook from dccommon.utils import LAST_SW_VERSION_IN_CENTOS @@ 
-70,7 +71,6 @@ from dcmanager.manager.system_peer_manager import SystemPeerManager from dcmanager.rpc import client as dcmanager_rpc_client from dcorch.rpc import client as dcorch_rpc_client - LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -140,6 +140,10 @@ TRANSITORY_STATES = { # TODO(gherzman): remove states when they are no longer needed consts.DEPLOY_STATE_PRE_DEPLOY: consts.DEPLOY_STATE_PRE_CONFIG_FAILED, consts.DEPLOY_STATE_DEPLOYING: consts.DEPLOY_STATE_CONFIG_FAILED, + consts.DEPLOY_STATE_PRE_ENROLL: consts.DEPLOY_STATE_PRE_ENROLL_FAILED, + consts.DEPLOY_STATE_ENROLLING: consts.DEPLOY_STATE_ENROLL_FAILED, + consts.DEPLOY_STATE_PRE_INIT_ENROLL: consts.DEPLOY_STATE_PRE_INIT_ENROLL_FAILED, + consts.DEPLOY_STATE_INITIATING_ENROLL: consts.DEPLOY_STATE_INIT_ENROLL_FAILED } TRANSITORY_BACKUP_STATES = { @@ -289,8 +293,8 @@ class SubcloudManager(manager.Manager): data = secret.data if ('ca.crt' not in data or - 'tls.crt' not in data or 'tls.key' not in data) or \ - not (data['ca.crt'] and data['tls.crt'] and data['tls.key']): + 'tls.crt' not in data or 'tls.key' not in data) or \ + not (data['ca.crt'] and data['tls.crt'] and data['tls.key']): # ca cert, certificate and key pair are needed and must exist # for creating an intermediate ca. If not, certificate is not # ready yet. @@ -330,6 +334,31 @@ class SubcloudManager(manager.Manager): dccommon_consts.RVMC_CONFIG_FILE_NAME)] return install_command + # TODO(glyraper): software_version will be used in the future + def compose_enroll_command(self, subcloud_name, + subcloud_region, + ansible_subcloud_inventory_file, + software_version, + state): + + if state == "init": + LOG.info(f'Initiating enroll for subcloud: {subcloud_name}') + return True + elif state == "enroll": + extra_vars = "override_files_dir='%s' region_name=%s" % ( + dccommon_consts.ANSIBLE_OVERRIDES_PATH, subcloud_region) + + enroll_command = [ + "ansible-playbook", + dccommon_consts.ANSIBLE_SUBCLOUD_ENROLL_PLAYBOOK, + "-i", ansible_subcloud_inventory_file, + "--limit", subcloud_name, + "-e", extra_vars] + + return enroll_command + else: + raise exceptions.InvalidInputError + def compose_bootstrap_command(self, subcloud_name, subcloud_region, ansible_subcloud_inventory_file, @@ -379,8 +408,8 @@ class SubcloudManager(manager.Manager): backup_command = [ "ansible-playbook", ANSIBLE_SUBCLOUD_BACKUP_DELETE_PLAYBOOK, "-e", "subcloud_bnr_overrides=%s" % - dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + - subcloud_name + "_backup_delete_values.yml" + dccommon_consts.ANSIBLE_OVERRIDES_PATH + "/" + + subcloud_name + "_backup_delete_values.yml" ] if ansible_subcloud_inventory_file: # Backup stored in subcloud storage @@ -603,7 +632,7 @@ class SubcloudManager(manager.Manager): return for fault in faults: entity_instance_id_str = "peer_group=%s,peer=" % \ - (peer_group.peer_group_name) + (peer_group.peer_group_name) if entity_instance_id_str in fault.entity_instance_id: LOG.info("Clear alarm for peer group %s" % peer_group.peer_group_name) @@ -846,13 +875,14 @@ class SubcloudManager(manager.Manager): rehoming = payload.get('migrate', '').lower() == "true" secondary = (payload.get('secondary', '').lower() == "true") enroll = payload.get('enroll', '').lower() == "true" - initial_deployment = True if not rehoming else False + initial_deployment = True if not (rehoming or enroll) else False # Create the subcloud subcloud = self.subcloud_deploy_create(context, subcloud_id, payload, rehoming, initial_deployment, - return_as_dict=False) + return_as_dict=False, + enroll=enroll) # return if 
'secondary' subcloud if secondary: @@ -873,9 +903,9 @@ class SubcloudManager(manager.Manager): # Define which deploy phases should be run phases_to_run = [] - if consts.INSTALL_VALUES in payload: + if consts.INSTALL_VALUES in payload and not enroll: phases_to_run.append(consts.DEPLOY_PHASE_INSTALL) - if enroll and consts.INSTALL_VALUES not in payload: + if enroll and consts.INSTALL_VALUES in payload: phases_to_run.append(consts.DEPLOY_PHASE_ENROLL) else: phases_to_run.append(consts.DEPLOY_PHASE_BOOTSTRAP) @@ -930,7 +960,7 @@ class SubcloudManager(manager.Manager): group_id = payload.get('group') # Retrieve either a single subcloud or all subclouds in a group - subclouds = [db_api.subcloud_get(context, subcloud_id)] if subcloud_id\ + subclouds = [db_api.subcloud_get(context, subcloud_id)] if subcloud_id \ else db_api.subcloud_get_for_group(context, group_id) self._filter_subclouds_with_ongoing_backup(subclouds) @@ -1147,14 +1177,16 @@ class SubcloudManager(manager.Manager): def _deploy_install_prep(self, subcloud, payload: dict, ansible_subcloud_inventory_file, - initial_deployment=False): - """Run the preparation steps needed to run the install operation + initial_deployment=False, init_enroll=False): + """Run preparation steps for install or init enroll operations :param subcloud: target subcloud model object :param payload: install request parameters :param ansible_subcloud_inventory_file: the ansible inventory file path :param initial_deployment: initial_deployment flag from subcloud inventory + :param init_enroll: which operation should be run, install or init-enroll :return: ansible command needed to run the install playbook + """ payload['install_values']['ansible_ssh_pass'] = \ payload['sysadmin_password'] @@ -1181,12 +1213,20 @@ class SubcloudManager(manager.Manager): utils.create_subcloud_inventory(subcloud_params, ansible_subcloud_inventory_file, initial_deployment) + if init_enroll: + init_enroll_command = self.compose_enroll_command( + subcloud.name, + subcloud.region_name, + ansible_subcloud_inventory_file, + subcloud.software_version, + state="init" + ) + return init_enroll_command install_command = self.compose_install_command( subcloud.name, ansible_subcloud_inventory_file, payload['software_version']) - return install_command def subcloud_deploy_abort(self, context, subcloud_id, deploy_status): @@ -1318,7 +1358,7 @@ class SubcloudManager(manager.Manager): def subcloud_deploy_create(self, context, subcloud_id, payload, rehoming=False, initial_deployment=True, - return_as_dict=True): + return_as_dict=True, enroll=False): """Create subcloud and notify orchestrators. :param context: request context object @@ -1327,6 +1367,7 @@ class SubcloudManager(manager.Manager): :param rehoming: flag indicating if this is part of a rehoming operation :param initial_deployment: initial_deployment flag from subcloud inventory :param return_as_dict: converts the subcloud DB object to a dict before + :param enroll: define steps to run when running enroll operation returning :return: resulting subcloud DB object or dictionary """ @@ -1374,57 +1415,11 @@ class SubcloudManager(manager.Manager): payload['systemcontroller_gateway_address'], 1) - # Create endpoints to this subcloud on the - # management-start-ip of the subcloud which will be allocated - # as the floating Management IP of the Subcloud if the - # Address Pool is not shared. 
Incase the endpoint entries - # are incorrect, or the management IP of the subcloud is changed - # in the future, it will not go managed or will show up as - # out of sync. To fix this use Openstack endpoint commands - # on the SystemController to change the subcloud endpoints. - # The non-identity endpoints are added to facilitate horizon access - # from the System Controller to the subcloud. - endpoint_config = [] - endpoint_ip = utils.get_management_start_address(payload) - if netaddr.IPAddress(endpoint_ip).version == 6: - endpoint_ip = '[' + endpoint_ip + ']' - - for service in m_ks_client.services_list: - admin_endpoint_url = ENDPOINT_URLS.get(service.type, None) - if admin_endpoint_url: - admin_endpoint_url = admin_endpoint_url.format(endpoint_ip) - endpoint_config.append( - {"id": service.id, - "admin_endpoint_url": admin_endpoint_url}) - - if len(endpoint_config) < len(ENDPOINT_URLS): - raise exceptions.BadRequest( - resource='subcloud', - msg='Missing service in SystemController') - - for endpoint in endpoint_config: - try: - m_ks_client.keystone_client.endpoints.create( - endpoint["id"], - endpoint['admin_endpoint_url'], - interface=dccommon_consts.KS_ENDPOINT_ADMIN, - region=subcloud.region_name) - except Exception as e: - # Keystone service must be temporarily busy, retry - LOG.error(str(e)) - m_ks_client.keystone_client.endpoints.create( - endpoint["id"], - endpoint['admin_endpoint_url'], - interface=dccommon_consts.KS_ENDPOINT_ADMIN, - region=subcloud.region_name) - - # Inform orchestrator that subcloud has been added - self.dcorch_rpc_client.add_subcloud( - context, - subcloud.region_name, - subcloud.software_version, - subcloud.management_start_ip - ) + if not enroll: + self._create_subcloud_endpoints(m_ks_client=m_ks_client, + payload=payload, + subcloud=subcloud, + context=context) # create entry into alarm summary table, will get real values later alarm_updates = {'critical_alarms': -1, @@ -1468,8 +1463,9 @@ class SubcloudManager(manager.Manager): ansible_subcloud_inventory_file, initial_deployment) - # create subcloud intermediate certificate and pass in keys - self._create_intermediate_ca_cert(payload) + if not enroll: + # create subcloud intermediate certificate and pass in keys + self._create_intermediate_ca_cert(payload) # Write this subclouds overrides to file # NOTE: This file should not be deleted if subcloud add fails @@ -1575,10 +1571,72 @@ class SubcloudManager(manager.Manager): deploy_status=consts.DEPLOY_STATE_PRE_INSTALL_FAILED) return False - def subcloud_deploy_enroll(self, context, subcloud_id, payload, - initial_deployment=False): + def subcloud_deploy_enroll(self, context, subcloud_id, payload: dict): - raise NotImplementedError + db_api.subcloud_update( + context, + subcloud_id, + deploy_status=consts.DEPLOY_STATE_PRE_INIT_ENROLL + ) + + subcloud = db_api.subcloud_get(context, subcloud_id) + enrollment = SubcloudEnrollmentInit(subcloud.name) + enrollment.prep(dccommon_consts.ANSIBLE_OVERRIDES_PATH, payload) + + if self.subcloud_init_enroll(context, subcloud.id, payload): + try: + + db_api.subcloud_update( + context, + subcloud_id, + deploy_status=consts.DEPLOY_STATE_PRE_ENROLL + ) + + endpoint = ("https://" + + payload.get("external_oam_floating_address") + ":6385") + subcloud_region_name = utils.get_region_name(endpoint) + subcloud.region_name = subcloud_region_name + + m_ks_client = OpenStackDriver( + region_name=dccommon_consts.DEFAULT_REGION_NAME, + region_clients=None).keystone_client + + self._create_subcloud_endpoints(m_ks_client=m_ks_client, + 
payload=payload, + subcloud=subcloud, + context=context) + self._create_intermediate_ca_cert(payload=payload) + + log_file = ( + os.path.join(consts.DC_ANSIBLE_LOG_DIR, subcloud.name) + + "_playbook_output.log" + ) + ansible_subcloud_inventory_file = self._get_ansible_filename( + subcloud.name, INVENTORY_FILE_POSTFIX) + + enroll_playbook_command = self.compose_enroll_command( + subcloud.name, + subcloud.region_name, + ansible_subcloud_inventory_file, + subcloud.software_version, + state="enroll" + ) + self._run_subcloud_enroll(context, + subcloud, + enroll_playbook_command, + log_file, + region_name=subcloud_region_name) + + except Exception: + LOG.exception(f'Failed to enroll subcloud {subcloud.name}') + db_api.subcloud_update( + context, subcloud_id, + deploy_status=consts.DEPLOY_STATE_PRE_ENROLL_FAILED) + return False + else: + LOG.error(f'Initial enrollment failed for subcloud {subcloud.name}') + + return subcloud def subcloud_deploy_bootstrap(self, context, subcloud_id, payload, initial_deployment=False): @@ -1729,6 +1787,56 @@ class SubcloudManager(manager.Manager): return valid_subclouds, invalid_subclouds + def subcloud_init_enroll(self, context, subcloud_id, payload: dict) -> bool: + """Init subcloud enroll + + :param context: request context object + :param subcloud_id: subcloud id from db + :param payload: subcloud Install + :param initial_deployment: initial_deployment flag from subcloud inventory + :return: success status + """ + + # Retrieve the subcloud details from the database + subcloud = db_api.subcloud_update( + context, + subcloud_id, + deploy_status=consts.DEPLOY_STATE_INITIATING_ENROLL, + data_install=json.dumps(payload['install_values'])) + + LOG.info("Initiating subcloud %s enrollment." % subcloud.name) + + try: + # TODO(glyraper): log_file to be used in the playbook execution + # log_file = ( + # os.path.join(consts.DC_ANSIBLE_LOG_DIR, subcloud.name) + # + "_playbook_output.log" + # ) + ansible_subcloud_inventory_file = self._get_ansible_filename( + subcloud.name, INVENTORY_FILE_POSTFIX) + init_enroll_command = self._deploy_install_prep( + subcloud, payload, ansible_subcloud_inventory_file, + init_enroll=True) + if init_enroll_command: + LOG.info('Subcloud enrollment initial phase successful ' + f'for subcloud {subcloud.name}') + + db_api.subcloud_update( + context, + subcloud_id, + deploy_status=consts.DEPLOY_STATE_INIT_ENROLL_COMPLETE, + error_description=consts.ERROR_DESC_EMPTY) + return True + + except Exception: + LOG.exception("Failed to enroll subcloud %s" % subcloud.name) + # If we failed to initiate the subcloud enroll, + # update the deployment status + db_api.subcloud_update( + context, subcloud_id, + deploy_status=consts.DEPLOY_STATE_PRE_INIT_ENROLL_FAILED) + return False + @staticmethod def _mark_invalid_subclouds_for_backup(context, invalid_subclouds): try: @@ -1765,7 +1873,7 @@ class SubcloudManager(manager.Manager): @staticmethod def _update_backup_status(context, subclouds, backup_status): subcloud_ids = [subcloud.id for subcloud in subclouds] - return SubcloudManager.\ + return SubcloudManager. 
\ _update_backup_status_by_ids(context, subcloud_ids, backup_status) @@ -1963,7 +2071,8 @@ class SubcloudManager(manager.Manager): failed_subcloud_names = [subcloud.name for subcloud in failed_subclouds] notice = ( - "Subcloud backup %s operation completed with warnings:\n" % operation) + "Subcloud backup %s operation completed with warnings:\n" + % operation) if invalid_subclouds: notice += ("The following subclouds were skipped for local backup " "%s operation: %s." @@ -1974,6 +2083,61 @@ class SubcloudManager(manager.Manager): % (operation, ' ,'.join(failed_subcloud_names))) return notice + def _create_subcloud_endpoints(self, m_ks_client, payload, subcloud, + context): + + # Create endpoints to this subcloud on the + # management-start-ip of the subcloud which will be allocated + # as the floating Management IP of the Subcloud if the + # Address Pool is not shared. Incase the endpoint entries + # are incorrect, or the management IP of the subcloud is changed + # in the future, it will not go managed or will show up as + # out of sync. To fix this use Openstack endpoint commands + # on the SystemController to change the subcloud endpoints. + # The non-identity endpoints are added to facilitate horizon access + # from the System Controller to the subcloud. + endpoint_config = [] + endpoint_ip = utils.get_management_start_address(payload) + if netaddr.IPAddress(endpoint_ip).version == 6: + endpoint_ip = '[' + endpoint_ip + ']' + + for service in m_ks_client.services_list: + admin_endpoint_url = ENDPOINT_URLS.get(service.type, None) + if admin_endpoint_url: + admin_endpoint_url = admin_endpoint_url.format(endpoint_ip) + endpoint_config.append( + {"id": service.id, + "admin_endpoint_url": admin_endpoint_url}) + + if len(endpoint_config) < len(ENDPOINT_URLS): + raise exceptions.BadRequest( + resource='subcloud', + msg='Missing service in SystemController') + + for endpoint in endpoint_config: + try: + m_ks_client.keystone_client.endpoints.create( + endpoint["id"], + endpoint['admin_endpoint_url'], + interface=dccommon_consts.KS_ENDPOINT_ADMIN, + region=subcloud.region_name) + except Exception as e: + # Keystone service must be temporarily busy, retry + LOG.error(str(e)) + m_ks_client.keystone_client.endpoints.create( + endpoint["id"], + endpoint['admin_endpoint_url'], + interface=dccommon_consts.KS_ENDPOINT_ADMIN, + region=subcloud.region_name) + + # Inform orchestrator that subcloud has been added + self.dcorch_rpc_client.add_subcloud( + context, + subcloud.region_name, + subcloud.software_version, + subcloud.management_start_ip + ) + def _create_subcloud_inventory_file(self, subcloud, bootstrap_address=None, initial_deployment=False): # Ansible inventory filename for the specified subcloud @@ -1985,7 +2149,7 @@ class SubcloudManager(manager.Manager): keystone_client = OpenStackDriver( region_name=subcloud.region_name, region_clients=None).keystone_client - bootstrap_address = utils.get_oam_addresses(subcloud, keystone_client)\ + bootstrap_address = utils.get_oam_addresses(subcloud, keystone_client) \ .oam_floating_ip # Add parameters used to generate inventory @@ -2242,7 +2406,8 @@ class SubcloudManager(manager.Manager): context, subcloud_id, payload, initial_deployment) if succeeded and consts.DEPLOY_PHASE_ENROLL in deploy_phases_to_run: succeeded = self.subcloud_deploy_enroll( - context, subcloud_id, payload, initial_deployment) + context, subcloud_id, payload) + raise NotImplementedError if succeeded and consts.DEPLOY_PHASE_BOOTSTRAP in deploy_phases_to_run: succeeded = 
self.subcloud_deploy_bootstrap( context, subcloud_id, payload, initial_deployment) @@ -2338,6 +2503,41 @@ class SubcloudManager(manager.Manager): LOG.info("Successfully installed %s" % subcloud.name) return True + def _run_subcloud_enroll(self, context, subcloud, enroll_command, + log_file, region_name): + + # Update the subcloud deploy_status to enrolling + db_api.subcloud_update( + context, subcloud.id, + deploy_status=consts.DEPLOY_STATE_ENROLLING, + error_description=consts.ERROR_DESC_EMPTY) + + LOG.info(f'Starting enroll of subcloud {subcloud.name}') + try: + ansible = AnsiblePlaybook(subcloud.name) + ansible.run_playbook(log_file, enroll_command) + except PlaybookExecutionFailed: + msg = utils.find_ansible_error_msg( + subcloud.name, log_file, consts.DEPLOY_STATE_ENROLLING) + LOG.error(f'Enroll failed for subcloud {subcloud.name}: {msg}') + db_api.subcloud_update( + context, subcloud.id, + deploy_status=consts.DEPLOY_STATE_ENROLL_FAILED, + error_description=msg[0:consts.ERROR_DESCRIPTION_LENGTH]) + return False + + # Ensure rehomed=False after bootstrapped from central cloud, it + # applies on both initial deployment and re-deployment. + db_api.subcloud_update( + context, subcloud.id, + deploy_status=consts.DEPLOY_STATE_ENROLLED, + error_description=consts.ERROR_DESC_EMPTY, + region_name=region_name, + rehomed=False) + + LOG.info(f'Successfully enrolled {subcloud.name}') + return True + def _run_subcloud_bootstrap(self, context, subcloud, bootstrap_command, log_file): # Update the subcloud deploy_status to bootstrapping @@ -2432,8 +2632,9 @@ class SubcloudManager(manager.Manager): """Create the deploy value files for the subcloud""" deploy_values_file = os.path.join( - dccommon_consts.ANSIBLE_OVERRIDES_PATH, subcloud_name + - '_deploy_values.yml') + dccommon_consts.ANSIBLE_OVERRIDES_PATH, + subcloud_name + '_deploy_values.yml' + ) with open(deploy_values_file, 'w') as f_out_deploy_values_file: json.dump(payload['deploy_values'], f_out_deploy_values_file) @@ -2613,7 +2814,6 @@ class SubcloudManager(manager.Manager): if mkey in data and 'hosts' in data[mkey] and \ cur_sc_name in data[mkey]['hosts']: - data[mkey]['hosts'][new_sc_name] = \ data[mkey]['hosts'].pop(cur_sc_name) @@ -2706,7 +2906,7 @@ class SubcloudManager(manager.Manager): :param new_subcloud_name: new subcloud name """ try: - subcloud = db_api.\ + subcloud = db_api. 
\ subcloud_get_by_name_or_region_name(context, new_subcloud_name) except exceptions.SubcloudNameOrRegionNameNotFound: @@ -2902,7 +3102,7 @@ class SubcloudManager(manager.Manager): # When trying to manage a 'rehome-pending' subcloud, revert its deploy # status back to 'complete' if its not specified if (management_state == dccommon_consts.MANAGEMENT_MANAGED and - subcloud.deploy_status == consts.DEPLOY_STATE_REHOME_PENDING and + subcloud.deploy_status == consts.DEPLOY_STATE_REHOME_PENDING and not deploy_status): deploy_status = consts.DEPLOY_STATE_DONE @@ -2934,8 +3134,8 @@ class SubcloudManager(manager.Manager): # Update route if the systemcontroller_gateway_ip has been updated if ( - systemcontroller_gateway_ip is not None and - systemcontroller_gateway_ip != subcloud.systemcontroller_gateway_ip + systemcontroller_gateway_ip is not None and + systemcontroller_gateway_ip != subcloud.systemcontroller_gateway_ip ): m_ks_client = OpenStackDriver( region_name=dccommon_consts.DEFAULT_REGION_NAME, @@ -3002,8 +3202,8 @@ class SubcloudManager(manager.Manager): # Do not ignore the dc-cert endpoint for secondary or rehome # pending subclouds as cert-mon does not audit them if subcloud.deploy_status in ( - consts.DEPLOY_STATE_SECONDARY, - consts.DEPLOY_STATE_REHOME_PENDING + consts.DEPLOY_STATE_SECONDARY, + consts.DEPLOY_STATE_REHOME_PENDING ): ignore_endpoints = None @@ -3069,8 +3269,8 @@ class SubcloudManager(manager.Manager): LOG.exception("Failed to update subcloud %s" % subcloud_name) def _run_network_reconfiguration( - self, subcloud_name, update_command, overrides_file, - payload, context, subcloud + self, subcloud_name, update_command, overrides_file, + payload, context, subcloud ): log_file = (os.path.join(consts.DC_ANSIBLE_LOG_DIR, subcloud_name) + '_playbook_output.log') @@ -3232,8 +3432,9 @@ class SubcloudManager(manager.Manager): def _create_subcloud_update_overrides_file( self, payload, subcloud_name, filename_suffix): update_overrides_file = os.path.join( - dccommon_consts.ANSIBLE_OVERRIDES_PATH, subcloud_name + '_' + - filename_suffix + '.yml') + dccommon_consts.ANSIBLE_OVERRIDES_PATH, + subcloud_name + '_' + filename_suffix + '.yml' + ) self._update_override_values(payload) @@ -3364,8 +3565,9 @@ class SubcloudManager(manager.Manager): @utils.synchronized("regionone-data-cache", external=False) def _get_cached_regionone_data( self, regionone_keystone_client, regionone_sysinv_client=None): - if (not SubcloudManager.regionone_data or SubcloudManager.regionone_data[ - 'expiry'] <= datetime.datetime.utcnow()): + if (not SubcloudManager.regionone_data or + SubcloudManager.regionone_data['expiry'] <= + datetime.datetime.utcnow()): user_list = regionone_keystone_client.get_enabled_users(id_only=False) for user in user_list: if user.name == dccommon_consts.ADMIN_USER_NAME: diff --git a/distributedcloud/dcmanager/rpc/client.py b/distributedcloud/dcmanager/rpc/client.py index ac4ec50d2..b02af4ab8 100644 --- a/distributedcloud/dcmanager/rpc/client.py +++ b/distributedcloud/dcmanager/rpc/client.py @@ -221,6 +221,11 @@ class ManagerClient(RPCClient): payload=payload, initial_deployment=initial_deployment)) + def subcloud_deploy_enroll(self, ctxt, subcloud_id, payload): + return self.cast(ctxt, self.make_msg('subcloud_deploy_enroll', + subcloud_id=subcloud_id, + payload=payload)) + def subcloud_deploy_bootstrap(self, ctxt, subcloud_id, payload, initial_deployment): return self.cast(ctxt, self.make_msg('subcloud_deploy_bootstrap', diff --git 
a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_phased_subcloud_deploy.py b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_phased_subcloud_deploy.py index ab6f98379..0bc3d88de 100644 --- a/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_phased_subcloud_deploy.py +++ b/distributedcloud/dcmanager/tests/unit/api/v1/controllers/test_phased_subcloud_deploy.py @@ -1107,11 +1107,17 @@ class TestPhasedSubcloudDeployPatchEnroll(BaseTestPhasedSubcloudDeployPatch): deploy_status=consts.DEPLOY_STATE_CREATED, software_version=SW_VERSION ) - modified_bootstrap_data = copy.copy(fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA) + modified_bootstrap_data = copy.copy( + fake_subcloud.FAKE_SUBCLOUD_BOOTSTRAP_PAYLOAD) + modified_bootstrap_data.update({"name": "fake subcloud1"}) + modified_install_data = copy.copy(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES) fake_content = json.dumps(modified_bootstrap_data).encode("utf-8") + install_fake_content = json.dumps(modified_install_data).encode("utf-8") - self.upload_files = \ - [("bootstrap_values", "bootstrap_fake_filename", fake_content)] + self.upload_files = [( + "bootstrap_values", "bootstrap_fake_filename", fake_content), + ("install_values", "install_values_fake_filename", install_fake_content) + ] def test_patch_enroll_fails(self): """Test patch enroll fails""" diff --git a/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py b/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py index 3206ea493..fa6b17032 100644 --- a/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py +++ b/distributedcloud/dcmanager/tests/unit/manager/test_subcloud_manager.py @@ -384,8 +384,11 @@ class BaseTestSubcloudManager(base.DCManagerTestCase): self.peer_group = self.create_subcloud_peer_group_static(self.ctx) self.mock_keyring.get_password.return_value = "testpassword" - self.fake_install_values = \ - copy.copy(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES) + self.fake_install_values = copy.copy( + fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES) + self.fake_bootstrap_values = copy.copy( + fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA) + self.fake_payload = {"sysadmin_password": "testpass", "deploy_playbook": "test_playbook.yaml", "deploy_overrides": "test_overrides.yaml", @@ -395,10 +398,29 @@ class BaseTestSubcloudManager(base.DCManagerTestCase): 'install_values': self.fake_install_values, 'software_version': SW_VERSION, 'sysadmin_password': 'sys_pass'} + self.fake_payload_enroll = {'bmc_password': 'bmc_pass', + 'install_values': self.fake_install_values, + 'software_version': SW_VERSION, + 'sysadmin_password': 'sys_pass', + 'admin_password': 'sys_pass' + } + self.fake_payload_enroll = dict(self.fake_payload_enroll, + **self.fake_bootstrap_values, + **self.fake_install_values) + + rel_version = self.fake_payload_enroll.get('software_version') + + self.iso_dir = (f'/opt/platform/iso/{rel_version}/' + f'nodes/{self.subcloud.name}') + self.iso_file = f'{self.iso_dir}/seed.iso' + # Reset the regionone_data cache between tests subcloud_manager.SubcloudManager.regionone_data = \ collections.defaultdict(dict) + def patched_isdir(self, path): + return path != self.iso_dir + def _mock_dcorch_api(self): """Mock the DCOrch API""" @@ -481,6 +503,13 @@ class BaseTestSubcloudManager(base.DCManagerTestCase): mock_patch = mock.patch.object(ostree_mount, 'validate_ostree_iso_mount') self.mock_validate_ostree_iso_mount = mock_patch.start() + + def _mock_subcloud_manager_run_subcloud_enroll(self): + """Mock subcloud 
manager's _run_subcloud_enroll""" + + mock_patch = mock.patch.object(subcloud_manager.SubcloudManager, + '_run_subcloud_enroll') + self.mock_run_subcloud_enroll = mock_patch.start() self.addCleanup(mock_patch.stop) def _mock_subcloud_manager_create_intermediate_ca_cert(self): @@ -499,6 +528,14 @@ class BaseTestSubcloudManager(base.DCManagerTestCase): self.mock_compose_install_command = mock_patch.start() self.addCleanup(mock_patch.stop) + def _mock_subcloud_manager_compose_enroll_command(self): + """Mock subcloud manager compose_enroll_command""" + + mock_patch = mock.patch.object(subcloud_manager.SubcloudManager, + 'compose_enroll_command') + self.mock_compose_enroll_command = mock_patch.start() + self.addCleanup(mock_patch.stop) + def _mock_netaddr_ipaddress(self): """Mock netaddr's IPAddress""" @@ -1247,6 +1284,139 @@ class TestSubcloudDeploy(BaseTestSubcloudManager): ) self.assertEqual(consts.DEPLOY_STATE_DONE, ret.deploy_status) + @mock.patch.object(cutils, 'get_region_name') + @mock.patch.object(subcloud_enrollment.SubcloudEnrollmentInit, 'prep') + def test_deploy_subcloud_enroll( + self, mock_subcloud_enrollment_prep, mock_get_region_name): + + mock_run_patch_patch = mock.patch('eventlet.green.subprocess.run') + mock_mkdtemp_patch = mock.patch('tempfile.mkdtemp') + mock_makedirs_patch = mock.patch('os.makedirs') + mock_rmtree_patch = mock.patch('shutil.rmtree') + + self.seed_data_dir = '/temp/seed_data' + mock_get_region_name.return_value = '11111' + + self.mock_run = mock_run_patch_patch.start() + self.mock_mkdtemp = mock_mkdtemp_patch.start() + self.mock_makedirs = mock_makedirs_patch.start() + self.mock_rmtree = mock_rmtree_patch.start() + + self.addCleanup(mock_run_patch_patch.stop) + self.addCleanup(mock_mkdtemp_patch.stop) + self.addCleanup(mock_makedirs_patch.stop) + self.addCleanup(mock_rmtree_patch.stop) + + self._mock_builtins_open() + + self.mock_builtins_open.side_effect = mock.mock_open() + self.mock_os_path_exists.return_value = True + self.mock_mkdtemp.return_value = self.seed_data_dir + self.mock_os_path_isdir.return_value = True + self.mock_run.return_value = mock.MagicMock(returncode=0, + stdout=b'Success') + + self._mock_subcloud_manager_compose_enroll_command() + self.fake_payload_enroll['software_version'] = FAKE_PREVIOUS_SW_VERSION + self.subcloud['deploy_status'] = consts.DEPLOY_STATE_PRE_INIT_ENROLL + self.fake_payload_enroll['software_version'] = SW_VERSION + + with mock.patch('os.path.isdir', side_effect=self.patched_isdir): + self.sm.subcloud_deploy_enroll( + self.ctx, self.subcloud.id, payload=self.fake_payload_enroll) + + # Verify subcloud was updated with correct values + updated_subcloud = db_api.subcloud_get_by_name(self.ctx, + self.subcloud.name) + self.assertEqual(consts.DEPLOY_STATE_ENROLLED, + updated_subcloud.deploy_status) + + @mock.patch.object(subcloud_enrollment.SubcloudEnrollmentInit, + 'prep') + @mock.patch.object(subcloud_manager.SubcloudManager, + '_deploy_install_prep') + def test_subcloud_deploy_pre_init_enroll_failed( + self, mock_deploy_install_prep, mock_subcloud_enrollment_prep): + + mock_deploy_install_prep.side_effect = base.FakeException('boom') + + subcloud = fake_subcloud.create_fake_subcloud( + self.ctx, + name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"], + deploy_status=consts.DEPLOY_STATE_CREATED, + data_install=json.dumps(self.fake_payload_enroll['install_values']) + ) + + self.sm.subcloud_init_enroll( + self.ctx, subcloud.id, self.fake_payload_enroll) + + # Verify subcloud was updated with correct values + 
updated_subcloud = db_api.subcloud_get_by_name(self.ctx, + self.payload['name']) + + self.assertEqual(consts.DEPLOY_STATE_PRE_INIT_ENROLL_FAILED, + updated_subcloud.deploy_status) + + @mock.patch.object(subcloud_enrollment.SubcloudEnrollmentInit, + 'prep') + @mock.patch.object(subcloud_manager.SubcloudManager, + '_create_subcloud_endpoints') + def test_subcloud_deploy_enroll_failed( + self, mock_create_subcloud_endpoints, mock_subcloud_enrollment_prep): + mock_create_subcloud_endpoints.side_effect = base.FakeException('boom') + + subcloud = fake_subcloud.create_fake_subcloud( + self.ctx, + name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"], + deploy_status=consts.DEPLOY_STATE_INIT_ENROLL_COMPLETE, + data_install=json.dumps(self.fake_payload_enroll['install_values']) + ) + + self.sm.subcloud_deploy_enroll( + self.ctx, subcloud.id, self.fake_payload_enroll) + + # Verify subcloud was updated with correct values + updated_subcloud = db_api.subcloud_get_by_name(self.ctx, + self.payload['name']) + + self.assertEqual(consts.DEPLOY_STATE_PRE_ENROLL_FAILED, + updated_subcloud.deploy_status) + + @mock.patch.object(subcloud_enrollment.SubcloudEnrollmentInit, + 'prep') + @mock.patch.object(cutils, 'get_region_name') + def test_subcloud_deploy_enroll_run_playbook_failed( + self, mock_get_region_name, mock_subcloud_enrollment_prep): + + self.mock_ansible_run_playbook.side_effect = PlaybookExecutionFailed() + mock_get_region_name.return_value = "11111" + + subcloud = fake_subcloud.create_fake_subcloud( + self.ctx, + name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"], + deploy_status=consts.DEPLOY_STATE_PRE_ENROLL_COMPLETE, + data_install=json.dumps(self.fake_payload_enroll['install_values']) + ) + + self.sm.subcloud_deploy_enroll( + self.ctx, subcloud.id, self.fake_payload_enroll) + + self.mock_ansible_run_playbook.assert_called_once() + + # Verify subcloud was updated with correct values + updated_subcloud = db_api.subcloud_get_by_name(self.ctx, + self.payload['name']) + self.assertEqual(consts.DEPLOY_STATE_ENROLL_FAILED, + updated_subcloud.deploy_status) + # Verify the subcloud rehomed flag is False after bootstrapped + self.assertFalse(updated_subcloud.rehomed) + self.mock_log.error.assert_called_once_with( + 'Enroll failed for subcloud fake subcloud1: ' + 'FAILED enrolling playbook of (fake subcloud1).' + '\ncheck individual log at ' + '/var/log/dcmanager/ansible/fake ' + 'subcloud1_playbook_output.log for detailed output ') + class TestSubcloudAdd(BaseTestSubcloudManager): """Test class for testing subcloud add""" @@ -4082,6 +4252,8 @@ class TestSubcloudEnrollment(BaseTestSubcloudManager): self.seed_data_dir = '/temp/seed_data' self.enroll_init = subcloud_enrollment.\ SubcloudEnrollmentInit(self.subcloud_name) + self.fake_install_values = \ + copy.copy(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES) self.iso_values = { 'software_version': self.rel_version, @@ -4090,6 +4262,7 @@ class TestSubcloudEnrollment(BaseTestSubcloudManager): 'external_oam_floating_address': '10.10.10.2', 'network_mask': '255.255.255.0', 'external_oam_gateway_address': '10.10.10.1', + 'install_values': self.fake_install_values } mock_run_patch_patch = mock.patch('eventlet.green.subprocess.run')
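
The enrollment flow added above relies on two small building blocks: utils.get_region_name(), which probes the subcloud sysinv REST API at <endpoint>/v1/isystems/region_id to learn the subcloud's region name, and the "enroll" branch of SubcloudManager.compose_enroll_command(), which builds the ansible-playbook invocation for enroll_subcloud.yml. The sketch below is a minimal, standalone illustration of that behaviour for reference only; SUBCLOUD_OAM_IP, SUBCLOUD_NAME, INVENTORY_FILE and OVERRIDES_PATH are illustrative placeholders, not values taken from this change.

import requests

# Illustrative placeholders (assumptions, not part of this change)
SUBCLOUD_OAM_IP = "10.10.10.2"
SUBCLOUD_NAME = "subcloud1"
INVENTORY_FILE = "/var/opt/dc/ansible/subcloud1_inventory.yml"
OVERRIDES_PATH = "/var/opt/dc/ansible"

# Playbook path matches ANSIBLE_SUBCLOUD_ENROLL_PLAYBOOK added in dccommon/consts.py
ENROLL_PLAYBOOK = "/usr/share/ansible/stx-ansible/playbooks/enroll_subcloud.yml"


def get_region_name(endpoint, timeout=600):
    """Fetch the subcloud region name, mirroring utils.get_region_name()."""
    response = requests.get(endpoint + "/v1/isystems/region_id", timeout=timeout)
    if response.status_code != 200:
        raise RuntimeError(
            "GET region_name from %s failed with RC %s"
            % (endpoint, response.status_code))
    data = response.json()
    if "region_name" not in data:
        raise KeyError("region_name missing from /v1/isystems/region_id reply")
    return data["region_name"]


def compose_enroll_command(subcloud_name, subcloud_region, inventory_file):
    """Build the playbook command, mirroring compose_enroll_command(state='enroll')."""
    extra_vars = "override_files_dir='%s' region_name=%s" % (
        OVERRIDES_PATH, subcloud_region)
    return [
        "ansible-playbook", ENROLL_PLAYBOOK,
        "-i", inventory_file,
        "--limit", subcloud_name,
        "-e", extra_vars,
    ]


if __name__ == "__main__":
    # subcloud_deploy_enroll() builds the endpoint from the OAM floating
    # address on the sysinv port (6385) before resolving the region name.
    endpoint = "https://%s:6385" % SUBCLOUD_OAM_IP
    region = get_region_name(endpoint)
    print(compose_enroll_command(SUBCLOUD_NAME, region, INVENTORY_FILE))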