diff --git a/magnum/conductor/handlers/cluster_conductor.py b/magnum/conductor/handlers/cluster_conductor.py index 3a90117abe..9719bbfa88 100644 --- a/magnum/conductor/handlers/cluster_conductor.py +++ b/magnum/conductor/handlers/cluster_conductor.py @@ -15,7 +15,6 @@ from heatclient import exc from oslo_log import log as logging from oslo_service import loopingcall -from oslo_utils import importutils from pycadf import cadftaxonomy as taxonomy import six @@ -27,8 +26,8 @@ from magnum.conductor import scale_manager from magnum.conductor import utils as conductor_utils import magnum.conf from magnum.drivers.common import driver +from magnum.drivers.heat import driver as heat_driver from magnum.i18n import _ -from magnum.i18n import _LE from magnum.i18n import _LI from magnum import objects from magnum.objects import fields @@ -64,8 +63,9 @@ class Handler(object): ct.cluster_distro, ct.coe) # Create cluster - created_stack = cluster_driver.create_stack(context, osc, cluster, - create_timeout) + cluster_driver.create_cluster(context, cluster, create_timeout) + cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS + cluster.status_reason = None except Exception as e: cluster.status = fields.ClusterStatus.CREATE_FAILED cluster.status_reason = six.text_type(e) @@ -79,19 +79,14 @@ class Handler(object): raise e raise - cluster.stack_id = created_stack['stack']['id'] - cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS cluster.create() - self._poll_and_check(osc, cluster, cluster_driver) - return cluster def cluster_update(self, context, cluster, rollback=False): LOG.debug('cluster_heat cluster_update') osc = clients.OpenStackClients(context) - stack = osc.heat().stacks.get(cluster.stack_id) allow_update_status = ( fields.ClusterStatus.CREATE_COMPLETE, fields.ClusterStatus.UPDATE_COMPLETE, @@ -102,11 +97,11 @@ class Handler(object): fields.ClusterStatus.CHECK_COMPLETE, fields.ClusterStatus.ADOPT_COMPLETE ) - if stack.stack_status not in allow_update_status: + if cluster.status not in allow_update_status: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE) - operation = _('Updating a cluster when stack status is ' - '"%s"') % stack.stack_status + operation = _('Updating a cluster when status is ' + '"%s"') % cluster.status raise exception.NotSupported(operation=operation) delta = cluster.obj_what_changed() @@ -115,36 +110,51 @@ class Handler(object): manager = scale_manager.get_scale_manager(context, osc, cluster) - conductor_utils.notify_about_cluster_operation( - context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING) - # Get driver ct = conductor_utils.retrieve_cluster_template(context, cluster) cluster_driver = driver.Driver.get_driver(ct.server_type, ct.cluster_distro, ct.coe) - # Create cluster - cluster_driver.update_stack(context, osc, cluster, manager, rollback) + # Update cluster + try: + conductor_utils.notify_about_cluster_operation( + context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING) + cluster_driver.update_cluster(context, cluster, manager, rollback) + cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS + cluster.status_reason = None + except Exception as e: + cluster.status = fields.ClusterStatus.UPDATE_FAILED + cluster.status_reason = six.text_type(e) + cluster.save() + conductor_utils.notify_about_cluster_operation( + context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE) + if isinstance(e, exc.HTTPBadRequest): + e = exception.InvalidParameterValue(message=six.text_type(e)) + raise e + raise + + 
cluster.save() self._poll_and_check(osc, cluster, cluster_driver) return cluster def cluster_delete(self, context, uuid): - LOG.debug('cluster_heat cluster_delete') + LOG.debug('cluster_conductor cluster_delete') osc = clients.OpenStackClients(context) cluster = objects.Cluster.get_by_uuid(context, uuid) ct = conductor_utils.retrieve_cluster_template(context, cluster) cluster_driver = driver.Driver.get_driver(ct.server_type, ct.cluster_distro, ct.coe) - try: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING) - cluster_driver.delete_stack(context, osc, cluster) + cluster_driver.delete_cluster(context, cluster) + cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS + cluster.status_reason = None except exc.HTTPNotFound: - LOG.info(_LI('The stack %s was not found during cluster' - ' deletion.'), cluster.stack_id) + LOG.info(_LI('The cluster %s was not found during cluster' + ' deletion.'), cluster.id) try: trust_manager.delete_trustee_and_trust(osc, context, cluster) cert_manager.delete_certificates_from_cluster(cluster, @@ -160,147 +170,21 @@ class Handler(object): conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE) raise exception.OperationInProgress(cluster_name=cluster.name) - except Exception: + except Exception as unexp: conductor_utils.notify_about_cluster_operation( context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE) + cluster.status = fields.ClusterStatus.DELETE_FAILED + cluster.status_reason = six.text_type(unexp) + cluster.save() raise - cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS cluster.save() - self._poll_and_check(osc, cluster, cluster_driver) - return None def _poll_and_check(self, osc, cluster, cluster_driver): - poller = HeatPoller(osc, cluster, cluster_driver) + # TODO(randall): this is a temporary hack. Next patch will sort the + # status update checking + poller = heat_driver.HeatPoller(osc, cluster, cluster_driver) lc = loopingcall.FixedIntervalLoopingCall(f=poller.poll_and_check) lc.start(CONF.cluster_heat.wait_interval, True) - - -class HeatPoller(object): - - def __init__(self, openstack_client, cluster, cluster_driver): - self.openstack_client = openstack_client - self.context = self.openstack_client.context - self.cluster = cluster - self.attempts = 0 - self.cluster_template = conductor_utils.retrieve_cluster_template( - self.context, cluster) - self.template_def = cluster_driver.get_template_definition() - - def poll_and_check(self): - # TODO(yuanying): temporary implementation to update api_address, - # node_addresses and cluster status - stack = self.openstack_client.heat().stacks.get(self.cluster.stack_id) - self.attempts += 1 - status_to_event = { - fields.ClusterStatus.DELETE_COMPLETE: taxonomy.ACTION_DELETE, - fields.ClusterStatus.CREATE_COMPLETE: taxonomy.ACTION_CREATE, - fields.ClusterStatus.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE, - fields.ClusterStatus.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE, - fields.ClusterStatus.CREATE_FAILED: taxonomy.ACTION_CREATE, - fields.ClusterStatus.DELETE_FAILED: taxonomy.ACTION_DELETE, - fields.ClusterStatus.UPDATE_FAILED: taxonomy.ACTION_UPDATE, - fields.ClusterStatus.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE - } - # poll_and_check is detached and polling long time to check status, - # so another user/client can call delete cluster/stack. 
- if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE: - self._delete_complete() - conductor_utils.notify_about_cluster_operation( - self.context, status_to_event[stack.stack_status], - taxonomy.OUTCOME_SUCCESS) - raise loopingcall.LoopingCallDone() - - if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE, - fields.ClusterStatus.UPDATE_COMPLETE): - self._sync_cluster_and_template_status(stack) - conductor_utils.notify_about_cluster_operation( - self.context, status_to_event[stack.stack_status], - taxonomy.OUTCOME_SUCCESS) - raise loopingcall.LoopingCallDone() - elif stack.stack_status != self.cluster.status: - self._sync_cluster_status(stack) - - if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED, - fields.ClusterStatus.DELETE_FAILED, - fields.ClusterStatus.UPDATE_FAILED, - fields.ClusterStatus.ROLLBACK_COMPLETE, - fields.ClusterStatus.ROLLBACK_FAILED): - self._sync_cluster_and_template_status(stack) - self._cluster_failed(stack) - conductor_utils.notify_about_cluster_operation( - self.context, status_to_event[stack.stack_status], - taxonomy.OUTCOME_FAILURE) - raise loopingcall.LoopingCallDone() - # only check max attempts when the stack is being created when - # the timeout hasn't been set. If the timeout has been set then - # the loop will end when the stack completes or the timeout occurs - if stack.stack_status == fields.ClusterStatus.CREATE_IN_PROGRESS: - if (stack.timeout_mins is None and - self.attempts > CONF.cluster_heat.max_attempts): - LOG.error(_LE('Cluster check exit after %(attempts)s attempts,' - 'stack_id: %(id)s, stack_status: %(status)s') % - {'attempts': CONF.cluster_heat.max_attempts, - 'id': self.cluster.stack_id, - 'status': stack.stack_status}) - raise loopingcall.LoopingCallDone() - else: - if self.attempts > CONF.cluster_heat.max_attempts: - LOG.error(_LE('Cluster check exit after %(attempts)s attempts,' - 'stack_id: %(id)s, stack_status: %(status)s') % - {'attempts': CONF.cluster_heat.max_attempts, - 'id': self.cluster.stack_id, - 'status': stack.stack_status}) - raise loopingcall.LoopingCallDone() - - def _delete_complete(self): - LOG.info(_LI('Cluster has been deleted, stack_id: %s') - % self.cluster.stack_id) - try: - trust_manager.delete_trustee_and_trust(self.openstack_client, - self.context, - self.cluster) - cert_manager.delete_certificates_from_cluster(self.cluster, - context=self.context) - self.cluster.destroy() - except exception.ClusterNotFound: - LOG.info(_LI('The cluster %s has been deleted by others.') - % self.cluster.uuid) - - def _sync_cluster_status(self, stack): - self.cluster.status = stack.stack_status - self.cluster.status_reason = stack.stack_status_reason - stack_nc_param = self.template_def.get_heat_param( - cluster_attr='node_count') - self.cluster.node_count = stack.parameters[stack_nc_param] - self.cluster.save() - - def get_version_info(self, stack): - stack_param = self.template_def.get_heat_param( - cluster_attr='coe_version') - if stack_param: - self.cluster.coe_version = stack.parameters[stack_param] - - version_module_path = self.template_def.driver_module_path+'.version' - try: - ver = importutils.import_module(version_module_path) - container_version = ver.container_version - except Exception: - container_version = None - self.cluster.container_version = container_version - - def _sync_cluster_and_template_status(self, stack): - self.template_def.update_outputs(stack, self.cluster_template, - self.cluster) - self.get_version_info(stack) - self._sync_cluster_status(stack) - - def 
_cluster_failed(self, stack): - LOG.error(_LE('Cluster error, stack status: %(cluster_status)s, ' - 'stack_id: %(stack_id)s, ' - 'reason: %(reason)s') % - {'cluster_status': stack.stack_status, - 'stack_id': self.cluster.stack_id, - 'reason': self.cluster.status_reason}) diff --git a/magnum/drivers/common/driver.py b/magnum/drivers/common/driver.py index 6ebd1c40b8..32c034803d 100644 --- a/magnum/drivers/common/driver.py +++ b/magnum/drivers/common/driver.py @@ -12,47 +12,25 @@ # License for the specific language governing permissions and limitations # under the License. -import os +import abc +import six -from heatclient.common import template_utils from oslo_config import cfg from oslo_log import log as logging from pkg_resources import iter_entry_points from stevedore import driver from magnum.common import exception -from magnum.common import short_id -from magnum.conductor import utils as conductor_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) -def _extract_template_definition(context, cluster, scale_manager=None): - cluster_template = conductor_utils.retrieve_cluster_template(context, - cluster) - cluster_driver = Driver().get_driver(cluster_template.server_type, - cluster_template.cluster_distro, - cluster_template.coe) - definition = cluster_driver.get_template_definition() - return definition.extract_definition(context, cluster_template, cluster, - scale_manager=scale_manager) - - -def _get_env_files(template_path, env_rel_paths): - template_dir = os.path.dirname(template_path) - env_abs_paths = [os.path.join(template_dir, f) for f in env_rel_paths] - environment_files = [] - env_map, merged_env = ( - template_utils.process_multiple_environments_and_files( - env_paths=env_abs_paths, env_list_tracker=environment_files)) - return environment_files, env_map - - +@six.add_metaclass(abc.ABCMeta) class Driver(object): + definitions = None - provides = list() @classmethod def load_entry_points(cls): @@ -96,7 +74,7 @@ class Driver(object): if not cls.definitions: cls.definitions = dict() for entry_point, def_class in cls.load_entry_points(): - for cluster_type in def_class.provides: + for cluster_type in def_class().provides: cluster_type_tuple = (cluster_type['server_type'], cluster_type['os'], cluster_type['coe']) @@ -157,55 +135,26 @@ class Driver(object): return driver.DriverManager("magnum.drivers", driver_info['entry_point_name']).driver() - def create_stack(self, context, osc, cluster, cluster_create_timeout): - template_path, heat_params, env_files = ( - _extract_template_definition(context, cluster)) + @abc.abstractproperty + def provides(self): + '''return a list of (server_type, os, coe) tuples - tpl_files, template = template_utils.get_template_contents( - template_path) + Returns a list of cluster configurations supported by this driver + ''' + raise NotImplementedError("Subclasses must implement 'provides'.") - environment_files, env_map = _get_env_files(template_path, env_files) - tpl_files.update(env_map) + @abc.abstractmethod + def create_cluster(self, context, cluster, cluster_create_timeout): + raise NotImplementedError("Subclasses must implement " + "'create_cluster'.") - # Make sure no duplicate stack name - stack_name = '%s-%s' % (cluster.name, short_id.generate_id()) - if cluster_create_timeout: - heat_timeout = cluster_create_timeout - else: - # no cluster_create_timeout value was passed in to the request - # so falling back on configuration file value - heat_timeout = cfg.CONF.cluster_heat.create_timeout - fields = { - 'stack_name': stack_name, - 
'parameters': heat_params, - 'environment_files': environment_files, - 'template': template, - 'files': tpl_files, - 'timeout_mins': heat_timeout - } - created_stack = osc.heat().stacks.create(**fields) + @abc.abstractmethod + def update_cluster(self, context, cluster, scale_manager=None, + rollback=False): + raise NotImplementedError("Subclasses must implement " + "'update_cluster'.") - return created_stack - - def update_stack(self, context, osc, cluster, scale_manager=None, - rollback=False): - template_path, heat_params, env_files = _extract_template_definition( - context, cluster, scale_manager=scale_manager) - - tpl_files, template = template_utils.get_template_contents( - template_path) - environment_files, env_map = _get_env_files(template_path, env_files) - tpl_files.update(env_map) - - fields = { - 'parameters': heat_params, - 'environment_files': environment_files, - 'template': template, - 'files': tpl_files, - 'disable_rollback': not rollback - } - - return osc.heat().stacks.update(cluster.stack_id, **fields) - - def delete_stack(self, context, osc, cluster): - osc.heat().stacks.delete(cluster.stack_id) + @abc.abstractmethod + def delete_cluster(self, context, cluster): + raise NotImplementedError("Subclasses must implement " + "'delete_cluster'.") diff --git a/magnum/drivers/heat/__init__.py b/magnum/drivers/heat/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/magnum/drivers/heat/driver.py b/magnum/drivers/heat/driver.py new file mode 100644 index 0000000000..2ace79df22 --- /dev/null +++ b/magnum/drivers/heat/driver.py @@ -0,0 +1,273 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc +import os +from pycadf import cadftaxonomy as taxonomy +import six + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_service import loopingcall +from oslo_utils import importutils + +from heatclient.common import template_utils + +from magnum.common import clients +from magnum.common import exception +from magnum.common import short_id +from magnum.conductor.handlers.common import cert_manager +from magnum.conductor.handlers.common import trust_manager +from magnum.conductor import utils as conductor_utils +from magnum.drivers.common import driver +from magnum.i18n import _LE +from magnum.i18n import _LI +from magnum.objects import fields + + +LOG = logging.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class HeatDriver(driver.Driver): + '''Base Driver class for using Heat + + Abstract class for implementing Drivers that leverage OpenStack Heat for + orchestrating cluster lifecycle operations + ''' + + def _extract_template_definition(self, context, cluster, + scale_manager=None): + cluster_template = conductor_utils.retrieve_cluster_template(context, + cluster) + definition = self.get_template_definition() + return definition.extract_definition(context, cluster_template, + cluster, + scale_manager=scale_manager) + + def _get_env_files(self, template_path, env_rel_paths): + template_dir = os.path.dirname(template_path) + env_abs_paths = [os.path.join(template_dir, f) for f in env_rel_paths] + environment_files = [] + env_map, merged_env = ( + template_utils.process_multiple_environments_and_files( + env_paths=env_abs_paths, env_list_tracker=environment_files)) + return environment_files, env_map + + @abc.abstractmethod + def get_template_definition(self): + '''return an implementation of + + magnum.drivers.common.drivers.heat.TemplateDefinition + ''' + + raise NotImplementedError("Must implement 'get_template_definition'") + + def create_cluster(self, context, cluster, cluster_create_timeout): + stack = self._create_stack(context, clients.OpenStackClients(context), + cluster, cluster_create_timeout) + # TODO(randall): keeping this for now to reduce/eliminate data + # migration. Should probably come up with something more generic in + # the future once actual non-heat-based drivers are implemented. 
+ cluster.stack_id = stack['stack']['id'] + + def update_cluster(self, context, cluster, scale_manager=None, + rollback=False): + self._update_stack(context, clients.OpenStackClients(context), cluster, + scale_manager, rollback) + + def delete_cluster(self, context, cluster): + self._delete_stack(context, clients.OpenStackClients(context), cluster) + + def _create_stack(self, context, osc, cluster, cluster_create_timeout): + template_path, heat_params, env_files = ( + self._extract_template_definition(context, cluster)) + + tpl_files, template = template_utils.get_template_contents( + template_path) + + environment_files, env_map = self._get_env_files(template_path, + env_files) + tpl_files.update(env_map) + + # Make sure no duplicate stack name + stack_name = '%s-%s' % (cluster.name, short_id.generate_id()) + if cluster_create_timeout: + heat_timeout = cluster_create_timeout + else: + # no cluster_create_timeout value was passed in to the request + # so falling back on configuration file value + heat_timeout = cfg.CONF.cluster_heat.create_timeout + fields = { + 'stack_name': stack_name, + 'parameters': heat_params, + 'environment_files': environment_files, + 'template': template, + 'files': tpl_files, + 'timeout_mins': heat_timeout + } + created_stack = osc.heat().stacks.create(**fields) + + return created_stack + + def _update_stack(self, context, osc, cluster, scale_manager=None, + rollback=False): + template_path, heat_params, env_files = ( + self._extract_template_definition(context, cluster, + scale_manager=scale_manager)) + + tpl_files, template = template_utils.get_template_contents( + template_path) + environment_files, env_map = self._get_env_files(template_path, + env_files) + tpl_files.update(env_map) + + fields = { + 'parameters': heat_params, + 'environment_files': environment_files, + 'template': template, + 'files': tpl_files, + 'disable_rollback': not rollback + } + + osc.heat().stacks.update(cluster.stack_id, **fields) + + def _delete_stack(self, context, osc, cluster): + osc.heat().stacks.delete(cluster.stack_id) + + +class HeatPoller(object): + + def __init__(self, openstack_client, cluster, cluster_driver): + self.openstack_client = openstack_client + self.context = self.openstack_client.context + self.cluster = cluster + self.attempts = 0 + self.cluster_template = conductor_utils.retrieve_cluster_template( + self.context, cluster) + self.template_def = cluster_driver.get_template_definition() + + def poll_and_check(self): + # TODO(yuanying): temporary implementation to update api_address, + # node_addresses and cluster status + stack = self.openstack_client.heat().stacks.get(self.cluster.stack_id) + self.attempts += 1 + status_to_event = { + fields.ClusterStatus.DELETE_COMPLETE: taxonomy.ACTION_DELETE, + fields.ClusterStatus.CREATE_COMPLETE: taxonomy.ACTION_CREATE, + fields.ClusterStatus.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE, + fields.ClusterStatus.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE, + fields.ClusterStatus.CREATE_FAILED: taxonomy.ACTION_CREATE, + fields.ClusterStatus.DELETE_FAILED: taxonomy.ACTION_DELETE, + fields.ClusterStatus.UPDATE_FAILED: taxonomy.ACTION_UPDATE, + fields.ClusterStatus.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE + } + # poll_and_check is detached and polling long time to check status, + # so another user/client can call delete cluster/stack. 
+ if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE: + self._delete_complete() + conductor_utils.notify_about_cluster_operation( + self.context, status_to_event[stack.stack_status], + taxonomy.OUTCOME_SUCCESS) + raise loopingcall.LoopingCallDone() + + if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE, + fields.ClusterStatus.UPDATE_COMPLETE): + self._sync_cluster_and_template_status(stack) + conductor_utils.notify_about_cluster_operation( + self.context, status_to_event[stack.stack_status], + taxonomy.OUTCOME_SUCCESS) + raise loopingcall.LoopingCallDone() + elif stack.stack_status != self.cluster.status: + self._sync_cluster_status(stack) + + if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED, + fields.ClusterStatus.DELETE_FAILED, + fields.ClusterStatus.UPDATE_FAILED, + fields.ClusterStatus.ROLLBACK_COMPLETE, + fields.ClusterStatus.ROLLBACK_FAILED): + self._sync_cluster_and_template_status(stack) + self._cluster_failed(stack) + conductor_utils.notify_about_cluster_operation( + self.context, status_to_event[stack.stack_status], + taxonomy.OUTCOME_FAILURE) + raise loopingcall.LoopingCallDone() + # only check max attempts when the stack is being created when + # the timeout hasn't been set. If the timeout has been set then + # the loop will end when the stack completes or the timeout occurs + if stack.stack_status == fields.ClusterStatus.CREATE_IN_PROGRESS: + if (stack.timeout_mins is None and + self.attempts > cfg.CONF.cluster_heat.max_attempts): + LOG.error(_LE('Cluster check exit after %(attempts)s attempts,' + 'stack_id: %(id)s, stack_status: %(status)s') % + {'attempts': cfg.CONF.cluster_heat.max_attempts, + 'id': self.cluster.stack_id, + 'status': stack.stack_status}) + raise loopingcall.LoopingCallDone() + else: + if self.attempts > cfg.CONF.cluster_heat.max_attempts: + LOG.error(_LE('Cluster check exit after %(attempts)s attempts,' + 'stack_id: %(id)s, stack_status: %(status)s') % + {'attempts': cfg.CONF.cluster_heat.max_attempts, + 'id': self.cluster.stack_id, + 'status': stack.stack_status}) + raise loopingcall.LoopingCallDone() + + def _delete_complete(self): + LOG.info(_LI('Cluster has been deleted, stack_id: %s') + % self.cluster.stack_id) + try: + trust_manager.delete_trustee_and_trust(self.openstack_client, + self.context, + self.cluster) + cert_manager.delete_certificates_from_cluster(self.cluster, + context=self.context) + self.cluster.destroy() + except exception.ClusterNotFound: + LOG.info(_LI('The cluster %s has been deleted by others.') + % self.cluster.uuid) + + def _sync_cluster_status(self, stack): + self.cluster.status = stack.stack_status + self.cluster.status_reason = stack.stack_status_reason + stack_nc_param = self.template_def.get_heat_param( + cluster_attr='node_count') + self.cluster.node_count = stack.parameters[stack_nc_param] + self.cluster.save() + + def get_version_info(self, stack): + stack_param = self.template_def.get_heat_param( + cluster_attr='coe_version') + if stack_param: + self.cluster.coe_version = stack.parameters[stack_param] + + version_module_path = self.template_def.driver_module_path+'.version' + try: + ver = importutils.import_module(version_module_path) + container_version = ver.container_version + except Exception: + container_version = None + self.cluster.container_version = container_version + + def _sync_cluster_and_template_status(self, stack): + self.template_def.update_outputs(stack, self.cluster_template, + self.cluster) + self.get_version_info(stack) + self._sync_cluster_status(stack) + + def 
_cluster_failed(self, stack): + LOG.error(_LE('Cluster error, stack status: %(cluster_status)s, ' + 'stack_id: %(stack_id)s, ' + 'reason: %(reason)s') % + {'cluster_status': stack.stack_status, + 'stack_id': self.cluster.stack_id, + 'reason': self.cluster.status_reason}) diff --git a/magnum/drivers/common/k8s_fedora_template_def.py b/magnum/drivers/heat/k8s_fedora_template_def.py similarity index 97% rename from magnum/drivers/common/k8s_fedora_template_def.py rename to magnum/drivers/heat/k8s_fedora_template_def.py index b7cc9c1c0d..35e7e4d475 100644 --- a/magnum/drivers/common/k8s_fedora_template_def.py +++ b/magnum/drivers/heat/k8s_fedora_template_def.py @@ -12,8 +12,8 @@ from oslo_log import log as logging -from magnum.drivers.common import k8s_template_def -from magnum.drivers.common import template_def +from magnum.drivers.heat import k8s_template_def +from magnum.drivers.heat import template_def from oslo_config import cfg CONF = cfg.CONF diff --git a/magnum/drivers/common/k8s_template_def.py b/magnum/drivers/heat/k8s_template_def.py similarity index 99% rename from magnum/drivers/common/k8s_template_def.py rename to magnum/drivers/heat/k8s_template_def.py index dba44175f3..852baf3f12 100644 --- a/magnum/drivers/common/k8s_template_def.py +++ b/magnum/drivers/heat/k8s_template_def.py @@ -12,7 +12,7 @@ from oslo_config import cfg -from magnum.drivers.common import template_def +from magnum.drivers.heat import template_def CONF = cfg.CONF diff --git a/magnum/drivers/common/swarm_fedora_template_def.py b/magnum/drivers/heat/swarm_fedora_template_def.py similarity index 99% rename from magnum/drivers/common/swarm_fedora_template_def.py rename to magnum/drivers/heat/swarm_fedora_template_def.py index 737570252b..0ac3325095 100644 --- a/magnum/drivers/common/swarm_fedora_template_def.py +++ b/magnum/drivers/heat/swarm_fedora_template_def.py @@ -11,7 +11,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -from magnum.drivers.common import template_def +from magnum.drivers.heat import template_def from oslo_config import cfg CONF = cfg.CONF diff --git a/magnum/drivers/common/template_def.py b/magnum/drivers/heat/template_def.py similarity index 100% rename from magnum/drivers/common/template_def.py rename to magnum/drivers/heat/template_def.py diff --git a/magnum/drivers/k8s_coreos_v1/driver.py b/magnum/drivers/k8s_coreos_v1/driver.py index d8c7cfae50..7e61e5cd89 100644 --- a/magnum/drivers/k8s_coreos_v1/driver.py +++ b/magnum/drivers/k8s_coreos_v1/driver.py @@ -12,16 +12,19 @@ # License for the specific language governing permissions and limitations # under the License. 
-from magnum.drivers.common import driver +from magnum.drivers.heat import driver from magnum.drivers.k8s_coreos_v1 import template_def -class Driver(driver.Driver): - provides = [ - {'server_type': 'vm', - 'os': 'coreos', - 'coe': 'kubernetes'}, - ] +class Driver(driver.HeatDriver): + + @property + def provides(self): + return [ + {'server_type': 'vm', + 'os': 'coreos', + 'coe': 'kubernetes'}, + ] def get_template_definition(self): return template_def.CoreOSK8sTemplateDefinition() diff --git a/magnum/drivers/k8s_coreos_v1/template_def.py b/magnum/drivers/k8s_coreos_v1/template_def.py index 6d57f370ee..049410c2bf 100644 --- a/magnum/drivers/k8s_coreos_v1/template_def.py +++ b/magnum/drivers/k8s_coreos_v1/template_def.py @@ -14,8 +14,8 @@ import os import magnum.conf -from magnum.drivers.common import k8s_template_def -from magnum.drivers.common import template_def +from magnum.drivers.heat import k8s_template_def +from magnum.drivers.heat import template_def CONF = magnum.conf.CONF diff --git a/magnum/drivers/k8s_fedora_atomic_v1/driver.py b/magnum/drivers/k8s_fedora_atomic_v1/driver.py index f7f4108324..cb8ac8fc66 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/driver.py +++ b/magnum/drivers/k8s_fedora_atomic_v1/driver.py @@ -12,16 +12,19 @@ # License for the specific language governing permissions and limitations # under the License. -from magnum.drivers.common import driver +from magnum.drivers.heat import driver from magnum.drivers.k8s_fedora_atomic_v1 import template_def -class Driver(driver.Driver): - provides = [ - {'server_type': 'vm', - 'os': 'fedora-atomic', - 'coe': 'kubernetes'}, - ] +class Driver(driver.HeatDriver): + + @property + def provides(self): + return [ + {'server_type': 'vm', + 'os': 'fedora-atomic', + 'coe': 'kubernetes'}, + ] def get_template_definition(self): return template_def.AtomicK8sTemplateDefinition() diff --git a/magnum/drivers/k8s_fedora_atomic_v1/template_def.py b/magnum/drivers/k8s_fedora_atomic_v1/template_def.py index 1f09ce065a..afd99d12fc 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/template_def.py +++ b/magnum/drivers/k8s_fedora_atomic_v1/template_def.py @@ -15,7 +15,7 @@ import os import magnum.conf -from magnum.drivers.common import k8s_fedora_template_def as kftd +from magnum.drivers.heat import k8s_fedora_template_def as kftd CONF = magnum.conf.CONF diff --git a/magnum/drivers/k8s_fedora_ironic_v1/driver.py b/magnum/drivers/k8s_fedora_ironic_v1/driver.py index ffe960254f..24f8789610 100644 --- a/magnum/drivers/k8s_fedora_ironic_v1/driver.py +++ b/magnum/drivers/k8s_fedora_ironic_v1/driver.py @@ -12,16 +12,19 @@ # License for the specific language governing permissions and limitations # under the License. 
-from magnum.drivers.common import driver +from magnum.drivers.heat import driver from magnum.drivers.k8s_fedora_ironic_v1 import template_def -class Driver(driver.Driver): - provides = [ - {'server_type': 'bm', - 'os': 'fedora', - 'coe': 'kubernetes'}, - ] +class Driver(driver.HeatDriver): + + @property + def provides(self): + return [ + {'server_type': 'bm', + 'os': 'fedora', + 'coe': 'kubernetes'}, + ] def get_template_definition(self): return template_def.FedoraK8sIronicTemplateDefinition() diff --git a/magnum/drivers/k8s_fedora_ironic_v1/template_def.py b/magnum/drivers/k8s_fedora_ironic_v1/template_def.py index 186b4edddb..929d0361d5 100644 --- a/magnum/drivers/k8s_fedora_ironic_v1/template_def.py +++ b/magnum/drivers/k8s_fedora_ironic_v1/template_def.py @@ -16,7 +16,7 @@ import os from oslo_log import log as logging from magnum.common import exception -from magnum.drivers.common import k8s_fedora_template_def as kftd +from magnum.drivers.heat import k8s_fedora_template_def as kftd from oslo_config import cfg CONF = cfg.CONF diff --git a/magnum/drivers/mesos_ubuntu_v1/driver.py b/magnum/drivers/mesos_ubuntu_v1/driver.py index 0367873cef..e4e4f41e4c 100644 --- a/magnum/drivers/mesos_ubuntu_v1/driver.py +++ b/magnum/drivers/mesos_ubuntu_v1/driver.py @@ -12,16 +12,19 @@ # License for the specific language governing permissions and limitations # under the License. -from magnum.drivers.common import driver +from magnum.drivers.heat import driver from magnum.drivers.mesos_ubuntu_v1 import template_def -class Driver(driver.Driver): - provides = [ - {'server_type': 'vm', - 'os': 'ubuntu', - 'coe': 'mesos'}, - ] +class Driver(driver.HeatDriver): + + @property + def provides(self): + return [ + {'server_type': 'vm', + 'os': 'ubuntu', + 'coe': 'mesos'}, + ] def get_template_definition(self): return template_def.UbuntuMesosTemplateDefinition() diff --git a/magnum/drivers/mesos_ubuntu_v1/template_def.py b/magnum/drivers/mesos_ubuntu_v1/template_def.py index e6867c8af8..c2f11cf242 100644 --- a/magnum/drivers/mesos_ubuntu_v1/template_def.py +++ b/magnum/drivers/mesos_ubuntu_v1/template_def.py @@ -13,7 +13,7 @@ # under the License. import os -from magnum.drivers.common import template_def +from magnum.drivers.heat import template_def class UbuntuMesosTemplateDefinition(template_def.BaseTemplateDefinition): diff --git a/magnum/drivers/swarm_fedora_atomic_v1/driver.py b/magnum/drivers/swarm_fedora_atomic_v1/driver.py index b2246d2d75..9aa2cb595b 100644 --- a/magnum/drivers/swarm_fedora_atomic_v1/driver.py +++ b/magnum/drivers/swarm_fedora_atomic_v1/driver.py @@ -12,16 +12,19 @@ # License for the specific language governing permissions and limitations # under the License. -from magnum.drivers.common import driver +from magnum.drivers.heat import driver from magnum.drivers.swarm_fedora_atomic_v1 import template_def -class Driver(driver.Driver): - provides = [ - {'server_type': 'vm', - 'os': 'fedora-atomic', - 'coe': 'swarm'}, - ] +class Driver(driver.HeatDriver): + + @property + def provides(self): + return [ + {'server_type': 'vm', + 'os': 'fedora-atomic', + 'coe': 'swarm'}, + ] def get_template_definition(self): return template_def.AtomicSwarmTemplateDefinition() diff --git a/magnum/drivers/swarm_fedora_atomic_v1/template_def.py b/magnum/drivers/swarm_fedora_atomic_v1/template_def.py index 83276779c0..13d71489cd 100644 --- a/magnum/drivers/swarm_fedora_atomic_v1/template_def.py +++ b/magnum/drivers/swarm_fedora_atomic_v1/template_def.py @@ -13,7 +13,7 @@ # under the License. 
import os -from magnum.drivers.common import swarm_fedora_template_def as sftd +from magnum.drivers.heat import swarm_fedora_template_def as sftd class AtomicSwarmTemplateDefinition(sftd.SwarmFedoraTemplateDefinition): diff --git a/magnum/tests/unit/conductor/handlers/test_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_cluster_conductor.py index fa3d400e0e..152f9ef118 100644 --- a/magnum/tests/unit/conductor/handlers/test_cluster_conductor.py +++ b/magnum/tests/unit/conductor/handlers/test_cluster_conductor.py @@ -28,7 +28,6 @@ import magnum.conf from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_atomic_dr from magnum import objects from magnum.objects.fields import ClusterStatus as cluster_status -from magnum.tests import base from magnum.tests import fake_notifier from magnum.tests.unit.db import base as db_base from magnum.tests.unit.db import utils @@ -72,6 +71,7 @@ class TestHandler(db_base.DbTestCase): mock_driver.return_value = mock_dr self.cluster.node_count = 2 + self.cluster.status = cluster_status.CREATE_COMPLETE self.handler.cluster_update(self.context, self.cluster) notifications = fake_notifier.NOTIFICATIONS @@ -81,9 +81,9 @@ class TestHandler(db_base.DbTestCase): self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) - mock_dr.update_stack.assert_called_once_with( - self.context, mock_openstack_client, self.cluster, - mock_scale_manager.return_value, False) + mock_dr.update_cluster.assert_called_once_with( + self.context, self.cluster, mock_scale_manager.return_value, + False) cluster = objects.Cluster.get(self.context, self.cluster.uuid) self.assertEqual(2, cluster.node_count) @@ -105,6 +105,7 @@ class TestHandler(db_base.DbTestCase): mock_openstack_client.heat.return_value = mock_heat_client self.cluster.node_count = 2 + self.cluster.status = cluster_status.CREATE_FAILED self.assertRaises(exception.NotSupported, self.handler.cluster_update, self.context, self.cluster) @@ -141,6 +142,7 @@ class TestHandler(db_base.DbTestCase): mock_driver.return_value = mock_dr self.cluster.node_count = 2 + self.cluster.status = cluster_status.CREATE_COMPLETE self.handler.cluster_update(self.context, self.cluster) notifications = fake_notifier.NOTIFICATIONS @@ -150,9 +152,8 @@ class TestHandler(db_base.DbTestCase): self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) - mock_dr.update_stack.assert_called_once_with( - self.context, mock_openstack_client, self.cluster, - mock_scale_manager.return_value, False) + mock_dr.update_cluster.assert_called_once_with( + self.context, self.cluster, mock_scale_manager.return_value, False) cluster = objects.Cluster.get(self.context, self.cluster.uuid) self.assertEqual(2, cluster.node_count) @@ -184,7 +185,7 @@ class TestHandler(db_base.DbTestCase): self._test_update_cluster_status_complete( cluster_status.ADOPT_COMPLETE) - @patch('magnum.conductor.handlers.cluster_conductor.HeatPoller') + @patch('magnum.drivers.heat.driver.HeatPoller') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') @patch('magnum.drivers.common.driver.Driver.get_driver') @@ -227,9 +228,8 @@ class TestHandler(db_base.DbTestCase): self.assertEqual( taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome']) - mock_dr.create_stack.assert_called_once_with(self.context, - mock.sentinel.osc, - self.cluster, timeout) + mock_dr.create_cluster.assert_called_once_with(self.context, + self.cluster, timeout) 
mock_cm.generate_certificates_to_cluster.assert_called_once_with( self.cluster, context=self.context) self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status) @@ -279,7 +279,7 @@ class TestHandler(db_base.DbTestCase): mock_cluster_create): mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr - mock_dr.create_stack.side_effect = exc.HTTPBadRequest + mock_dr.create_cluster.side_effect = exc.HTTPBadRequest self._test_create_failed( mock_openstack_client_class, @@ -369,7 +369,7 @@ class TestHandler(db_base.DbTestCase): characters, must start with alpha""") mock_dr = mock.MagicMock() mock_driver.return_value = mock_dr - mock_dr.create_stack.side_effect = exc.HTTPBadRequest(error_message) + mock_dr.create_cluster.side_effect = exc.HTTPBadRequest(error_message) self._test_create_failed( mock_openstack_client_class, @@ -390,13 +390,14 @@ class TestHandler(db_base.DbTestCase): self.assertEqual( taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) - @patch('magnum.conductor.handlers.cluster_conductor.HeatPoller') + @patch('magnum.drivers.heat.driver.HeatPoller') @patch('heatclient.common.template_utils' '.process_multiple_environments_and_files') @patch('heatclient.common.template_utils.get_template_contents') @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') @patch('magnum.conductor.handlers.cluster_conductor.cert_manager') - @patch('magnum.drivers.common.driver._extract_template_definition') + @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' + '_extract_template_definition') @patch('magnum.drivers.common.driver.Driver.get_driver') @patch('magnum.common.clients.OpenStackClients') @patch('magnum.common.short_id.generate_id') @@ -524,292 +525,3 @@ class TestHandler(db_base.DbTestCase): taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) self.assertEqual( 0, cert_manager.delete_certificates_from_cluster.call_count) - - -class TestHeatPoller(base.TestCase): - - @patch('magnum.conductor.utils.retrieve_cluster_template') - @patch('oslo_config.cfg') - @patch('magnum.common.clients.OpenStackClients') - @patch('magnum.drivers.common.driver.Driver.get_driver') - def setup_poll_test(self, mock_driver, mock_openstack_client, cfg, - mock_retrieve_cluster_template): - cfg.CONF.cluster_heat.max_attempts = 10 - - cluster = mock.MagicMock() - cluster_template_dict = utils.get_test_cluster_template( - coe='kubernetes') - mock_heat_stack = mock.MagicMock() - mock_heat_client = mock.MagicMock() - mock_heat_client.stacks.get.return_value = mock_heat_stack - mock_openstack_client.heat.return_value = mock_heat_client - cluster_template = objects.ClusterTemplate(self.context, - **cluster_template_dict) - mock_retrieve_cluster_template.return_value = cluster_template - mock_driver.return_value = k8s_atomic_dr.Driver() - poller = cluster_conductor.HeatPoller(mock_openstack_client, cluster, - k8s_atomic_dr.Driver()) - poller.get_version_info = mock.MagicMock() - return (mock_heat_stack, cluster, poller) - - def test_poll_and_check_send_notification(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - mock_heat_stack.stack_status = cluster_status.CREATE_FAILED - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - mock_heat_stack.stack_status = 
cluster_status.DELETE_FAILED - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - mock_heat_stack.stack_status = cluster_status.UPDATE_FAILED - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - self.assertEqual(6, poller.attempts) - notifications = fake_notifier.NOTIFICATIONS - self.assertEqual(6, len(notifications)) - self.assertEqual( - 'magnum.cluster.create', notifications[0].event_type) - self.assertEqual( - taxonomy.OUTCOME_SUCCESS, notifications[0].payload['outcome']) - self.assertEqual( - 'magnum.cluster.create', notifications[1].event_type) - self.assertEqual( - taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) - self.assertEqual( - 'magnum.cluster.delete', notifications[2].event_type) - self.assertEqual( - taxonomy.OUTCOME_SUCCESS, notifications[2].payload['outcome']) - self.assertEqual( - 'magnum.cluster.delete', notifications[3].event_type) - self.assertEqual( - taxonomy.OUTCOME_FAILURE, notifications[3].payload['outcome']) - self.assertEqual( - 'magnum.cluster.update', notifications[4].event_type) - self.assertEqual( - taxonomy.OUTCOME_SUCCESS, notifications[4].payload['outcome']) - self.assertEqual( - 'magnum.cluster.update', notifications[5].event_type) - self.assertEqual( - taxonomy.OUTCOME_FAILURE, notifications[5].payload['outcome']) - - def test_poll_no_save(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - cluster.status = cluster_status.CREATE_IN_PROGRESS - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - poller.poll_and_check() - - self.assertEqual(0, cluster.save.call_count) - self.assertEqual(1, poller.attempts) - - def test_poll_save(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - cluster.status = cluster_status.CREATE_IN_PROGRESS - mock_heat_stack.stack_status = cluster_status.CREATE_FAILED - mock_heat_stack.stack_status_reason = 'Create failed' - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - self.assertEqual(2, cluster.save.call_count) - self.assertEqual(cluster_status.CREATE_FAILED, cluster.status) - self.assertEqual('Create failed', cluster.status_reason) - self.assertEqual(1, poller.attempts) - - def test_poll_done(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - mock_heat_stack.stack_status = cluster_status.CREATE_FAILED - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - self.assertEqual(2, poller.attempts) - - def test_poll_done_by_update(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE - mock_heat_stack.parameters = {'number_of_minions': 2} - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - self.assertEqual(1, cluster.save.call_count) - self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status) - self.assertEqual(2, cluster.node_count) - self.assertEqual(1, poller.attempts) - - def test_poll_done_by_update_failed(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.UPDATE_FAILED - mock_heat_stack.parameters = {'number_of_minions': 2} - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - 
self.assertEqual(2, cluster.save.call_count) - self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status) - self.assertEqual(2, cluster.node_count) - self.assertEqual(1, poller.attempts) - - def test_poll_done_by_rollback_complete(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.ROLLBACK_COMPLETE - mock_heat_stack.parameters = {'number_of_minions': 1} - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - self.assertEqual(2, cluster.save.call_count) - self.assertEqual(cluster_status.ROLLBACK_COMPLETE, cluster.status) - self.assertEqual(1, cluster.node_count) - self.assertEqual(1, poller.attempts) - - def test_poll_done_by_rollback_failed(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.ROLLBACK_FAILED - mock_heat_stack.parameters = {'number_of_minions': 1} - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - self.assertEqual(2, cluster.save.call_count) - self.assertEqual(cluster_status.ROLLBACK_FAILED, cluster.status) - self.assertEqual(1, cluster.node_count) - self.assertEqual(1, poller.attempts) - - def test_poll_destroy(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.DELETE_FAILED - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - # Destroy method is not called when stack delete failed - self.assertEqual(0, cluster.destroy.call_count) - - mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS - poller.poll_and_check() - self.assertEqual(0, cluster.destroy.call_count) - self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status) - - mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - # The cluster status should still be DELETE_IN_PROGRESS, because - # the destroy() method may be failed. If success, this cluster record - # will delete directly, change status is meaningless. 
- self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status) - self.assertEqual(1, cluster.destroy.call_count) - - def test_poll_delete_in_progress_timeout_set(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS - mock_heat_stack.timeout_mins = 60 - # timeout only affects stack creation so expecting this - # to process normally - poller.poll_and_check() - - def test_poll_delete_in_progress_max_attempts_reached(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS - poller.attempts = CONF.cluster_heat.max_attempts - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - def test_poll_create_in_prog_max_att_reached_no_timeout(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - poller.attempts = CONF.cluster_heat.max_attempts - mock_heat_stack.timeout_mins = None - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - def test_poll_create_in_prog_max_att_reached_timeout_set(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - poller.attempts = CONF.cluster_heat.max_attempts - mock_heat_stack.timeout_mins = 60 - # since the timeout is set the max attempts gets ignored since - # the timeout will eventually stop the poller either when - # the stack gets created or the timeout gets reached - poller.poll_and_check() - - def test_poll_create_in_prog_max_att_reached_timed_out(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.CREATE_FAILED - poller.attempts = CONF.cluster_heat.max_attempts - mock_heat_stack.timeout_mins = 60 - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - def test_poll_create_in_prog_max_att_not_reached_no_timeout(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - mock_heat_stack.timeout.mins = None - poller.poll_and_check() - - def test_poll_create_in_prog_max_att_not_reached_timeout_set(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - mock_heat_stack.timeout_mins = 60 - poller.poll_and_check() - - def test_poll_create_in_prog_max_att_not_reached_timed_out(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.stack_status = cluster_status.CREATE_FAILED - mock_heat_stack.timeout_mins = 60 - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - def test_poll_node_count(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.parameters = {'number_of_minions': 1} - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - poller.poll_and_check() - - self.assertEqual(1, cluster.node_count) - - def test_poll_node_count_by_update(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - - mock_heat_stack.parameters = {'number_of_minions': 2} - mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE - self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) - - self.assertEqual(2, cluster.node_count) - - @patch('magnum.conductor.handlers.cluster_conductor.trust_manager') - 
@patch('magnum.conductor.handlers.cluster_conductor.cert_manager') - def test_delete_complete(self, cert_manager, trust_manager): - mock_heat_stack, cluster, poller = self.setup_poll_test() - poller._delete_complete() - self.assertEqual(1, cluster.destroy.call_count) - self.assertEqual( - 1, cert_manager.delete_certificates_from_cluster.call_count) - self.assertEqual(1, - trust_manager.delete_trustee_and_trust.call_count) - - def test_create_or_complete(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE - mock_heat_stack.stack_status_reason = 'stack complete' - poller._sync_cluster_and_template_status(mock_heat_stack) - self.assertEqual('stack complete', cluster.status_reason) - self.assertEqual(cluster_status.CREATE_COMPLETE, cluster.status) - self.assertEqual(1, cluster.save.call_count) - - def test_sync_cluster_status(self): - mock_heat_stack, cluster, poller = self.setup_poll_test() - mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS - mock_heat_stack.stack_status_reason = 'stack incomplete' - poller._sync_cluster_status(mock_heat_stack) - self.assertEqual('stack incomplete', cluster.status_reason) - self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status) - - @patch('magnum.conductor.handlers.cluster_conductor.LOG') - def test_cluster_failed(self, logger): - mock_heat_stack, cluster, poller = self.setup_poll_test() - poller._sync_cluster_and_template_status(mock_heat_stack) - poller._cluster_failed(mock_heat_stack) - self.assertEqual(1, logger.error.call_count) diff --git a/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py index 72d443996a..b057022a3d 100644 --- a/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py +++ b/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py @@ -16,7 +16,6 @@ import mock from mock import patch import magnum.conf -from magnum.drivers.common import driver from magnum.drivers.k8s_coreos_v1 import driver as k8s_coreos_dr from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_dr from magnum import objects @@ -124,8 +123,8 @@ class TestClusterConductorWithK8s(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) mapping = { 'dns_nameserver': 'dns_nameserver', @@ -224,8 +223,8 @@ class TestClusterConductorWithK8s(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'auth_url': 'http://192.168.10.10:5000/v3', @@ -305,8 +304,8 @@ class TestClusterConductorWithK8s(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'auth_url': 'http://192.168.10.10:5000/v3', @@ -363,8 +362,8 @@ class TestClusterConductorWithK8s(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -423,8 +422,8 @@ class TestClusterConductorWithK8s(base.TestCase): (template_path, definition, - env_files) = 
driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -607,8 +606,8 @@ class TestClusterConductorWithK8s(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -656,8 +655,11 @@ class TestClusterConductorWithK8s(base.TestCase): @patch('magnum.common.short_id.generate_id') @patch('heatclient.common.template_utils.get_template_contents') - @patch('magnum.drivers.common.driver._extract_template_definition') + @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' + '_extract_template_definition') + @patch('magnum.common.clients.OpenStackClients') def test_create_stack(self, + mock_osc, mock_extract_template_definition, mock_get_template_contents, mock_generate_id): @@ -674,13 +676,12 @@ class TestClusterConductorWithK8s(base.TestCase): mock_extract_template_definition.return_value = ('template/path', {}, []) mock_heat_client = mock.MagicMock() - mock_osc = mock.MagicMock() - mock_osc.heat.return_value = mock_heat_client + mock_osc.return_value.heat.return_value = mock_heat_client mock_cluster = mock.MagicMock() mock_cluster.name = dummy_cluster_name - k8s_dr.Driver().create_stack(self.context, mock_osc, - mock_cluster, expected_timeout) + k8s_dr.Driver().create_cluster(self.context, mock_cluster, + expected_timeout) expected_args = { 'stack_name': expected_stack_name, @@ -694,9 +695,12 @@ class TestClusterConductorWithK8s(base.TestCase): @patch('magnum.common.short_id.generate_id') @patch('heatclient.common.template_utils.get_template_contents') - @patch('magnum.drivers.common.driver._extract_template_definition') + @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' + '_extract_template_definition') + @patch('magnum.common.clients.OpenStackClients') def test_create_stack_no_timeout_specified( self, + mock_osc, mock_extract_template_definition, mock_get_template_contents, mock_generate_id): @@ -713,13 +717,11 @@ class TestClusterConductorWithK8s(base.TestCase): mock_extract_template_definition.return_value = ('template/path', {}, []) mock_heat_client = mock.MagicMock() - mock_osc = mock.MagicMock() - mock_osc.heat.return_value = mock_heat_client + mock_osc.return_value.heat.return_value = mock_heat_client mock_cluster = mock.MagicMock() mock_cluster.name = dummy_cluster_name - k8s_dr.Driver().create_stack(self.context, mock_osc, - mock_cluster, None) + k8s_dr.Driver().create_cluster(self.context, mock_cluster, None) expected_args = { 'stack_name': expected_stack_name, @@ -733,9 +735,12 @@ class TestClusterConductorWithK8s(base.TestCase): @patch('magnum.common.short_id.generate_id') @patch('heatclient.common.template_utils.get_template_contents') - @patch('magnum.drivers.common.driver._extract_template_definition') + @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' 
+ '_extract_template_definition') + @patch('magnum.common.clients.OpenStackClients') def test_create_stack_timeout_is_zero( self, + mock_osc, mock_extract_template_definition, mock_get_template_contents, mock_generate_id): @@ -753,13 +758,12 @@ class TestClusterConductorWithK8s(base.TestCase): mock_extract_template_definition.return_value = ('template/path', {}, []) mock_heat_client = mock.MagicMock() - mock_osc = mock.MagicMock() - mock_osc.heat.return_value = mock_heat_client + mock_osc.return_value.heat.return_value = mock_heat_client mock_cluster = mock.MagicMock() mock_cluster.name = dummy_cluster_name - k8s_dr.Driver().create_stack(self.context, mock_osc, - mock_cluster, cluster_timeout) + k8s_dr.Driver().create_cluster(self.context, mock_cluster, + cluster_timeout) expected_args = { 'stack_name': expected_stack_name, @@ -772,8 +776,11 @@ class TestClusterConductorWithK8s(base.TestCase): mock_heat_client.stacks.create.assert_called_once_with(**expected_args) @patch('heatclient.common.template_utils.get_template_contents') - @patch('magnum.drivers.common.driver._extract_template_definition') + @patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.' + '_extract_template_definition') + @patch('magnum.common.clients.OpenStackClients') def test_update_stack(self, + mock_osc, mock_extract_template_definition, mock_get_template_contents): @@ -786,12 +793,11 @@ class TestClusterConductorWithK8s(base.TestCase): mock_extract_template_definition.return_value = ('template/path', {}, []) mock_heat_client = mock.MagicMock() - mock_osc = mock.MagicMock() - mock_osc.heat.return_value = mock_heat_client + mock_osc.return_value.heat.return_value = mock_heat_client mock_cluster = mock.MagicMock() mock_cluster.stack_id = mock_stack_id - k8s_dr.Driver().update_stack({}, mock_osc, mock_cluster) + k8s_dr.Driver().update_cluster({}, mock_cluster) expected_args = { 'parameters': {}, diff --git a/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py index b4613c494e..dd818e1c5c 100644 --- a/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py +++ b/magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py @@ -16,8 +16,7 @@ import mock from mock import patch from oslo_service import loopingcall -from magnum.conductor.handlers import cluster_conductor -from magnum.drivers.common import driver +from magnum.drivers.heat import driver as heat_driver from magnum.drivers.mesos_ubuntu_v1 import driver as mesos_dr from magnum import objects from magnum.objects.fields import ClusterStatus as cluster_status @@ -95,8 +94,8 @@ class TestClusterConductorWithMesos(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -154,8 +153,8 @@ class TestClusterConductorWithMesos(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -200,8 +199,8 @@ class TestClusterConductorWithMesos(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -255,8 
+254,8 @@ class TestClusterConductorWithMesos(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -310,8 +309,8 @@ class TestClusterConductorWithMesos(base.TestCase): cluster_template = objects.ClusterTemplate( self.context, **self.cluster_template_dict) mock_retrieve_cluster_template.return_value = cluster_template - poller = cluster_conductor.HeatPoller(mock_openstack_client, cluster, - mesos_dr.Driver()) + poller = heat_driver.HeatPoller(mock_openstack_client, cluster, + mesos_dr.Driver()) poller.get_version_info = mock.MagicMock() return (mock_heat_stack, cluster, poller) diff --git a/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py index 2bb8c597f6..834d0740ea 100644 --- a/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py +++ b/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py @@ -16,9 +16,8 @@ import mock from mock import patch from oslo_service import loopingcall -from magnum.conductor.handlers import cluster_conductor import magnum.conf -from magnum.drivers.common import driver +from magnum.drivers.heat import driver as heat_driver from magnum.drivers.swarm_fedora_atomic_v1 import driver as swarm_dr from magnum import objects from magnum.objects.fields import ClusterStatus as cluster_status @@ -107,8 +106,8 @@ class TestClusterConductorWithSwarm(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -177,8 +176,8 @@ class TestClusterConductorWithSwarm(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -254,8 +253,8 @@ class TestClusterConductorWithSwarm(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -309,8 +308,8 @@ class TestClusterConductorWithSwarm(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -376,8 +375,8 @@ class TestClusterConductorWithSwarm(base.TestCase): (template_path, definition, - env_files) = driver._extract_template_definition(self.context, - cluster) + env_files) = mock_driver()._extract_template_definition(self.context, + cluster) expected = { 'ssh_key_name': 'keypair_id', @@ -437,8 +436,8 @@ class TestClusterConductorWithSwarm(base.TestCase): mock_retrieve_cluster_template.return_value = \ cluster_template mock_driver.return_value = swarm_dr.Driver() - poller = cluster_conductor.HeatPoller(mock_openstack_client, cluster, - swarm_dr.Driver()) + poller = heat_driver.HeatPoller(mock_openstack_client, cluster, + swarm_dr.Driver()) poller.get_version_info = mock.MagicMock() return (mock_heat_stack, cluster, poller) diff --git 
a/magnum/tests/unit/drivers/test_heat_driver.py b/magnum/tests/unit/drivers/test_heat_driver.py new file mode 100644 index 0000000000..bc1b412184 --- /dev/null +++ b/magnum/tests/unit/drivers/test_heat_driver.py @@ -0,0 +1,316 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from mock import patch +from oslo_service import loopingcall +from pycadf import cadftaxonomy as taxonomy + +import magnum.conf +from magnum.drivers.heat import driver as heat_driver +from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_atomic_dr +from magnum import objects +from magnum.objects.fields import ClusterStatus as cluster_status +from magnum.tests import base +from magnum.tests import fake_notifier +from magnum.tests.unit.db import utils + +CONF = magnum.conf.CONF + + +class TestHeatPoller(base.TestCase): + + @patch('magnum.conductor.utils.retrieve_cluster_template') + @patch('oslo_config.cfg') + @patch('magnum.common.clients.OpenStackClients') + @patch('magnum.drivers.common.driver.Driver.get_driver') + def setup_poll_test(self, mock_driver, mock_openstack_client, cfg, + mock_retrieve_cluster_template): + cfg.CONF.cluster_heat.max_attempts = 10 + + cluster = mock.MagicMock() + cluster_template_dict = utils.get_test_cluster_template( + coe='kubernetes') + mock_heat_stack = mock.MagicMock() + mock_heat_client = mock.MagicMock() + mock_heat_client.stacks.get.return_value = mock_heat_stack + mock_openstack_client.heat.return_value = mock_heat_client + cluster_template = objects.ClusterTemplate(self.context, + **cluster_template_dict) + mock_retrieve_cluster_template.return_value = cluster_template + mock_driver.return_value = k8s_atomic_dr.Driver() + poller = heat_driver.HeatPoller(mock_openstack_client, cluster, + k8s_atomic_dr.Driver()) + poller.get_version_info = mock.MagicMock() + return (mock_heat_stack, cluster, poller) + + def test_poll_and_check_send_notification(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + mock_heat_stack.stack_status = cluster_status.CREATE_FAILED + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + mock_heat_stack.stack_status = cluster_status.DELETE_FAILED + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + mock_heat_stack.stack_status = cluster_status.UPDATE_FAILED + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + self.assertEqual(6, poller.attempts) + notifications = fake_notifier.NOTIFICATIONS + self.assertEqual(6, len(notifications)) + self.assertEqual( + 'magnum.cluster.create', notifications[0].event_type) + self.assertEqual( + taxonomy.OUTCOME_SUCCESS, 
notifications[0].payload['outcome']) + self.assertEqual( + 'magnum.cluster.create', notifications[1].event_type) + self.assertEqual( + taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome']) + self.assertEqual( + 'magnum.cluster.delete', notifications[2].event_type) + self.assertEqual( + taxonomy.OUTCOME_SUCCESS, notifications[2].payload['outcome']) + self.assertEqual( + 'magnum.cluster.delete', notifications[3].event_type) + self.assertEqual( + taxonomy.OUTCOME_FAILURE, notifications[3].payload['outcome']) + self.assertEqual( + 'magnum.cluster.update', notifications[4].event_type) + self.assertEqual( + taxonomy.OUTCOME_SUCCESS, notifications[4].payload['outcome']) + self.assertEqual( + 'magnum.cluster.update', notifications[5].event_type) + self.assertEqual( + taxonomy.OUTCOME_FAILURE, notifications[5].payload['outcome']) + + def test_poll_no_save(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + cluster.status = cluster_status.CREATE_IN_PROGRESS + mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS + poller.poll_and_check() + + self.assertEqual(0, cluster.save.call_count) + self.assertEqual(1, poller.attempts) + + def test_poll_save(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + cluster.status = cluster_status.CREATE_IN_PROGRESS + mock_heat_stack.stack_status = cluster_status.CREATE_FAILED + mock_heat_stack.stack_status_reason = 'Create failed' + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + self.assertEqual(2, cluster.save.call_count) + self.assertEqual(cluster_status.CREATE_FAILED, cluster.status) + self.assertEqual('Create failed', cluster.status_reason) + self.assertEqual(1, poller.attempts) + + def test_poll_done(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + mock_heat_stack.stack_status = cluster_status.CREATE_FAILED + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + self.assertEqual(2, poller.attempts) + + def test_poll_done_by_update(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE + mock_heat_stack.parameters = {'number_of_minions': 2} + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + self.assertEqual(1, cluster.save.call_count) + self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status) + self.assertEqual(2, cluster.node_count) + self.assertEqual(1, poller.attempts) + + def test_poll_done_by_update_failed(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.UPDATE_FAILED + mock_heat_stack.parameters = {'number_of_minions': 2} + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + self.assertEqual(2, cluster.save.call_count) + self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status) + self.assertEqual(2, cluster.node_count) + self.assertEqual(1, poller.attempts) + + def test_poll_done_by_rollback_complete(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.ROLLBACK_COMPLETE + mock_heat_stack.parameters = {'number_of_minions': 1} + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + self.assertEqual(2, cluster.save.call_count) + self.assertEqual(cluster_status.ROLLBACK_COMPLETE, cluster.status) + self.assertEqual(1, 
cluster.node_count) + self.assertEqual(1, poller.attempts) + + def test_poll_done_by_rollback_failed(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.ROLLBACK_FAILED + mock_heat_stack.parameters = {'number_of_minions': 1} + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + self.assertEqual(2, cluster.save.call_count) + self.assertEqual(cluster_status.ROLLBACK_FAILED, cluster.status) + self.assertEqual(1, cluster.node_count) + self.assertEqual(1, poller.attempts) + + def test_poll_destroy(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.DELETE_FAILED + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + # Destroy method is not called when stack delete failed + self.assertEqual(0, cluster.destroy.call_count) + + mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS + poller.poll_and_check() + self.assertEqual(0, cluster.destroy.call_count) + self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status) + + mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + # The cluster status should still be DELETE_IN_PROGRESS, because + # the destroy() method may be failed. If success, this cluster record + # will delete directly, change status is meaningless. + self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status) + self.assertEqual(1, cluster.destroy.call_count) + + def test_poll_delete_in_progress_timeout_set(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS + mock_heat_stack.timeout_mins = 60 + # timeout only affects stack creation so expecting this + # to process normally + poller.poll_and_check() + + def test_poll_delete_in_progress_max_attempts_reached(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS + poller.attempts = CONF.cluster_heat.max_attempts + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + def test_poll_create_in_prog_max_att_reached_no_timeout(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS + poller.attempts = CONF.cluster_heat.max_attempts + mock_heat_stack.timeout_mins = None + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + def test_poll_create_in_prog_max_att_reached_timeout_set(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS + poller.attempts = CONF.cluster_heat.max_attempts + mock_heat_stack.timeout_mins = 60 + # since the timeout is set the max attempts gets ignored since + # the timeout will eventually stop the poller either when + # the stack gets created or the timeout gets reached + poller.poll_and_check() + + def test_poll_create_in_prog_max_att_reached_timed_out(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.CREATE_FAILED + poller.attempts = CONF.cluster_heat.max_attempts + mock_heat_stack.timeout_mins = 60 + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + def test_poll_create_in_prog_max_att_not_reached_no_timeout(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + 
mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS + mock_heat_stack.timeout_mins = None + poller.poll_and_check() + + def test_poll_create_in_prog_max_att_not_reached_timeout_set(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS + mock_heat_stack.timeout_mins = 60 + poller.poll_and_check() + + def test_poll_create_in_prog_max_att_not_reached_timed_out(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.stack_status = cluster_status.CREATE_FAILED + mock_heat_stack.timeout_mins = 60 + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + def test_poll_node_count(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.parameters = {'number_of_minions': 1} + mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS + poller.poll_and_check() + + self.assertEqual(1, cluster.node_count) + + def test_poll_node_count_by_update(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + + mock_heat_stack.parameters = {'number_of_minions': 2} + mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE + self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check) + + self.assertEqual(2, cluster.node_count) + + @patch('magnum.drivers.heat.driver.trust_manager') + @patch('magnum.drivers.heat.driver.cert_manager') + def test_delete_complete(self, cert_manager, trust_manager): + mock_heat_stack, cluster, poller = self.setup_poll_test() + poller._delete_complete() + self.assertEqual(1, cluster.destroy.call_count) + self.assertEqual( + 1, cert_manager.delete_certificates_from_cluster.call_count) + self.assertEqual(1, + trust_manager.delete_trustee_and_trust.call_count) + + def test_create_or_complete(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE + mock_heat_stack.stack_status_reason = 'stack complete' + poller._sync_cluster_and_template_status(mock_heat_stack) + self.assertEqual('stack complete', cluster.status_reason) + self.assertEqual(cluster_status.CREATE_COMPLETE, cluster.status) + self.assertEqual(1, cluster.save.call_count) + + def test_sync_cluster_status(self): + mock_heat_stack, cluster, poller = self.setup_poll_test() + mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS + mock_heat_stack.stack_status_reason = 'stack incomplete' + poller._sync_cluster_status(mock_heat_stack) + self.assertEqual('stack incomplete', cluster.status_reason) + self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status) + + @patch('magnum.drivers.heat.driver.LOG') + def test_cluster_failed(self, logger): + mock_heat_stack, cluster, poller = self.setup_poll_test() + poller._sync_cluster_and_template_status(mock_heat_stack) + poller._cluster_failed(mock_heat_stack) + self.assertEqual(1, logger.error.call_count) diff --git a/magnum/tests/unit/drivers/test_template_definition.py b/magnum/tests/unit/drivers/test_template_definition.py index d800f171f0..a361aeacae 100644 --- a/magnum/tests/unit/drivers/test_template_definition.py +++ b/magnum/tests/unit/drivers/test_template_definition.py @@ -20,7 +20,7 @@ import six from magnum.common import exception import magnum.conf from magnum.drivers.common import driver -from magnum.drivers.common import template_def as cmn_tdef +from magnum.drivers.heat import template_def as cmn_tdef from magnum.drivers.k8s_coreos_v1 import driver as k8s_coreos_dr from 
magnum.drivers.k8s_coreos_v1 import template_def as k8s_coreos_tdef from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8sa_dr @@ -46,7 +46,7 @@ class TemplateDefinitionTestCase(base.TestCase): mock_entry_points = [mock_entry_point] mock_iter_entry_points.return_value = mock_entry_points.__iter__() - entry_points = driver.Driver().load_entry_points() + entry_points = driver.Driver.load_entry_points() for (expected_entry_point, (actual_entry_point, loaded_cls)) in zip(mock_entry_points, @@ -109,7 +109,7 @@ class TemplateDefinitionTestCase(base.TestCase): def test_get_driver_not_supported(self): self.assertRaises(exception.ClusterTypeNotSupported, - driver.Driver().get_driver, + driver.Driver.get_driver, 'vm', 'not_supported', 'kubernetes') def test_required_param_not_set(self): @@ -222,9 +222,9 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase): @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.k8s_fedora_atomic_v1.template_def' '.AtomicK8sTemplateDefinition.get_discovery_url') - @mock.patch('magnum.drivers.common.template_def.BaseTemplateDefinition' + @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') - @mock.patch('magnum.drivers.common.template_def.TemplateDefinition' + @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') def test_k8s_get_params(self, mock_get_output, mock_get_params, mock_get_discovery_url, mock_osc_class): @@ -276,11 +276,11 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase): **expected_kwargs) @mock.patch('magnum.common.clients.OpenStackClients') - @mock.patch('magnum.drivers.common.template_def' + @mock.patch('magnum.drivers.heat.template_def' '.BaseTemplateDefinition.get_discovery_url') - @mock.patch('magnum.drivers.common.template_def.BaseTemplateDefinition' + @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') - @mock.patch('magnum.drivers.common.template_def.TemplateDefinition' + @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') def test_k8s_get_params_insecure(self, mock_get_output, mock_get_params, mock_get_discovery_url, mock_osc_class): @@ -689,9 +689,9 @@ class AtomicSwarmTemplateDefinitionTestCase(base.TestCase): @mock.patch('magnum.common.clients.OpenStackClients') @mock.patch('magnum.drivers.swarm_fedora_atomic_v1.template_def' '.AtomicSwarmTemplateDefinition.get_discovery_url') - @mock.patch('magnum.drivers.common.template_def.BaseTemplateDefinition' + @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') - @mock.patch('magnum.drivers.common.template_def.TemplateDefinition' + @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') def test_swarm_get_params(self, mock_get_output, mock_get_params, mock_get_discovery_url, mock_osc_class): @@ -859,9 +859,9 @@ class AtomicSwarmTemplateDefinitionTestCase(base.TestCase): class UbuntuMesosTemplateDefinitionTestCase(base.TestCase): @mock.patch('magnum.common.clients.OpenStackClients') - @mock.patch('magnum.drivers.common.template_def.BaseTemplateDefinition' + @mock.patch('magnum.drivers.heat.template_def.BaseTemplateDefinition' '.get_params') - @mock.patch('magnum.drivers.common.template_def.TemplateDefinition' + @mock.patch('magnum.drivers.heat.template_def.TemplateDefinition' '.get_output') def test_mesos_get_params(self, mock_get_output, mock_get_params, mock_osc_class):
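
Note: the HeatPoller tests in the new test_heat_driver.py (test_sync_cluster_status and test_create_or_complete above) pin down how the poller is expected to mirror the Heat stack status onto the cluster object. The short sketch below is illustrative only; it is not code from this patch, and the class name HeatPollerSketch is invented, but it shows the behaviour those assertions imply.

# Illustrative sketch only; not the Magnum heat driver implementation.
# _sync_cluster_status copies the stack status onto the cluster without
# persisting it, while _sync_cluster_and_template_status also saves the
# cluster exactly once, matching the assertions in the tests above.
class HeatPollerSketch(object):

    def __init__(self, cluster):
        self.cluster = cluster

    def _sync_cluster_status(self, stack):
        # Copy status and reason; the caller decides whether to persist.
        self.cluster.status = stack.stack_status
        self.cluster.status_reason = stack.stack_status_reason

    def _sync_cluster_and_template_status(self, stack):
        # Sync and persist, which is why test_create_or_complete expects
        # cluster.save() to be called exactly once.
        self._sync_cluster_status(stack)
        self.cluster.save()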