Browse Source

Refactor driver interface (pt 1)

Refactor driver interface to encapsulate the orchestration
strategy. This first patch only refactors the main driver
operations. A follow-on will handle the state synchronization
and remove the poller from the conductor.

1. Make driver interface abstract
2. Move external cluster operations into driver interface
3. Make Heat-based driver abstract and update based on
   driver interface changes
4. Move Heat driver code into its own module
5. Update existing Heat drivers based on interface changes

Change-Id: Icfa72e27dc496862d950ac608885567c911f47f2
Partial-Blueprint: bp-driver-consolodation
changes/81/396781/3
Randall Burt 5 years ago
parent
commit
7890725c52
  1. 194
      magnum/conductor/handlers/cluster_conductor.py
  2. 107
      magnum/drivers/common/driver.py
  3. 0
      magnum/drivers/heat/__init__.py
  4. 273
      magnum/drivers/heat/driver.py
  5. 4
      magnum/drivers/heat/k8s_fedora_template_def.py
  6. 2
      magnum/drivers/heat/k8s_template_def.py
  7. 2
      magnum/drivers/heat/swarm_fedora_template_def.py
  8. 0
      magnum/drivers/heat/template_def.py
  9. 17
      magnum/drivers/k8s_coreos_v1/driver.py
  10. 4
      magnum/drivers/k8s_coreos_v1/template_def.py
  11. 17
      magnum/drivers/k8s_fedora_atomic_v1/driver.py
  12. 2
      magnum/drivers/k8s_fedora_atomic_v1/template_def.py
  13. 17
      magnum/drivers/k8s_fedora_ironic_v1/driver.py
  14. 2
      magnum/drivers/k8s_fedora_ironic_v1/template_def.py
  15. 17
      magnum/drivers/mesos_ubuntu_v1/driver.py
  16. 2
      magnum/drivers/mesos_ubuntu_v1/template_def.py
  17. 17
      magnum/drivers/swarm_fedora_atomic_v1/driver.py
  18. 2
      magnum/drivers/swarm_fedora_atomic_v1/template_def.py
  19. 320
      magnum/tests/unit/conductor/handlers/test_cluster_conductor.py
  20. 70
      magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py
  21. 23
      magnum/tests/unit/conductor/handlers/test_mesos_cluster_conductor.py
  22. 27
      magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py
  23. 316
      magnum/tests/unit/drivers/test_heat_driver.py
  24. 24
      magnum/tests/unit/drivers/test_template_definition.py

194
magnum/conductor/handlers/cluster_conductor.py

@ -15,7 +15,6 @@
from heatclient import exc
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import importutils
from pycadf import cadftaxonomy as taxonomy
import six
@ -27,8 +26,8 @@ from magnum.conductor import scale_manager
from magnum.conductor import utils as conductor_utils
import magnum.conf
from magnum.drivers.common import driver
from magnum.drivers.heat import driver as heat_driver
from magnum.i18n import _
from magnum.i18n import _LE
from magnum.i18n import _LI
from magnum import objects
from magnum.objects import fields
@ -64,8 +63,9 @@ class Handler(object):
ct.cluster_distro,
ct.coe)
# Create cluster
created_stack = cluster_driver.create_stack(context, osc, cluster,
create_timeout)
cluster_driver.create_cluster(context, cluster, create_timeout)
cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS
cluster.status_reason = None
except Exception as e:
cluster.status = fields.ClusterStatus.CREATE_FAILED
cluster.status_reason = six.text_type(e)
@ -79,19 +79,14 @@ class Handler(object):
raise e
raise
cluster.stack_id = created_stack['stack']['id']
cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS
cluster.create()
self._poll_and_check(osc, cluster, cluster_driver)
return cluster
def cluster_update(self, context, cluster, rollback=False):
LOG.debug('cluster_heat cluster_update')
osc = clients.OpenStackClients(context)
stack = osc.heat().stacks.get(cluster.stack_id)
allow_update_status = (
fields.ClusterStatus.CREATE_COMPLETE,
fields.ClusterStatus.UPDATE_COMPLETE,
@ -102,11 +97,11 @@ class Handler(object):
fields.ClusterStatus.CHECK_COMPLETE,
fields.ClusterStatus.ADOPT_COMPLETE
)
if stack.stack_status not in allow_update_status:
if cluster.status not in allow_update_status:
conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
operation = _('Updating a cluster when stack status is '
'"%s"') % stack.stack_status
operation = _('Updating a cluster when status is '
'"%s"') % cluster.status
raise exception.NotSupported(operation=operation)
delta = cluster.obj_what_changed()
@ -115,36 +110,51 @@ class Handler(object):
manager = scale_manager.get_scale_manager(context, osc, cluster)
conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING)
# Get driver
ct = conductor_utils.retrieve_cluster_template(context, cluster)
cluster_driver = driver.Driver.get_driver(ct.server_type,
ct.cluster_distro,
ct.coe)
# Create cluster
cluster_driver.update_stack(context, osc, cluster, manager, rollback)
# Update cluster
try:
conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING)
cluster_driver.update_cluster(context, cluster, manager, rollback)
cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
cluster.status_reason = None
except Exception as e:
cluster.status = fields.ClusterStatus.UPDATE_FAILED
cluster.status_reason = six.text_type(e)
cluster.save()
conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
if isinstance(e, exc.HTTPBadRequest):
e = exception.InvalidParameterValue(message=six.text_type(e))
raise e
raise
cluster.save()
self._poll_and_check(osc, cluster, cluster_driver)
return cluster
def cluster_delete(self, context, uuid):
LOG.debug('cluster_heat cluster_delete')
LOG.debug('cluster_conductor cluster_delete')
osc = clients.OpenStackClients(context)
cluster = objects.Cluster.get_by_uuid(context, uuid)
ct = conductor_utils.retrieve_cluster_template(context, cluster)
cluster_driver = driver.Driver.get_driver(ct.server_type,
ct.cluster_distro,
ct.coe)
try:
conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
cluster_driver.delete_stack(context, osc, cluster)
cluster_driver.delete_cluster(context, cluster)
cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
cluster.status_reason = None
except exc.HTTPNotFound:
LOG.info(_LI('The stack %s was not found during cluster'
' deletion.'), cluster.stack_id)
LOG.info(_LI('The cluster %s was not found during cluster'
' deletion.'), cluster.id)
try:
trust_manager.delete_trustee_and_trust(osc, context, cluster)
cert_manager.delete_certificates_from_cluster(cluster,
@ -160,147 +170,21 @@ class Handler(object):
conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
raise exception.OperationInProgress(cluster_name=cluster.name)
except Exception:
except Exception as unexp:
conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
cluster.status = fields.ClusterStatus.DELETE_FAILED
cluster.status_reason = six.text_type(unexp)
cluster.save()
raise
cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
cluster.save()
self._poll_and_check(osc, cluster, cluster_driver)
return None
def _poll_and_check(self, osc, cluster, cluster_driver):
poller = HeatPoller(osc, cluster, cluster_driver)
# TODO(randall): this is a temporary hack. Next patch will sort the
# status update checking
poller = heat_driver.HeatPoller(osc, cluster, cluster_driver)
lc = loopingcall.FixedIntervalLoopingCall(f=poller.poll_and_check)
lc.start(CONF.cluster_heat.wait_interval, True)
class HeatPoller(object):
def __init__(self, openstack_client, cluster, cluster_driver):
self.openstack_client = openstack_client
self.context = self.openstack_client.context
self.cluster = cluster
self.attempts = 0
self.cluster_template = conductor_utils.retrieve_cluster_template(
self.context, cluster)
self.template_def = cluster_driver.get_template_definition()
def poll_and_check(self):
# TODO(yuanying): temporary implementation to update api_address,
# node_addresses and cluster status
stack = self.openstack_client.heat().stacks.get(self.cluster.stack_id)
self.attempts += 1
status_to_event = {
fields.ClusterStatus.DELETE_COMPLETE: taxonomy.ACTION_DELETE,
fields.ClusterStatus.CREATE_COMPLETE: taxonomy.ACTION_CREATE,
fields.ClusterStatus.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE,
fields.ClusterStatus.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE,
fields.ClusterStatus.CREATE_FAILED: taxonomy.ACTION_CREATE,
fields.ClusterStatus.DELETE_FAILED: taxonomy.ACTION_DELETE,
fields.ClusterStatus.UPDATE_FAILED: taxonomy.ACTION_UPDATE,
fields.ClusterStatus.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE
}
# poll_and_check is detached and polling long time to check status,
# so another user/client can call delete cluster/stack.
if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE:
self._delete_complete()
conductor_utils.notify_about_cluster_operation(
self.context, status_to_event[stack.stack_status],
taxonomy.OUTCOME_SUCCESS)
raise loopingcall.LoopingCallDone()
if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE,
fields.ClusterStatus.UPDATE_COMPLETE):
self._sync_cluster_and_template_status(stack)
conductor_utils.notify_about_cluster_operation(
self.context, status_to_event[stack.stack_status],
taxonomy.OUTCOME_SUCCESS)
raise loopingcall.LoopingCallDone()
elif stack.stack_status != self.cluster.status:
self._sync_cluster_status(stack)
if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED,
fields.ClusterStatus.DELETE_FAILED,
fields.ClusterStatus.UPDATE_FAILED,
fields.ClusterStatus.ROLLBACK_COMPLETE,
fields.ClusterStatus.ROLLBACK_FAILED):
self._sync_cluster_and_template_status(stack)
self._cluster_failed(stack)
conductor_utils.notify_about_cluster_operation(
self.context, status_to_event[stack.stack_status],
taxonomy.OUTCOME_FAILURE)
raise loopingcall.LoopingCallDone()
# only check max attempts when the stack is being created when
# the timeout hasn't been set. If the timeout has been set then
# the loop will end when the stack completes or the timeout occurs
if stack.stack_status == fields.ClusterStatus.CREATE_IN_PROGRESS:
if (stack.timeout_mins is None and
self.attempts > CONF.cluster_heat.max_attempts):
LOG.error(_LE('Cluster check exit after %(attempts)s attempts,'
'stack_id: %(id)s, stack_status: %(status)s') %
{'attempts': CONF.cluster_heat.max_attempts,
'id': self.cluster.stack_id,
'status': stack.stack_status})
raise loopingcall.LoopingCallDone()
else:
if self.attempts > CONF.cluster_heat.max_attempts:
LOG.error(_LE('Cluster check exit after %(attempts)s attempts,'
'stack_id: %(id)s, stack_status: %(status)s') %
{'attempts': CONF.cluster_heat.max_attempts,
'id': self.cluster.stack_id,
'status': stack.stack_status})
raise loopingcall.LoopingCallDone()
def _delete_complete(self):
LOG.info(_LI('Cluster has been deleted, stack_id: %s')
% self.cluster.stack_id)
try:
trust_manager.delete_trustee_and_trust(self.openstack_client,
self.context,
self.cluster)
cert_manager.delete_certificates_from_cluster(self.cluster,
context=self.context)
self.cluster.destroy()
except exception.ClusterNotFound:
LOG.info(_LI('The cluster %s has been deleted by others.')
% self.cluster.uuid)
def _sync_cluster_status(self, stack):
self.cluster.status = stack.stack_status
self.cluster.status_reason = stack.stack_status_reason
stack_nc_param = self.template_def.get_heat_param(
cluster_attr='node_count')
self.cluster.node_count = stack.parameters[stack_nc_param]
self.cluster.save()
def get_version_info(self, stack):
stack_param = self.template_def.get_heat_param(
cluster_attr='coe_version')
if stack_param:
self.cluster.coe_version = stack.parameters[stack_param]
version_module_path = self.template_def.driver_module_path+'.version'
try:
ver = importutils.import_module(version_module_path)
container_version = ver.container_version
except Exception:
container_version = None
self.cluster.container_version = container_version
def _sync_cluster_and_template_status(self, stack):
self.template_def.update_outputs(stack, self.cluster_template,
self.cluster)
self.get_version_info(stack)
self._sync_cluster_status(stack)
def _cluster_failed(self, stack):
LOG.error(_LE('Cluster error, stack status: %(cluster_status)s, '
'stack_id: %(stack_id)s, '
'reason: %(reason)s') %
{'cluster_status': stack.stack_status,
'stack_id': self.cluster.stack_id,
'reason': self.cluster.status_reason})

107
magnum/drivers/common/driver.py

@ -12,47 +12,25 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
import abc
import six
from heatclient.common import template_utils
from oslo_config import cfg
from oslo_log import log as logging
from pkg_resources import iter_entry_points
from stevedore import driver
from magnum.common import exception
from magnum.common import short_id
from magnum.conductor import utils as conductor_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _extract_template_definition(context, cluster, scale_manager=None):
cluster_template = conductor_utils.retrieve_cluster_template(context,
cluster)
cluster_driver = Driver().get_driver(cluster_template.server_type,
cluster_template.cluster_distro,
cluster_template.coe)
definition = cluster_driver.get_template_definition()
return definition.extract_definition(context, cluster_template, cluster,
scale_manager=scale_manager)
def _get_env_files(template_path, env_rel_paths):
template_dir = os.path.dirname(template_path)
env_abs_paths = [os.path.join(template_dir, f) for f in env_rel_paths]
environment_files = []
env_map, merged_env = (
template_utils.process_multiple_environments_and_files(
env_paths=env_abs_paths, env_list_tracker=environment_files))
return environment_files, env_map
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
definitions = None
provides = list()
@classmethod
def load_entry_points(cls):
@ -96,7 +74,7 @@ class Driver(object):
if not cls.definitions:
cls.definitions = dict()
for entry_point, def_class in cls.load_entry_points():
for cluster_type in def_class.provides:
for cluster_type in def_class().provides:
cluster_type_tuple = (cluster_type['server_type'],
cluster_type['os'],
cluster_type['coe'])
@ -157,55 +135,26 @@ class Driver(object):
return driver.DriverManager("magnum.drivers",
driver_info['entry_point_name']).driver()
def create_stack(self, context, osc, cluster, cluster_create_timeout):
template_path, heat_params, env_files = (
_extract_template_definition(context, cluster))
tpl_files, template = template_utils.get_template_contents(
template_path)
environment_files, env_map = _get_env_files(template_path, env_files)
tpl_files.update(env_map)
# Make sure no duplicate stack name
stack_name = '%s-%s' % (cluster.name, short_id.generate_id())
if cluster_create_timeout:
heat_timeout = cluster_create_timeout
else:
# no cluster_create_timeout value was passed in to the request
# so falling back on configuration file value
heat_timeout = cfg.CONF.cluster_heat.create_timeout
fields = {
'stack_name': stack_name,
'parameters': heat_params,
'environment_files': environment_files,
'template': template,
'files': tpl_files,
'timeout_mins': heat_timeout
}
created_stack = osc.heat().stacks.create(**fields)
return created_stack
def update_stack(self, context, osc, cluster, scale_manager=None,
rollback=False):
template_path, heat_params, env_files = _extract_template_definition(
context, cluster, scale_manager=scale_manager)
tpl_files, template = template_utils.get_template_contents(
template_path)
environment_files, env_map = _get_env_files(template_path, env_files)
tpl_files.update(env_map)
fields = {
'parameters': heat_params,
'environment_files': environment_files,
'template': template,
'files': tpl_files,
'disable_rollback': not rollback
}
return osc.heat().stacks.update(cluster.stack_id, **fields)
def delete_stack(self, context, osc, cluster):
osc.heat().stacks.delete(cluster.stack_id)
@abc.abstractproperty
def provides(self):
'''return a list of (server_type, os, coe) tuples
Returns a list of cluster configurations supported by this driver
'''
raise NotImplementedError("Subclasses must implement 'provides'.")
@abc.abstractmethod
def create_cluster(self, context, cluster, cluster_create_timeout):
raise NotImplementedError("Subclasses must implement "
"'create_cluster'.")
@abc.abstractmethod
def update_cluster(self, context, cluster, scale_manager=None,
rollback=False):
raise NotImplementedError("Subclasses must implement "
"'update_cluster'.")
@abc.abstractmethod
def delete_cluster(self, context, cluster):
raise NotImplementedError("Subclasses must implement "
"'delete_cluster'.")

0
magnum/drivers/heat/__init__.py

273
magnum/drivers/heat/driver.py

@ -0,0 +1,273 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import os
from pycadf import cadftaxonomy as taxonomy
import six
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import importutils
from heatclient.common import template_utils
from magnum.common import clients
from magnum.common import exception
from magnum.common import short_id
from magnum.conductor.handlers.common import cert_manager
from magnum.conductor.handlers.common import trust_manager
from magnum.conductor import utils as conductor_utils
from magnum.drivers.common import driver
from magnum.i18n import _LE
from magnum.i18n import _LI
from magnum.objects import fields
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class HeatDriver(driver.Driver):
    '''Base Driver class for using Heat

    Abstract class for implementing Drivers that leverage OpenStack Heat for
    orchestrating cluster lifecycle operations
    '''

    def _extract_template_definition(self, context, cluster,
                                     scale_manager=None):
        # Render this driver's template definition against the cluster and
        # its cluster template; returns whatever extract_definition yields
        # (template path, heat params, env files — per TemplateDefinition).
        cluster_template = conductor_utils.retrieve_cluster_template(context,
                                                                     cluster)
        definition = self.get_template_definition()
        return definition.extract_definition(context, cluster_template,
                                             cluster,
                                             scale_manager=scale_manager)

    def _get_env_files(self, template_path, env_rel_paths):
        # Resolve environment file paths (relative to the template's
        # directory) to absolute paths and merge them for the Heat call.
        template_dir = os.path.dirname(template_path)
        env_abs_paths = [os.path.join(template_dir, f) for f in env_rel_paths]
        environment_files = []
        env_map, merged_env = (
            template_utils.process_multiple_environments_and_files(
                env_paths=env_abs_paths, env_list_tracker=environment_files))
        return environment_files, env_map

    @abc.abstractmethod
    def get_template_definition(self):
        '''return an implementation of

        magnum.drivers.common.drivers.heat.TemplateDefinition
        '''
        raise NotImplementedError("Must implement 'get_template_definition'")

    def create_cluster(self, context, cluster, cluster_create_timeout):
        # Create the Heat stack backing this cluster. Does not save the
        # cluster object; the caller persists stack_id/status.
        stack = self._create_stack(context, clients.OpenStackClients(context),
                                   cluster, cluster_create_timeout)
        # TODO(randall): keeping this for now to reduce/eliminate data
        # migration. Should probably come up with something more generic in
        # the future once actual non-heat-based drivers are implemented.
        cluster.stack_id = stack['stack']['id']

    def update_cluster(self, context, cluster, scale_manager=None,
                       rollback=False):
        self._update_stack(context, clients.OpenStackClients(context), cluster,
                           scale_manager, rollback)

    def delete_cluster(self, context, cluster):
        self._delete_stack(context, clients.OpenStackClients(context), cluster)

    def _create_stack(self, context, osc, cluster, cluster_create_timeout):
        template_path, heat_params, env_files = (
            self._extract_template_definition(context, cluster))

        tpl_files, template = template_utils.get_template_contents(
            template_path)

        environment_files, env_map = self._get_env_files(template_path,
                                                         env_files)
        tpl_files.update(env_map)

        # Make sure no duplicate stack name
        stack_name = '%s-%s' % (cluster.name, short_id.generate_id())
        if cluster_create_timeout:
            heat_timeout = cluster_create_timeout
        else:
            # no cluster_create_timeout value was passed in to the request
            # so falling back on configuration file value
            heat_timeout = cfg.CONF.cluster_heat.create_timeout
        fields = {
            'stack_name': stack_name,
            'parameters': heat_params,
            'environment_files': environment_files,
            'template': template,
            'files': tpl_files,
            'timeout_mins': heat_timeout
        }
        created_stack = osc.heat().stacks.create(**fields)

        return created_stack

    def _update_stack(self, context, osc, cluster, scale_manager=None,
                      rollback=False):
        template_path, heat_params, env_files = (
            self._extract_template_definition(context, cluster,
                                              scale_manager=scale_manager))

        tpl_files, template = template_utils.get_template_contents(
            template_path)

        environment_files, env_map = self._get_env_files(template_path,
                                                         env_files)
        tpl_files.update(env_map)

        fields = {
            'parameters': heat_params,
            'environment_files': environment_files,
            'template': template,
            'files': tpl_files,
            # Heat expects disable_rollback, hence the inversion.
            'disable_rollback': not rollback
        }

        osc.heat().stacks.update(cluster.stack_id, **fields)

    def _delete_stack(self, context, osc, cluster):
        osc.heat().stacks.delete(cluster.stack_id)
class HeatPoller(object):
    '''Polls a Heat stack and mirrors its status onto the Magnum cluster.

    Intended to be driven by a FixedIntervalLoopingCall; poll_and_check
    raises loopingcall.LoopingCallDone to stop the loop once the stack
    reaches a terminal state or the attempt limit is exceeded.
    '''

    def __init__(self, openstack_client, cluster, cluster_driver):
        self.openstack_client = openstack_client
        self.context = self.openstack_client.context
        self.cluster = cluster
        # Poll counter, compared against cluster_heat.max_attempts to
        # bound the polling loop.
        self.attempts = 0
        self.cluster_template = conductor_utils.retrieve_cluster_template(
            self.context, cluster)
        self.template_def = cluster_driver.get_template_definition()

    def poll_and_check(self):
        # TODO(yuanying): temporary implementation to update api_address,
        # node_addresses and cluster status
        stack = self.openstack_client.heat().stacks.get(self.cluster.stack_id)
        self.attempts += 1
        # Maps each terminal stack status to the CADF action reported in
        # the success/failure notification.
        status_to_event = {
            fields.ClusterStatus.DELETE_COMPLETE: taxonomy.ACTION_DELETE,
            fields.ClusterStatus.CREATE_COMPLETE: taxonomy.ACTION_CREATE,
            fields.ClusterStatus.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE,
            fields.ClusterStatus.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE,
            fields.ClusterStatus.CREATE_FAILED: taxonomy.ACTION_CREATE,
            fields.ClusterStatus.DELETE_FAILED: taxonomy.ACTION_DELETE,
            fields.ClusterStatus.UPDATE_FAILED: taxonomy.ACTION_UPDATE,
            fields.ClusterStatus.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE
        }
        # poll_and_check is detached and polling long time to check status,
        # so another user/client can call delete cluster/stack.
        if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE:
            self._delete_complete()
            conductor_utils.notify_about_cluster_operation(
                self.context, status_to_event[stack.stack_status],
                taxonomy.OUTCOME_SUCCESS)
            raise loopingcall.LoopingCallDone()

        if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE,
                                  fields.ClusterStatus.UPDATE_COMPLETE):
            self._sync_cluster_and_template_status(stack)
            conductor_utils.notify_about_cluster_operation(
                self.context, status_to_event[stack.stack_status],
                taxonomy.OUTCOME_SUCCESS)
            raise loopingcall.LoopingCallDone()
        elif stack.stack_status != self.cluster.status:
            # Non-terminal status change: mirror it and keep polling.
            self._sync_cluster_status(stack)

        if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED,
                                  fields.ClusterStatus.DELETE_FAILED,
                                  fields.ClusterStatus.UPDATE_FAILED,
                                  fields.ClusterStatus.ROLLBACK_COMPLETE,
                                  fields.ClusterStatus.ROLLBACK_FAILED):
            self._sync_cluster_and_template_status(stack)
            self._cluster_failed(stack)
            conductor_utils.notify_about_cluster_operation(
                self.context, status_to_event[stack.stack_status],
                taxonomy.OUTCOME_FAILURE)
            raise loopingcall.LoopingCallDone()
        # only check max attempts when the stack is being created when
        # the timeout hasn't been set. If the timeout has been set then
        # the loop will end when the stack completes or the timeout occurs
        if stack.stack_status == fields.ClusterStatus.CREATE_IN_PROGRESS:
            if (stack.timeout_mins is None and
                    self.attempts > cfg.CONF.cluster_heat.max_attempts):
                LOG.error(_LE('Cluster check exit after %(attempts)s attempts,'
                              'stack_id: %(id)s, stack_status: %(status)s') %
                          {'attempts': cfg.CONF.cluster_heat.max_attempts,
                           'id': self.cluster.stack_id,
                           'status': stack.stack_status})
                raise loopingcall.LoopingCallDone()
        else:
            if self.attempts > cfg.CONF.cluster_heat.max_attempts:
                LOG.error(_LE('Cluster check exit after %(attempts)s attempts,'
                              'stack_id: %(id)s, stack_status: %(status)s') %
                          {'attempts': cfg.CONF.cluster_heat.max_attempts,
                           'id': self.cluster.stack_id,
                           'status': stack.stack_status})
                raise loopingcall.LoopingCallDone()

    def _delete_complete(self):
        # Tear down cluster-side artifacts (trust, certs, DB record) once
        # Heat reports the stack fully deleted.
        LOG.info(_LI('Cluster has been deleted, stack_id: %s')
                 % self.cluster.stack_id)
        try:
            trust_manager.delete_trustee_and_trust(self.openstack_client,
                                                   self.context,
                                                   self.cluster)
            cert_manager.delete_certificates_from_cluster(self.cluster,
                                                          context=self.context)
            self.cluster.destroy()
        except exception.ClusterNotFound:
            # Another actor already removed the cluster record; nothing to do.
            LOG.info(_LI('The cluster %s has been deleted by others.')
                     % self.cluster.uuid)

    def _sync_cluster_status(self, stack):
        # Copy status, reason and current node count from the stack onto
        # the cluster and persist it.
        self.cluster.status = stack.stack_status
        self.cluster.status_reason = stack.stack_status_reason
        stack_nc_param = self.template_def.get_heat_param(
            cluster_attr='node_count')
        self.cluster.node_count = stack.parameters[stack_nc_param]
        self.cluster.save()

    def get_version_info(self, stack):
        # Record COE version (from the stack parameters) and container
        # version (from the driver's optional 'version' module).
        stack_param = self.template_def.get_heat_param(
            cluster_attr='coe_version')
        if stack_param:
            self.cluster.coe_version = stack.parameters[stack_param]

        version_module_path = self.template_def.driver_module_path+'.version'
        try:
            ver = importutils.import_module(version_module_path)
            container_version = ver.container_version
        except Exception:
            # Drivers without a version module simply report no version.
            container_version = None
        self.cluster.container_version = container_version

    def _sync_cluster_and_template_status(self, stack):
        self.template_def.update_outputs(stack, self.cluster_template,
                                         self.cluster)
        self.get_version_info(stack)
        self._sync_cluster_status(stack)

    def _cluster_failed(self, stack):
        LOG.error(_LE('Cluster error, stack status: %(cluster_status)s, '
                      'stack_id: %(stack_id)s, '
                      'reason: %(reason)s') %
                  {'cluster_status': stack.stack_status,
                   'stack_id': self.cluster.stack_id,
                   'reason': self.cluster.status_reason})

4
magnum/drivers/common/k8s_fedora_template_def.py → magnum/drivers/heat/k8s_fedora_template_def.py

@ -12,8 +12,8 @@
from oslo_log import log as logging
from magnum.drivers.common import k8s_template_def
from magnum.drivers.common import template_def
from magnum.drivers.heat import k8s_template_def
from magnum.drivers.heat import template_def
from oslo_config import cfg
CONF = cfg.CONF

2
magnum/drivers/common/k8s_template_def.py → magnum/drivers/heat/k8s_template_def.py

@ -12,7 +12,7 @@
from oslo_config import cfg
from magnum.drivers.common import template_def
from magnum.drivers.heat import template_def
CONF = cfg.CONF

2
magnum/drivers/common/swarm_fedora_template_def.py → magnum/drivers/heat/swarm_fedora_template_def.py

@ -11,7 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnum.drivers.common import template_def
from magnum.drivers.heat import template_def
from oslo_config import cfg
CONF = cfg.CONF

0
magnum/drivers/common/template_def.py → magnum/drivers/heat/template_def.py

17
magnum/drivers/k8s_coreos_v1/driver.py

@ -12,16 +12,19 @@
# License for the specific language governing permissions and limitations
# under the License.
from magnum.drivers.common import driver
from magnum.drivers.heat import driver
from magnum.drivers.k8s_coreos_v1 import template_def
class Driver(driver.Driver):
provides = [
{'server_type': 'vm',
'os': 'coreos',
'coe': 'kubernetes'},
]
class Driver(driver.HeatDriver):
    '''Heat driver for Kubernetes clusters on CoreOS VMs.'''

    @property
    def provides(self):
        # Cluster configurations (server_type/os/coe) this driver supports.
        return [
            {'server_type': 'vm',
             'os': 'coreos',
             'coe': 'kubernetes'},
        ]

    def get_template_definition(self):
        # Heat template definition used to render this driver's stack.
        return template_def.CoreOSK8sTemplateDefinition()

4
magnum/drivers/k8s_coreos_v1/template_def.py

@ -14,8 +14,8 @@
import os
import magnum.conf
from magnum.drivers.common import k8s_template_def
from magnum.drivers.common import template_def
from magnum.drivers.heat import k8s_template_def
from magnum.drivers.heat import template_def
CONF = magnum.conf.CONF

17
magnum/drivers/k8s_fedora_atomic_v1/driver.py

@ -12,16 +12,19 @@
# License for the specific language governing permissions and limitations
# under the License.
from magnum.drivers.common import driver
from magnum.drivers.heat import driver
from magnum.drivers.k8s_fedora_atomic_v1 import template_def
class Driver(driver.Driver):
provides = [
{'server_type': 'vm',
'os': 'fedora-atomic',
'coe': 'kubernetes'},
]
class Driver(driver.HeatDriver):
    '''Heat driver for Kubernetes clusters on Fedora Atomic VMs.'''

    @property
    def provides(self):
        # Cluster configurations (server_type/os/coe) this driver supports.
        return [
            {'server_type': 'vm',
             'os': 'fedora-atomic',
             'coe': 'kubernetes'},
        ]

    def get_template_definition(self):
        # Heat template definition used to render this driver's stack.
        return template_def.AtomicK8sTemplateDefinition()

2
magnum/drivers/k8s_fedora_atomic_v1/template_def.py

@ -15,7 +15,7 @@
import os
import magnum.conf
from magnum.drivers.common import k8s_fedora_template_def as kftd
from magnum.drivers.heat import k8s_fedora_template_def as kftd
CONF = magnum.conf.CONF

17
magnum/drivers/k8s_fedora_ironic_v1/driver.py

@ -12,16 +12,19 @@
# License for the specific language governing permissions and limitations
# under the License.
from magnum.drivers.common import driver
from magnum.drivers.heat import driver
from magnum.drivers.k8s_fedora_ironic_v1 import template_def
class Driver(driver.Driver):
provides = [
{'server_type': 'bm',
'os': 'fedora',
'coe': 'kubernetes'},
]
class Driver(driver.HeatDriver):
    '''Heat driver for Kubernetes clusters on Fedora bare-metal (Ironic).'''

    @property
    def provides(self):
        # Cluster configurations (server_type/os/coe) this driver supports.
        return [
            {'server_type': 'bm',
             'os': 'fedora',
             'coe': 'kubernetes'},
        ]

    def get_template_definition(self):
        # Heat template definition used to render this driver's stack.
        return template_def.FedoraK8sIronicTemplateDefinition()

2
magnum/drivers/k8s_fedora_ironic_v1/template_def.py

@ -16,7 +16,7 @@ import os
from oslo_log import log as logging
from magnum.common import exception
from magnum.drivers.common import k8s_fedora_template_def as kftd
from magnum.drivers.heat import k8s_fedora_template_def as kftd
from oslo_config import cfg
CONF = cfg.CONF

17
magnum/drivers/mesos_ubuntu_v1/driver.py

@ -12,16 +12,19 @@
# License for the specific language governing permissions and limitations
# under the License.
from magnum.drivers.common import driver
from magnum.drivers.heat import driver
from magnum.drivers.mesos_ubuntu_v1 import template_def
class Driver(driver.Driver):
provides = [
{'server_type': 'vm',
'os': 'ubuntu',
'coe': 'mesos'},
]
class Driver(driver.HeatDriver):
    '''Heat driver for Mesos clusters on Ubuntu VMs.'''

    @property
    def provides(self):
        # Cluster configurations (server_type/os/coe) this driver supports.
        return [
            {'server_type': 'vm',
             'os': 'ubuntu',
             'coe': 'mesos'},
        ]

    def get_template_definition(self):
        # Heat template definition used to render this driver's stack.
        return template_def.UbuntuMesosTemplateDefinition()

2
magnum/drivers/mesos_ubuntu_v1/template_def.py

@ -13,7 +13,7 @@
# under the License.
import os
from magnum.drivers.common import template_def
from magnum.drivers.heat import template_def
class UbuntuMesosTemplateDefinition(template_def.BaseTemplateDefinition):

17
magnum/drivers/swarm_fedora_atomic_v1/driver.py

@ -12,16 +12,19 @@
# License for the specific language governing permissions and limitations
# under the License.
from magnum.drivers.common import driver
from magnum.drivers.heat import driver
from magnum.drivers.swarm_fedora_atomic_v1 import template_def
class Driver(driver.Driver):
provides = [
{'server_type': 'vm',
'os': 'fedora-atomic',
'coe': 'swarm'},
]
class Driver(driver.HeatDriver):
    '''Heat driver for Docker Swarm clusters on Fedora Atomic VMs.'''

    @property
    def provides(self):
        # Cluster configurations (server_type/os/coe) this driver supports.
        return [
            {'server_type': 'vm',
             'os': 'fedora-atomic',
             'coe': 'swarm'},
        ]

    def get_template_definition(self):
        # Heat template definition used to render this driver's stack.
        return template_def.AtomicSwarmTemplateDefinition()

2
magnum/drivers/swarm_fedora_atomic_v1/template_def.py

@ -13,7 +13,7 @@
# under the License.
import os
from magnum.drivers.common import swarm_fedora_template_def as sftd
from magnum.drivers.heat import swarm_fedora_template_def as sftd
class AtomicSwarmTemplateDefinition(sftd.SwarmFedoraTemplateDefinition):

320
magnum/tests/unit/conductor/handlers/test_cluster_conductor.py

@ -28,7 +28,6 @@ import magnum.conf
from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_atomic_dr
from magnum import objects
from magnum.objects.fields import ClusterStatus as cluster_status
from magnum.tests import base
from magnum.tests import fake_notifier
from magnum.tests.unit.db import base as db_base
from magnum.tests.unit.db import utils
@ -72,6 +71,7 @@ class TestHandler(db_base.DbTestCase):
mock_driver.return_value = mock_dr
self.cluster.node_count = 2
self.cluster.status = cluster_status.CREATE_COMPLETE
self.handler.cluster_update(self.context, self.cluster)
notifications = fake_notifier.NOTIFICATIONS
@ -81,9 +81,9 @@ class TestHandler(db_base.DbTestCase):
self.assertEqual(
taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
mock_dr.update_stack.assert_called_once_with(
self.context, mock_openstack_client, self.cluster,
mock_scale_manager.return_value, False)
mock_dr.update_cluster.assert_called_once_with(
self.context, self.cluster, mock_scale_manager.return_value,
False)
cluster = objects.Cluster.get(self.context, self.cluster.uuid)
self.assertEqual(2, cluster.node_count)
@ -105,6 +105,7 @@ class TestHandler(db_base.DbTestCase):
mock_openstack_client.heat.return_value = mock_heat_client
self.cluster.node_count = 2
self.cluster.status = cluster_status.CREATE_FAILED
self.assertRaises(exception.NotSupported, self.handler.cluster_update,
self.context, self.cluster)
@ -141,6 +142,7 @@ class TestHandler(db_base.DbTestCase):
mock_driver.return_value = mock_dr
self.cluster.node_count = 2
self.cluster.status = cluster_status.CREATE_COMPLETE
self.handler.cluster_update(self.context, self.cluster)
notifications = fake_notifier.NOTIFICATIONS
@ -150,9 +152,8 @@ class TestHandler(db_base.DbTestCase):
self.assertEqual(
taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
mock_dr.update_stack.assert_called_once_with(
self.context, mock_openstack_client, self.cluster,
mock_scale_manager.return_value, False)
mock_dr.update_cluster.assert_called_once_with(
self.context, self.cluster, mock_scale_manager.return_value, False)
cluster = objects.Cluster.get(self.context, self.cluster.uuid)
self.assertEqual(2, cluster.node_count)
@ -184,7 +185,7 @@ class TestHandler(db_base.DbTestCase):
self._test_update_cluster_status_complete(
cluster_status.ADOPT_COMPLETE)
@patch('magnum.conductor.handlers.cluster_conductor.HeatPoller')
@patch('magnum.drivers.heat.driver.HeatPoller')
@patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
@patch('magnum.drivers.common.driver.Driver.get_driver')
@ -227,9 +228,8 @@ class TestHandler(db_base.DbTestCase):
self.assertEqual(
taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
mock_dr.create_stack.assert_called_once_with(self.context,
mock.sentinel.osc,
self.cluster, timeout)
mock_dr.create_cluster.assert_called_once_with(self.context,
self.cluster, timeout)
mock_cm.generate_certificates_to_cluster.assert_called_once_with(
self.cluster, context=self.context)
self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status)
@ -279,7 +279,7 @@ class TestHandler(db_base.DbTestCase):
mock_cluster_create):
mock_dr = mock.MagicMock()
mock_driver.return_value = mock_dr
mock_dr.create_stack.side_effect = exc.HTTPBadRequest
mock_dr.create_cluster.side_effect = exc.HTTPBadRequest
self._test_create_failed(
mock_openstack_client_class,
@ -369,7 +369,7 @@ class TestHandler(db_base.DbTestCase):
characters, must start with alpha""")
mock_dr = mock.MagicMock()
mock_driver.return_value = mock_dr
mock_dr.create_stack.side_effect = exc.HTTPBadRequest(error_message)
mock_dr.create_cluster.side_effect = exc.HTTPBadRequest(error_message)
self._test_create_failed(
mock_openstack_client_class,
@ -390,13 +390,14 @@ class TestHandler(db_base.DbTestCase):
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome'])
@patch('magnum.conductor.handlers.cluster_conductor.HeatPoller')
@patch('magnum.drivers.heat.driver.HeatPoller')
@patch('heatclient.common.template_utils'
'.process_multiple_environments_and_files')
@patch('heatclient.common.template_utils.get_template_contents')
@patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
@patch('magnum.drivers.common.driver._extract_template_definition')
@patch('magnum.drivers.k8s_fedora_atomic_v1.driver.Driver.'
'_extract_template_definition')
@patch('magnum.drivers.common.driver.Driver.get_driver')
@patch('magnum.common.clients.OpenStackClients')
@patch('magnum.common.short_id.generate_id')
@ -524,292 +525,3 @@ class TestHandler(db_base.DbTestCase):
taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome'])
self.assertEqual(
0, cert_manager.delete_certificates_from_cluster.call_count)
class TestHeatPoller(base.TestCase):
@patch('magnum.conductor.utils.retrieve_cluster_template')
@patch('oslo_config.cfg')
@patch('magnum.common.clients.OpenStackClients')
@patch('magnum.drivers.common.driver.Driver.get_driver')
def setup_poll_test(self, mock_driver, mock_openstack_client, cfg,
mock_retrieve_cluster_template):
cfg.CONF.cluster_heat.max_attempts = 10
cluster = mock.MagicMock()
cluster_template_dict = utils.get_test_cluster_template(
coe='kubernetes')
mock_heat_stack = mock.MagicMock()
mock_heat_client = mock.MagicMock()
mock_heat_client.stacks.get.return_value = mock_heat_stack
mock_openstack_client.heat.return_value = mock_heat_client
cluster_template = objects.ClusterTemplate(self.context,
**cluster_template_dict)
mock_retrieve_cluster_template.return_value = cluster_template
mock_driver.return_value = k8s_atomic_dr.Driver()
poller = cluster_conductor.HeatPoller(mock_openstack_client, cluster,
k8s_atomic_dr.Driver())
poller.get_version_info = mock.MagicMock()
return (mock_heat_stack, cluster, poller)
def test_poll_and_check_send_notification(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
mock_heat_stack.stack_status = cluster_status.CREATE_FAILED
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
mock_heat_stack.stack_status = cluster_status.DELETE_FAILED
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
mock_heat_stack.stack_status = cluster_status.UPDATE_FAILED
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
self.assertEqual(6, poller.attempts)
notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(6, len(notifications))
self.assertEqual(
'magnum.cluster.create', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_SUCCESS, notifications[0].payload['outcome'])
self.assertEqual(
'magnum.cluster.create', notifications[1].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome'])
self.assertEqual(
'magnum.cluster.delete', notifications[2].event_type)
self.assertEqual(
taxonomy.OUTCOME_SUCCESS, notifications[2].payload['outcome'])
self.assertEqual(
'magnum.cluster.delete', notifications[3].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[3].payload['outcome'])
self.assertEqual(
'magnum.cluster.update', notifications[4].event_type)
self.assertEqual(
taxonomy.OUTCOME_SUCCESS, notifications[4].payload['outcome'])
self.assertEqual(
'magnum.cluster.update', notifications[5].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[5].payload['outcome'])
def test_poll_no_save(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
cluster.status = cluster_status.CREATE_IN_PROGRESS
mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
poller.poll_and_check()
self.assertEqual(0, cluster.save.call_count)
self.assertEqual(1, poller.attempts)
def test_poll_save(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
cluster.status = cluster_status.CREATE_IN_PROGRESS
mock_heat_stack.stack_status = cluster_status.CREATE_FAILED
mock_heat_stack.stack_status_reason = 'Create failed'
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
self.assertEqual(2, cluster.save.call_count)
self.assertEqual(cluster_status.CREATE_FAILED, cluster.status)
self.assertEqual('Create failed', cluster.status_reason)
self.assertEqual(1, poller.attempts)
def test_poll_done(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
mock_heat_stack.stack_status = cluster_status.CREATE_FAILED
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
self.assertEqual(2, poller.attempts)
def test_poll_done_by_update(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE
mock_heat_stack.parameters = {'number_of_minions': 2}
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
self.assertEqual(1, cluster.save.call_count)
self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status)
self.assertEqual(2, cluster.node_count)
self.assertEqual(1, poller.attempts)
def test_poll_done_by_update_failed(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.UPDATE_FAILED
mock_heat_stack.parameters = {'number_of_minions': 2}
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
self.assertEqual(2, cluster.save.call_count)
self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status)
self.assertEqual(2, cluster.node_count)
self.assertEqual(1, poller.attempts)
def test_poll_done_by_rollback_complete(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.ROLLBACK_COMPLETE
mock_heat_stack.parameters = {'number_of_minions': 1}
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
self.assertEqual(2, cluster.save.call_count)
self.assertEqual(cluster_status.ROLLBACK_COMPLETE, cluster.status)
self.assertEqual(1, cluster.node_count)
self.assertEqual(1, poller.attempts)
def test_poll_done_by_rollback_failed(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.ROLLBACK_FAILED
mock_heat_stack.parameters = {'number_of_minions': 1}
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
self.assertEqual(2, cluster.save.call_count)
self.assertEqual(cluster_status.ROLLBACK_FAILED, cluster.status)
self.assertEqual(1, cluster.node_count)
self.assertEqual(1, poller.attempts)
def test_poll_destroy(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.DELETE_FAILED
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
# Destroy method is not called when stack delete failed
self.assertEqual(0, cluster.destroy.call_count)
mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS
poller.poll_and_check()
self.assertEqual(0, cluster.destroy.call_count)
self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status)
mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
# The cluster status should still be DELETE_IN_PROGRESS, because
# the destroy() method may be failed. If success, this cluster record
# will delete directly, change status is meaningless.
self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status)
self.assertEqual(1, cluster.destroy.call_count)
def test_poll_delete_in_progress_timeout_set(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS
mock_heat_stack.timeout_mins = 60
# timeout only affects stack creation so expecting this
# to process normally
poller.poll_and_check()
def test_poll_delete_in_progress_max_attempts_reached(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS
poller.attempts = CONF.cluster_heat.max_attempts
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
def test_poll_create_in_prog_max_att_reached_no_timeout(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
poller.attempts = CONF.cluster_heat.max_attempts
mock_heat_stack.timeout_mins = None
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
def test_poll_create_in_prog_max_att_reached_timeout_set(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
poller.attempts = CONF.cluster_heat.max_attempts
mock_heat_stack.timeout_mins = 60
# since the timeout is set the max attempts gets ignored since
# the timeout will eventually stop the poller either when
# the stack gets created or the timeout gets reached
poller.poll_and_check()
def test_poll_create_in_prog_max_att_reached_timed_out(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.CREATE_FAILED
poller.attempts = CONF.cluster_heat.max_attempts
mock_heat_stack.timeout_mins = 60
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
def test_poll_create_in_prog_max_att_not_reached_no_timeout(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
mock_heat_stack.timeout.mins = None
poller.poll_and_check()
def test_poll_create_in_prog_max_att_not_reached_timeout_set(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
mock_heat_stack.timeout_mins = 60
poller.poll_and_check()
def test_poll_create_in_prog_max_att_not_reached_timed_out(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.CREATE_FAILED
mock_heat_stack.timeout_mins = 60
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
def test_poll_node_count(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.parameters = {'number_of_minions': 1}
mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
poller.poll_and_check()
self.assertEqual(1, cluster.node_count)
def test_poll_node_count_by_update(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.parameters = {'number_of_minions': 2}
mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
self.assertEqual(2, cluster.node_count)
@patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
def test_delete_complete(self, cert_manager, trust_manager):
mock_heat_stack, cluster, poller = self.setup_poll_test()
poller._delete_complete()
self.assertEqual(1, cluster.destroy.call_count)
self.assertEqual(
1, cert_manager.delete_certificates_from_cluster.call_count)
self.assertEqual(1,
trust_manager.delete_trustee_and_trust.call_count)
def test_create_or_complete(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE
mock_heat_stack.stack_status_reason = 'stack complete'
poller._sync_cluster_and_template_status(mock_heat_stack)
self.assertEqual('stack complete', cluster.status_reason)
self.assertEqual(cluster_status.CREATE_COMPLETE, cluster.status)
self.assertEqual(1, cluster.save.call_count)
def test_sync_cluster_status(self):
mock_heat_stack, cluster, poller = self.setup_poll_test()
mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
mock_heat_stack.stack_status_reason = 'stack incomplete'
poller._sync_cluster_status(mock_heat_stack)
self.assertEqual('stack incomplete', cluster.status_reason)
self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status)
@patch('magnum.conductor.handlers.cluster_conductor.LOG')
def test_cluster_failed(self, logger):
mock_heat_stack, cluster, poller = self.setup_poll_test()
poller._sync_cluster_and_template_status(mock_heat_stack)
poller._cluster_failed(mock_heat_stack)
self.assertEqual(1, logger.error.call_count)

70
magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py

@ -16,7 +16,6 @@ import mock
from mock import patch
import magnum.conf
from magnum.drivers.common import driver
from magnum.drivers.k8s_coreos_v1 import driver as k8s_coreos_dr
from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_dr
from magnum import objects
@ -124,8 +123,8 @@ class TestClusterConductorWithK8s(base.TestCase):
(template_path,
definition,
env_files) = driver._extract_template_definition(self.context,
cluster)
env_files) = mock_driver()._extract_template_definition(self.context,
cluster)
mapping = {
'dns_nameserver': 'dns_nameserver',
@ -224,8 +223,8 @@ class TestClusterConductorWithK8s(base.TestCase):
(template_path,
definition,
env_files) = driver._extract_template_definition(self.context,
cluster)
env_files) = mock_driver()._extract_template_definition(self.context,
cluster)
expected = {
'auth_url': 'http://192.168.10.10:5000/v3',
@ -305,8 +304,8 @@ class TestClusterConductorWithK8s(base.TestCase):
(template_path,
definition,
env_files) = driver._extract_template_definition(self.context,
cluster)
env_files) = mock_driver()._extract_template_definition(self.context,
cluster)
expected = {
'auth_url': 'http://192.168.10.10:5000/v3',
@ -363,8 +362,8 @@ class TestClusterConductorWithK8s(base.TestCase):
(template_path,
definition,
env_files) = driver._extract_template_definition(self.context,
cluster)
env_files) = mock_driver()._extract_template_definition(self.context,
cluster)
expected = {
'ssh_key_name': 'keypair_id',
@ -423,8 +422,8 @@ class TestClusterConductorWithK8s(base.TestCase):
(template_path,
definition,
env_files) = driver._extract_template_definition(self.context,
cluster)