ng-2: Adapt existing cluster APIs and conductor

This changes the existing cluster APIs and the cluster conductor to
take nodegroups into consideration:

* create: now creates the default nodegroups for the cluster
* update: updates the default nodegroups of the cluster
* delete: also deletes the nodegroups that belong to the cluster
* cluster_resize: takes into account the nodegroup provided by the API
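
For illustration, a resize request can now target a specific worker
nodegroup. The following is a minimal client-side sketch based on the API
tests in this change; the endpoint host, token and UUIDs are placeholders,
and API microversion 1.7 or later is assumed:

    # Hypothetical example; not part of this change.
    import requests

    resize_body = {
        "node_count": 6,                  # desired size for the nodegroup
        "nodegroup": "<worker-ng-uuid>",  # optional; defaults to the default worker nodegroup
    }
    requests.post(
        "http://<magnum-api>:9511/v1/clusters/<cluster-uuid>/actions/resize",
        json=resize_body,
        headers={"OpenStack-API-Version": "container-infra 1.7",
                 "X-Auth-Token": "<token>"})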

story: 2005266

Change-Id: I5478c83ca316f8f09625607d5ae9d9f3c02eb65a
Theodoros Tsioutsias 2019-03-04 09:23:40 +00:00
parent 70f1dbd9c7
commit 18c77a288d
22 changed files with 562 additions and 152 deletions


@ -410,8 +410,9 @@ class BaysController(base.Controller):
:param bay: a bay within the request body.
"""
new_bay = self._post(bay)
new_bay, node_count, master_count = self._post(bay)
res_bay = pecan.request.rpcapi.cluster_create(new_bay,
master_count, node_count,
bay.bay_create_timeout)
# Set the HTTP Location Header
@ -425,8 +426,9 @@ class BaysController(base.Controller):
:param bay: a bay within the request body.
"""
new_bay = self._post(bay)
new_bay, node_count, master_count = self._post(bay)
pecan.request.rpcapi.cluster_create_async(new_bay,
master_count, node_count,
bay.bay_create_timeout)
return BayID(new_bay.uuid)
@ -464,12 +466,15 @@ class BaysController(base.Controller):
# NOTE(yuywz): We will generate a random human-readable name for
# bay if the name is not specified by user.
name = bay_dict.get('name') or self._generate_name_for_bay(context)
node_count = bay_dict.pop('node_count')
master_count = bay_dict.pop('master_count')
bay_dict['name'] = name
bay_dict['coe_version'] = None
bay_dict['container_version'] = None
new_bay = objects.Cluster(context, **bay_dict)
new_bay.uuid = uuid.uuid4()
return new_bay
return new_bay, node_count, master_count
@base.Controller.api_version("1.1", "1.1")
@wsme.validate(types.uuid, [BayPatchType])
@ -480,8 +485,8 @@ class BaysController(base.Controller):
:param bay_ident: UUID or logical name of a bay.
:param patch: a json PATCH document to apply to this bay.
"""
bay = self._patch(bay_ident, patch)
res_bay = pecan.request.rpcapi.cluster_update(bay)
bay, node_count = self._patch(bay_ident, patch)
res_bay = pecan.request.rpcapi.cluster_update(bay, node_count)
return Bay.convert_with_links(res_bay)
@base.Controller.api_version("1.2", "1.2") # noqa
@ -494,8 +499,8 @@ class BaysController(base.Controller):
:param bay_ident: UUID or logical name of a bay.
:param patch: a json PATCH document to apply to this bay.
"""
bay = self._patch(bay_ident, patch)
pecan.request.rpcapi.cluster_update_async(bay)
bay, node_count = self._patch(bay_ident, patch)
pecan.request.rpcapi.cluster_update_async(bay, node_count)
return BayID(bay.uuid)
@base.Controller.api_version("1.3") # noqa
@ -509,8 +514,9 @@ class BaysController(base.Controller):
:param rollback: whether to rollback bay on update failure.
:param patch: a json PATCH document to apply to this bay.
"""
bay = self._patch(bay_ident, patch)
pecan.request.rpcapi.cluster_update_async(bay, rollback=rollback)
bay, node_count = self._patch(bay_ident, patch)
pecan.request.rpcapi.cluster_update_async(bay, node_count,
rollback=rollback)
return BayID(bay.uuid)
def _patch(self, bay_ident, patch):
@ -518,28 +524,33 @@ class BaysController(base.Controller):
bay = api_utils.get_resource('Cluster', bay_ident)
policy.enforce(context, 'bay:update', bay.as_dict(),
action='bay:update')
bay_to_cluster_attrs = {
'baymodel_id': 'cluster_template_id',
'bay_create_timeout': 'create_timeout'
}
try:
bay_dict = bay.as_dict()
new_bay = Bay(**api_utils.apply_jsonpatch(bay_dict, patch))
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# Update only the fields that have changed
for field in objects.Cluster.fields:
try:
patch_val = getattr(new_bay, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
# NOTE(ttsiouts): magnum.objects.Cluster.node_count will be a
# property so we won't be able to store it in the object. So
# instead of obj_what_changed compare the new and the old
# clusters.
delta = set()
for field in new_bay.fields:
cluster_field = field
if cluster_field in bay_to_cluster_attrs:
cluster_field = bay_to_cluster_attrs[field]
if cluster_field not in bay_dict:
continue
if patch_val == wtypes.Unset:
patch_val = None
if bay[field] != patch_val:
bay[field] = patch_val
delta = bay.obj_what_changed()
if getattr(new_bay, field) != bay_dict[cluster_field]:
delta.add(cluster_field)
validate_cluster_properties(delta)
return bay
return bay, new_bay.node_count
@base.Controller.api_version("1.1", "1.1")
@expose.expose(None, types.uuid_or_name, status_code=204)


@ -499,9 +499,12 @@ class ClustersController(base.Controller):
cluster_dict['coe_version'] = None
cluster_dict['container_version'] = None
node_count = cluster_dict.pop('node_count')
master_count = cluster_dict.pop('master_count')
new_cluster = objects.Cluster(context, **cluster_dict)
new_cluster.uuid = uuid.uuid4()
pecan.request.rpcapi.cluster_create_async(new_cluster,
master_count, node_count,
cluster.create_timeout)
return ClusterID(new_cluster.uuid)
@ -516,8 +519,8 @@ class ClustersController(base.Controller):
:param cluster_ident: UUID or logical name of a cluster.
:param patch: a json PATCH document to apply to this cluster.
"""
cluster = self._patch(cluster_ident, patch)
pecan.request.rpcapi.cluster_update_async(cluster)
cluster, node_count = self._patch(cluster_ident, patch)
pecan.request.rpcapi.cluster_update_async(cluster, node_count)
return ClusterID(cluster.uuid)
@base.Controller.api_version("1.3") # noqa
@ -531,8 +534,9 @@ class ClustersController(base.Controller):
:param rollback: whether to rollback cluster on update failure.
:param patch: a json PATCH document to apply to this cluster.
"""
cluster = self._patch(cluster_ident, patch)
pecan.request.rpcapi.cluster_update_async(cluster, rollback)
cluster, node_count = self._patch(cluster_ident, patch)
pecan.request.rpcapi.cluster_update_async(cluster, node_count,
rollback)
return ClusterID(cluster.uuid)
def _patch(self, cluster_ident, patch):
@ -547,22 +551,17 @@ class ClustersController(base.Controller):
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# Update only the fields that have changed
for field in objects.Cluster.fields:
try:
patch_val = getattr(new_cluster, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if cluster[field] != patch_val:
cluster[field] = patch_val
delta = cluster.obj_what_changed()
# NOTE(ttsiouts): magnum.objects.Cluster.node_count will be a
# property so we won't be able to store it in the object. So
# instead of obj_what_changed compare the new and the old
# clusters.
delta = set()
for field in new_cluster.fields:
if getattr(cluster, field) != getattr(new_cluster, field):
delta.add(field)
validation.validate_cluster_properties(delta)
return cluster
return cluster, new_cluster.node_count
@expose.expose(None, types.uuid_or_name, status_code=204)
def delete(self, cluster_ident):


@ -18,7 +18,9 @@ from magnum.api.controllers import base
from magnum.api.controllers.v1 import types
from magnum.api import expose
from magnum.api import utils as api_utils
from magnum.common import exception
from magnum.common import policy
from magnum import objects
class ClusterID(wtypes.Base):
@ -42,10 +44,10 @@ class ClusterResizeRequest(base.APIBase):
This class enforces type checking and value constraints.
"""
node_count = wtypes.IntegerType(minimum=1)
node_count = wsme.wsattr(wtypes.IntegerType(minimum=1), mandatory=True)
"""The expected node count after resize."""
nodes_to_remove = wsme.wsattr([wsme.types.text], mandatory=False,
nodes_to_remove = wsme.wsattr([wtypes.text], mandatory=False,
default=[])
"""Instance ID list for nodes to be removed."""
@ -77,14 +79,32 @@ class ActionsController(base.Controller):
if (cluster_resize_req.nodegroup == wtypes.Unset or
not cluster_resize_req.nodegroup):
# TODO(flwang): The default node group of current cluster could be
# extracted by objects.NodeGroups.get_by_uuid or something like
# that as long as we have node group support.
cluster_resize_req.nodegroup = None
# NOTE(ttsiouts): If the nodegroup is not specified,
# reflect the change to the default worker nodegroup.
nodegroup = cluster.default_ng_worker
else:
nodegroup = objects.NodeGroup.get(
context, cluster.uuid, cluster_resize_req.nodegroup)
if nodegroup.role == 'master':
# NOTE(ttsiouts): Restrict the resize to worker nodegroups
raise exception.MasterNGResizeNotSupported()
# NOTE(ttsiouts): Make sure that the new node count is within
# the configured boundaries of the selected nodegroup.
if nodegroup.min_node_count > cluster_resize_req.node_count:
raise exception.NGResizeOutBounds(
nodegroup=nodegroup.name, min_nc=nodegroup.min_node_count,
max_nc=nodegroup.max_node_count)
if (nodegroup.max_node_count and
nodegroup.max_node_count < cluster_resize_req.node_count):
raise exception.NGResizeOutBounds(
nodegroup=nodegroup.name, min_nc=nodegroup.min_node_count,
max_nc=nodegroup.max_node_count)
pecan.request.rpcapi.cluster_resize_async(
cluster,
cluster_resize_req.node_count,
cluster_resize_req.nodes_to_remove,
cluster_resize_req.nodegroup)
nodegroup)
return ClusterID(cluster.uuid)


@ -406,3 +406,13 @@ class NodeGroupAlreadyExists(Conflict):
class NodeGroupNotFound(ResourceNotFound):
message = _("Nodegroup %(nodegroup)s could not be found.")
class MasterNGResizeNotSupported(NotSupported):
message = _("Resizing a master nodegroup is not supported.")
class NGResizeOutBounds(Invalid):
message = _("Resizing %(nodegroup)s outside the allowed range: "
"min_node_count = %(min_node_count), "
"max_node_count = %(max_node_count)")


@ -31,12 +31,16 @@ class API(rpc_service.API):
# Cluster Operations
def cluster_create(self, cluster, create_timeout):
def cluster_create(self, cluster, master_count, node_count,
create_timeout):
return self._call('cluster_create', cluster=cluster,
master_count=master_count, node_count=node_count,
create_timeout=create_timeout)
def cluster_create_async(self, cluster, create_timeout):
def cluster_create_async(self, cluster, master_count, node_count,
create_timeout):
self._cast('cluster_create', cluster=cluster,
master_count=master_count, node_count=node_count,
create_timeout=create_timeout)
def cluster_delete(self, uuid):
@ -45,14 +49,16 @@ class API(rpc_service.API):
def cluster_delete_async(self, uuid):
self._cast('cluster_delete', uuid=uuid)
def cluster_update(self, cluster):
return self._call('cluster_update', cluster=cluster)
def cluster_update(self, cluster, node_count):
return self._call(
'cluster_update', cluster=cluster, node_count=node_count)
def cluster_update_async(self, cluster, rollback=False):
self._cast('cluster_update', cluster=cluster, rollback=rollback)
def cluster_update_async(self, cluster, node_count, rollback=False):
self._cast('cluster_update', cluster=cluster,
node_count=node_count, rollback=rollback)
def cluster_resize(self, cluster, node_count, nodes_to_remove,
nodegroup=None, rollback=False):
nodegroup, rollback=False):
return self._call('cluster_resize',
cluster=cluster,
@ -61,7 +67,7 @@ class API(rpc_service.API):
nodegroup=nodegroup)
def cluster_resize_async(self, cluster, node_count, nodes_to_remove,
nodegroup=None, rollback=False):
nodegroup, rollback=False):
return self._cast('cluster_resize',
cluster=cluster,
node_count=node_count,


@ -43,15 +43,27 @@ class Handler(object):
# Cluster Operations
def cluster_create(self, context, cluster, create_timeout):
def cluster_create(self, context, cluster, master_count, node_count,
create_timeout):
LOG.debug('cluster_heat cluster_create')
osc = clients.OpenStackClients(context)
cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS
cluster.status_reason = None
cluster.node_count = node_count
cluster.master_count = master_count
cluster.create()
# Master nodegroup
master_ng = conductor_utils._get_nodegroup_object(
context, cluster, master_count, is_master=True)
master_ng.create()
# Minion nodegroup
minion_ng = conductor_utils._get_nodegroup_object(
context, cluster, node_count, is_master=False)
minion_ng.create()
try:
# Create trustee/trust and set them to cluster
trust_manager.create_trustee_and_trust(osc, cluster)
@ -82,7 +94,7 @@ class Handler(object):
return cluster
def cluster_update(self, context, cluster, rollback=False):
def cluster_update(self, context, cluster, node_count, rollback=False):
LOG.debug('cluster_heat cluster_update')
osc = clients.OpenStackClients(context)
@ -103,9 +115,14 @@ class Handler(object):
'"%s"') % cluster.status
raise exception.NotSupported(operation=operation)
delta = cluster.obj_what_changed()
if not delta:
return cluster
# Updates will only be reflected to the default worker
# nodegroup.
worker_ng = cluster.default_ng_worker
if worker_ng.node_count == node_count:
return
# Backup the old node count so that we can restore it
# in case of an exception.
old_node_count = worker_ng.node_count
manager = scale_manager.get_scale_manager(context, osc, cluster)
@ -118,6 +135,10 @@ class Handler(object):
try:
conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING)
worker_ng.node_count = node_count
worker_ng.save()
# For now, also update the cluster.node_count
cluster.node_count = node_count
cluster_driver.update_cluster(context, cluster, manager, rollback)
cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
cluster.status_reason = None
@ -125,6 +146,9 @@ class Handler(object):
cluster.status = fields.ClusterStatus.UPDATE_FAILED
cluster.status_reason = six.text_type(e)
cluster.save()
# Restore the node_count
worker_ng.node_count = old_node_count
worker_ng.save()
conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
if isinstance(e, exc.HTTPBadRequest):
@ -156,6 +180,9 @@ class Handler(object):
trust_manager.delete_trustee_and_trust(osc, context, cluster)
cert_manager.delete_certificates_from_cluster(cluster,
context=context)
# delete all of the cluster's nodegroups
for ng in cluster.nodegroups:
ng.destroy()
cluster.destroy()
except exception.ClusterNotFound:
LOG.info('The cluster %s has been deleted by others.',
@ -179,7 +206,7 @@ class Handler(object):
return None
def cluster_resize(self, context, cluster,
node_count, nodes_to_remove, nodegroup=None):
node_count, nodes_to_remove, nodegroup):
LOG.debug('cluster_conductor cluster_resize')
osc = clients.OpenStackClients(context)
@ -216,8 +243,14 @@ class Handler(object):
cluster_driver = driver.Driver.get_driver(ct.server_type,
ct.cluster_distro,
ct.coe)
# Backup the old node count so that we can restore it
# in case of an exception.
old_node_count = nodegroup.node_count
# Resize cluster
try:
nodegroup.node_count = node_count
nodegroup.save()
conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING)
cluster_driver.resize_cluster(context, cluster, resize_manager,
@ -229,6 +262,8 @@ class Handler(object):
cluster.status = fields.ClusterStatus.UPDATE_FAILED
cluster.status_reason = six.text_type(e)
cluster.save()
nodegroup.node_count = old_node_count
nodegroup.save()
conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
if isinstance(e, exc.HTTPBadRequest):


@ -22,6 +22,7 @@ from magnum.common import clients
from magnum.common import rpc
from magnum.objects import cluster
from magnum.objects import cluster_template
from magnum.objects import nodegroup
def retrieve_cluster(context, cluster_ident):
@ -111,3 +112,25 @@ def notify_about_cluster_operation(context, action, outcome):
method = notifier.info
method(context, event_type, payload)
def _get_nodegroup_object(context, cluster, node_count, is_master=False):
"""Returns a nodegroup object based on the given cluster object."""
ng = nodegroup.NodeGroup(context)
ng.cluster_id = cluster.uuid
ng.project_id = cluster.project_id
ng.labels = cluster.labels
ng.node_count = node_count
ng.image_id = cluster.cluster_template.image_id
ng.docker_volume_size = (cluster.docker_volume_size or
cluster.cluster_template.docker_volume_size)
if is_master:
ng.flavor_id = (cluster.master_flavor_id or
cluster.cluster_template.master_flavor_id)
ng.role = "master"
else:
ng.flavor_id = cluster.flavor_id or cluster.cluster_template.flavor_id
ng.role = "worker"
ng.name = "default-%s" % ng.role
ng.is_default = True
return ng
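
For reference, a hedged illustration of how the conductor is expected to use
this helper when creating the default nodegroups for a new cluster (names and
values inferred from the code above; not an exact transcript of the handler):

    # Illustrative only: the two default nodegroups built at create time.
    master_ng = _get_nodegroup_object(context, cluster, master_count,
                                      is_master=True)
    # -> role="master", name="default-master", is_default=True,
    #    node_count=master_count, flavor_id from master_flavor_id
    worker_ng = _get_nodegroup_object(context, cluster, node_count,
                                      is_master=False)
    # -> role="worker", name="default-worker", is_default=True,
    #    node_count=node_count, flavor_id from flavor_id
    master_ng.create()
    worker_ng.create()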


@ -19,6 +19,7 @@ from magnum.db import api as dbapi
from magnum.objects import base
from magnum.objects.cluster_template import ClusterTemplate
from magnum.objects import fields as m_fields
from magnum.objects.nodegroup import NodeGroup
@base.MagnumObjectRegistry.register
@ -47,8 +48,9 @@ class Cluster(base.MagnumPersistentObject, base.MagnumObject,
# Version 1.16: Added 'master_flavor_id' field
# Version 1.17: Added 'flavor_id' field
# Version 1.18: Added 'health_status' and 'health_status_reason' field
# Version 1.19: Added nodegroups, default_ng_worker, default_ng_master
VERSION = '1.18'
VERSION = '1.19'
dbapi = dbapi.get_instance()
@ -104,6 +106,28 @@ class Cluster(base.MagnumPersistentObject, base.MagnumObject,
cluster.obj_reset_changes()
return cluster
@property
def nodegroups(self):
# Returns all nodegroups that belong to the cluster.
return NodeGroup.list(self._context, self.uuid)
@property
def default_ng_worker(self):
# Assume that every cluster will have only one default
# non-master nodegroup. We don't want to limit the roles
# so each nodegroup that does not have a master role is
# considered a worker/minion nodegroup.
filters = {'is_default': True}
default_ngs = NodeGroup.list(self._context, self.uuid, filters=filters)
return [n for n in default_ngs if n.role != 'master'][0]
@property
def default_ng_master(self):
# Assume that every cluster will have only one default
# master nodegroup.
filters = {'role': 'master', 'is_default': True}
return NodeGroup.list(self._context, self.uuid, filters=filters)[0]
@staticmethod
def _from_db_object_list(db_objects, cls, context):
"""Converts a list of database entities to a list of formal objects."""


@ -83,6 +83,9 @@ class ClusterUpdateJob(object):
taxonomy.OUTCOME_FAILURE)
# if we're done with it, delete it
if self.cluster.status == objects.fields.ClusterStatus.DELETE_COMPLETE:
# delete all the nodegroups that belong to this cluster
for ng in objects.NodeGroup.list(self.ctx, self.cluster.uuid):
ng.destroy()
self.cluster.destroy()
# end the "loop"
raise loopingcall.LoopingCallDone()


@ -243,7 +243,9 @@ class TestPatch(api_base.FunctionalTest):
self.mock_bay_update.side_effect = self._simulate_rpc_bay_update
self.addCleanup(p.stop)
def _simulate_rpc_bay_update(self, bay, rollback=False):
def _simulate_rpc_bay_update(self, bay, node_count, rollback=False):
bay.status = 'UPDATE_IN_PROGRESS'
bay.node_count = node_count
bay.save()
return bay
@ -394,13 +396,15 @@ class TestPatch(api_base.FunctionalTest):
@mock.patch.object(rpcapi.API, 'cluster_update_async')
def test_update_bay_with_rollback_enabled(self, mock_update):
node_count = 4
response = self.patch_json(
'/bays/%s/?rollback=True' % self.bay.name,
[{'path': '/node_count', 'value': 4,
[{'path': '/node_count', 'value': node_count,
'op': 'replace'}],
headers={'OpenStack-API-Version': 'container-infra 1.3'})
mock_update.assert_called_once_with(mock.ANY, rollback=True)
mock_update.assert_called_once_with(mock.ANY, node_count,
rollback=True)
self.assertEqual(202, response.status_code)
def test_remove_ok(self):
@ -455,7 +459,10 @@ class TestPost(api_base.FunctionalTest):
self.mock_valid_os_res = p.start()
self.addCleanup(p.stop)
def _simulate_rpc_bay_create(self, bay, bay_create_timeout):
def _simulate_rpc_bay_create(self, bay, master_count, node_count,
bay_create_timeout):
bay.node_count = node_count
bay.master_count = master_count
bay.create()
return bay
@ -481,7 +488,8 @@ class TestPost(api_base.FunctionalTest):
def test_create_bay_set_project_id_and_user_id(self):
bdict = apiutils.bay_post_data()
def _simulate_rpc_bay_create(bay, bay_create_timeout):
def _simulate_rpc_bay_create(bay, node_count, master_count,
bay_create_timeout):
self.assertEqual(self.context.project_id, bay.project_id)
self.assertEqual(self.context.user_id, bay.user_id)
bay.create()
@ -691,7 +699,8 @@ class TestPost(api_base.FunctionalTest):
self.assertEqual(201, response.status_int)
def test_create_bay_with_no_timeout(self):
def _simulate_rpc_bay_create(bay, bay_create_timeout):
def _simulate_rpc_bay_create(bay, node_count, master_count,
bay_create_timeout):
self.assertEqual(60, bay_create_timeout)
bay.create()
return bay


@ -280,7 +280,9 @@ class TestPatch(api_base.FunctionalTest):
self.mock_cluster_update.side_effect = self._sim_rpc_cluster_update
self.addCleanup(p.stop)
def _sim_rpc_cluster_update(self, cluster, rollback=False):
def _sim_rpc_cluster_update(self, cluster, node_count, rollback=False):
cluster.status = 'UPDATE_IN_PROGRESS'
cluster.node_count = node_count
cluster.save()
return cluster
@ -437,23 +439,27 @@ class TestPatch(api_base.FunctionalTest):
self.assertTrue(response.json['errors'])
def test_update_cluster_with_rollback_enabled(self):
node_count = 4
response = self.patch_json(
'/clusters/%s/?rollback=True' % self.cluster_obj.uuid,
[{'path': '/node_count', 'value': 4,
[{'path': '/node_count', 'value': node_count,
'op': 'replace'}],
headers={'OpenStack-API-Version': 'container-infra 1.3'})
self.mock_cluster_update.assert_called_once_with(mock.ANY, True)
self.mock_cluster_update.assert_called_once_with(
mock.ANY, node_count, True)
self.assertEqual(202, response.status_code)
def test_update_cluster_with_rollback_disabled(self):
node_count = 4
response = self.patch_json(
'/clusters/%s/?rollback=False' % self.cluster_obj.uuid,
[{'path': '/node_count', 'value': 4,
[{'path': '/node_count', 'value': node_count,
'op': 'replace'}],
headers={'OpenStack-API-Version': 'container-infra 1.3'})
self.mock_cluster_update.assert_called_once_with(mock.ANY, False)
self.mock_cluster_update.assert_called_once_with(
mock.ANY, node_count, False)
self.assertEqual(202, response.status_code)
def test_remove_ok(self):
@ -510,7 +516,10 @@ class TestPost(api_base.FunctionalTest):
self.mock_valid_os_res = p.start()
self.addCleanup(p.stop)
def _simulate_cluster_create(self, cluster, create_timeout):
def _simulate_cluster_create(self, cluster, master_count, node_count,
create_timeout):
cluster.node_count = node_count
cluster.master_count = master_count
cluster.create()
return cluster
@ -549,7 +558,8 @@ class TestPost(api_base.FunctionalTest):
def test_create_cluster_set_project_id_and_user_id(self):
bdict = apiutils.cluster_post_data()
def _simulate_rpc_cluster_create(cluster, create_timeout):
def _simulate_rpc_cluster_create(cluster, master_count, node_count,
create_timeout):
self.assertEqual(self.context.project_id, cluster.project_id)
self.assertEqual(self.context.user_id, cluster.user_id)
cluster.create()
@ -681,7 +691,8 @@ class TestPost(api_base.FunctionalTest):
self.assertEqual(202, response.status_int)
def test_create_cluster_with_no_timeout(self):
def _simulate_rpc_cluster_create(cluster, create_timeout):
def _simulate_rpc_cluster_create(cluster, master_count, node_count,
create_timeout):
self.assertEqual(60, create_timeout)
cluster.create()
return cluster


@ -32,9 +32,11 @@ class TestClusterActions(api_base.FunctionalTest):
self.addCleanup(p.stop)
def _sim_rpc_cluster_resize(self, cluster, node_count, nodes_to_remove,
nodegroup=None, rollback=False):
nodegroup, rollback=False):
cluster.node_count = node_count
cluster.save()
nodegroup.node_count = node_count
nodegroup.save()
return cluster
def test_resize(self):
@ -51,3 +53,75 @@ class TestClusterActions(api_base.FunctionalTest):
self.assertEqual(self.cluster_obj.uuid, response['uuid'])
self.assertEqual(self.cluster_obj.cluster_template_id,
response['cluster_template_id'])
def test_resize_with_nodegroup(self):
new_node_count = 6
nodegroup = self.cluster_obj.default_ng_worker
# Verify that the API is ok with maximum allowed
# node count set to None
self.assertIsNone(nodegroup.max_node_count)
cluster_resize_req = {
"node_count": new_node_count,
"nodegroup": nodegroup.uuid
}
response = self.post_json('/clusters/%s/actions/resize' %
self.cluster_obj.uuid,
cluster_resize_req,
headers={"Openstack-Api-Version":
"container-infra 1.7"})
self.assertEqual(202, response.status_code)
response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
self.assertEqual(new_node_count, response['node_count'])
self.assertEqual(self.cluster_obj.uuid, response['uuid'])
self.assertEqual(self.cluster_obj.cluster_template_id,
response['cluster_template_id'])
def test_resize_with_master_nodegroup(self):
new_node_count = 6
nodegroup = self.cluster_obj.default_ng_master
cluster_resize_req = {
"node_count": new_node_count,
"nodegroup": nodegroup.uuid
}
response = self.post_json('/clusters/%s/actions/resize' %
self.cluster_obj.uuid,
cluster_resize_req,
headers={"Openstack-Api-Version":
"container-infra 1.7"},
expect_errors=True)
self.assertEqual(400, response.status_code)
def test_resize_with_node_count_greater_than_max(self):
new_node_count = 6
nodegroup = self.cluster_obj.default_ng_worker
nodegroup.max_node_count = 5
nodegroup.save()
cluster_resize_req = {
"node_count": new_node_count,
"nodegroup": nodegroup.uuid
}
response = self.post_json('/clusters/%s/actions/resize' %
self.cluster_obj.uuid,
cluster_resize_req,
headers={"Openstack-Api-Version":
"container-infra 1.7"},
expect_errors=True)
self.assertEqual(400, response.status_code)
def test_resize_with_node_count_less_than_min(self):
new_node_count = 3
nodegroup = self.cluster_obj.default_ng_worker
nodegroup.min_node_count = 4
nodegroup.save()
cluster_resize_req = {
"node_count": new_node_count,
"nodegroup": nodegroup.uuid
}
response = self.post_json('/clusters/%s/actions/resize' %
self.cluster_obj.uuid,
cluster_resize_req,
headers={"Openstack-Api-Version":
"container-infra 1.7"},
expect_errors=True)
self.assertEqual(400, response.status_code)


@ -26,7 +26,12 @@ class NeutronTest(base.TestCase):
super(NeutronTest, self).setUp()
cluster_dict = utils.get_test_cluster(node_count=1)
nodegroups_dict = utils.get_nodegroups_for_cluster(node_count=1)
self.cluster = objects.Cluster(self.context, **cluster_dict)
self.nodegroups = [
objects.NodeGroup(self.context, **nodegroups_dict['master']),
objects.NodeGroup(self.context, **nodegroups_dict['worker'])
]
@mock.patch('magnum.common.clients.OpenStackClients')
def test_delete_floatingip(self, mock_clients):


@ -26,7 +26,12 @@ class OctaviaTest(base.TestCase):
super(OctaviaTest, self).setUp()
cluster_dict = utils.get_test_cluster(node_count=1)
nodegroups_dict = utils.get_nodegroups_for_cluster(node_count=1)
self.cluster = objects.Cluster(self.context, **cluster_dict)
self.nodegroups = [
objects.NodeGroup(self.context, **nodegroups_dict['master']),
objects.NodeGroup(self.context, **nodegroups_dict['worker'])
]
@mock.patch("magnum.common.neutron.delete_floatingip")
@mock.patch('magnum.common.clients.OpenStackClients')


@ -44,9 +44,19 @@ class TestHandler(db_base.DbTestCase):
self.cluster_template = objects.ClusterTemplate(
self.context, **cluster_template_dict)
self.cluster_template.create()
cluster_dict = utils.get_test_cluster(node_count=1)
self.cluster = objects.Cluster(self.context, **cluster_dict)
self.cluster_dict = utils.get_test_cluster(node_count=1)
self.nodegroups_dict = utils.get_nodegroups_for_cluster(
node_count=1)
del self.nodegroups_dict['master']['id']
del self.nodegroups_dict['worker']['id']
self.cluster = objects.Cluster(self.context, **self.cluster_dict)
self.master_count = self.cluster.master_count
self.node_count = self.cluster.node_count
self.cluster.create()
self.master = objects.NodeGroup(
self.context, **self.nodegroups_dict['master'])
self.worker = objects.NodeGroup(
self.context, **self.nodegroups_dict['worker'])
@patch('magnum.conductor.scale_manager.get_scale_manager')
@patch('magnum.drivers.common.driver.Driver.get_driver')
@ -65,9 +75,11 @@ class TestHandler(db_base.DbTestCase):
mock_dr = mock.MagicMock()
mock_driver.return_value = mock_dr
self.cluster.node_count = 2
node_count = 2
self.master.create()
self.worker.create()
self.cluster.status = cluster_status.CREATE_COMPLETE
self.handler.cluster_update(self.context, self.cluster)
self.handler.cluster_update(self.context, self.cluster, node_count)
notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
@ -79,8 +91,9 @@ class TestHandler(db_base.DbTestCase):
mock_dr.update_cluster.assert_called_once_with(
self.context, self.cluster, mock_scale_manager.return_value,
False)
cluster = objects.Cluster.get(self.context, self.cluster.uuid)
cluster = objects.Cluster.get_by_uuid(self.context, self.cluster.uuid)
self.assertEqual(2, cluster.node_count)
self.assertEqual(2, cluster.default_ng_worker.node_count)
@patch('magnum.common.clients.OpenStackClients')
def test_update_node_count_failure(
@ -93,10 +106,12 @@ class TestHandler(db_base.DbTestCase):
mock_openstack_client = mock_openstack_client_class.return_value
mock_openstack_client.heat.return_value = mock_heat_client
self.cluster.node_count = 2
node_count = 2
self.master.create()
self.worker.create()
self.cluster.status = cluster_status.CREATE_FAILED
self.assertRaises(exception.NotSupported, self.handler.cluster_update,
self.context, self.cluster)
self.context, self.cluster, node_count)
notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
@ -107,6 +122,7 @@ class TestHandler(db_base.DbTestCase):
cluster = objects.Cluster.get(self.context, self.cluster.uuid)
self.assertEqual(1, cluster.node_count)
self.assertEqual(1, self.worker.node_count)
@patch('magnum.conductor.scale_manager.get_scale_manager')
@patch('magnum.drivers.common.driver.Driver.get_driver')
@ -124,9 +140,11 @@ class TestHandler(db_base.DbTestCase):
mock_dr = mock.MagicMock()
mock_driver.return_value = mock_dr
self.cluster.node_count = 2
node_count = 2
self.cluster.status = cluster_status.CREATE_COMPLETE
self.handler.cluster_update(self.context, self.cluster)
self.master.create()
self.worker.create()
self.handler.cluster_update(self.context, self.cluster, node_count)
notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
@ -139,6 +157,7 @@ class TestHandler(db_base.DbTestCase):
self.context, self.cluster, mock_scale_manager.return_value, False)
cluster = objects.Cluster.get(self.context, self.cluster.uuid)
self.assertEqual(2, cluster.node_count)
self.assertEqual(2, cluster.default_ng_worker.node_count)
def test_update_cluster_status_update_complete(self):
self._test_update_cluster_status_complete(
@ -195,19 +214,18 @@ class TestHandler(db_base.DbTestCase):
mock_dr.create_stack.side_effect = create_stack_side_effect
# FixMe(eliqiao): cluster_create will call cluster.create()
# again, this so bad because we have already called it in setUp
# since other test case will share the codes in setUp()
# But in self.handler.cluster_create, we update cluster.uuid and
# cluster.stack_id so cluster.create will create a new record with
# clustermodel_id None, this is bad because we load clusterModel
# object in cluster object by clustermodel_id. Here update
# self.cluster.clustermodel_id so cluster.obj_get_changes will get
# notice that clustermodel_id is updated and will update it
# in db.
self.cluster.cluster_template_id = self.cluster_template.uuid
cluster = self.handler.cluster_create(self.context,
self.cluster, timeout)
# Just create a new cluster, since the one in setUp is already
# created and the previous solution seems kind of hacky.
cluster_dict = utils.get_test_cluster(node_count=1)
cluster = objects.Cluster(self.context, **cluster_dict)
node_count = cluster.node_count
master_count = cluster.master_count
del cluster_dict['id']
del cluster_dict['uuid']
cluster_obj = objects.Cluster(self.context, **cluster_dict)
cluster = self.handler.cluster_create(self.context, cluster_obj,
master_count, node_count,
timeout)
notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
@ -217,12 +235,15 @@ class TestHandler(db_base.DbTestCase):
taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
mock_dr.create_cluster.assert_called_once_with(self.context,
self.cluster, timeout)
cluster, timeout)
mock_cm.generate_certificates_to_cluster.assert_called_once_with(
self.cluster, context=self.context)
cluster, context=self.context)
self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status)
mock_trust_manager.create_trustee_and_trust.assert_called_once_with(
osc, self.cluster)
osc, cluster)
self.assertEqual(2, len(cluster.nodegroups))
self.assertEqual(node_count, cluster.default_ng_worker.node_count)
self.assertEqual(master_count, cluster.default_ng_master.node_count)
def _test_create_failed(self,
mock_openstack_client_class,
@ -239,8 +260,8 @@ class TestHandler(db_base.DbTestCase):
self.assertRaises(
expected_exception,
self.handler.cluster_create,
self.context,
self.cluster, timeout
self.context, self.cluster,
self.master_count, self.node_count, timeout
)
gctb = mock_cert_manager.generate_certificates_to_cluster
@ -400,9 +421,6 @@ class TestHandler(db_base.DbTestCase):
mock_process_mult,
mock_heat_poller_class):
timeout = 15
self.cluster.cluster_template_id = self.cluster_template.uuid
self.cluster.name = 'cluster1'
cluster_name = self.cluster.name
mock_poller = mock.MagicMock()
mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone()
mock_heat_poller_class.return_value = mock_poller
@ -435,10 +453,22 @@ class TestHandler(db_base.DbTestCase):
osc.heat.return_value = mock_hc
mock_openstack_client_class.return_value = osc
self.handler.cluster_create(self.context, self.cluster, timeout)
# NOTE(ttsiouts): self.cluster is already created so it's
# a bad idea to use it and try to create it again... Instead
# get a new object and use it.
cluster_dict = utils.get_test_cluster(
node_count=1, uuid='f6a99187-6f42-4fbb-aa6f-18407c0ee50e')
del cluster_dict['id']
cluster = objects.Cluster(self.context, **cluster_dict)
node_count = cluster.node_count
master_count = cluster.master_count
self.handler.cluster_create(self.context, cluster,
master_count, node_count,
timeout)
mock_extract_tmpl_def.assert_called_once_with(self.context,
self.cluster)
cluster)
mock_get_template_contents.assert_called_once_with(
'the/template/path.yaml')
mock_process_mult.assert_called_once_with(
@ -456,9 +486,13 @@ class TestHandler(db_base.DbTestCase):
'content of file:///the/template/env_file_2'
},
parameters={'heat_param_1': 'foo', 'heat_param_2': 'bar'},
stack_name=('%s-short_id' % cluster_name),
stack_name=('%s-short_id' % cluster.name),
template='some template yaml',
timeout_mins=timeout)
self.assertEqual(node_count, cluster.node_count)
self.assertEqual(node_count, cluster.default_ng_worker.node_count)
self.assertEqual(master_count, cluster.master_count)
self.assertEqual(master_count, cluster.default_ng_master.node_count)
@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
@patch('magnum.common.clients.OpenStackClients')
@ -471,6 +505,9 @@ class TestHandler(db_base.DbTestCase):
osc = mock.MagicMock()
mock_openstack_client_class.return_value = osc
osc.heat.side_effect = exc.HTTPNotFound
self.master.create()
self.worker.create()
self.assertEqual(2, len(self.cluster.nodegroups))
self.handler.cluster_delete(self.context, self.cluster.uuid)
notifications = fake_notifier.NOTIFICATIONS
@ -485,6 +522,9 @@ class TestHandler(db_base.DbTestCase):
taxonomy.OUTCOME_SUCCESS, notifications[1].payload['outcome'])
self.assertEqual(
1, cert_manager.delete_certificates_from_cluster.call_count)
# Assert that the cluster nodegroups were deleted as well
db_nodegroups = objects.NodeGroup.list(self.context, self.cluster.uuid)
self.assertEqual([], db_nodegroups)
# The cluster has been destroyed
self.assertRaises(exception.ClusterNotFound,
objects.Cluster.get, self.context, self.cluster.uuid)
@ -551,8 +591,11 @@ class TestHandler(db_base.DbTestCase):
mock_dr = mock.MagicMock()
mock_driver.return_value = mock_dr
# Create the default worker
self.worker.create()
self.cluster.status = cluster_status.CREATE_COMPLETE
self.handler.cluster_resize(self.context, self.cluster, 3, ["ID1"])
self.handler.cluster_resize(self.context, self.cluster, 3, ["ID1"],
self.worker)
notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
@ -563,7 +606,10 @@ class TestHandler(db_base.DbTestCase):
mock_dr.resize_cluster.assert_called_once_with(
self.context, self.cluster, mock_scale_manager.return_value, 3,
["ID1"], None)
["ID1"], self.worker)
nodegroup = objects.NodeGroup.get_by_uuid(
self.context, self.cluster.uuid, self.worker.uuid)
self.assertEqual(3, nodegroup.node_count)
@patch('magnum.common.clients.OpenStackClients')
def test_cluster_resize_failure(
@ -576,9 +622,11 @@ class TestHandler(db_base.DbTestCase):
mock_openstack_client = mock_openstack_client_class.return_value
mock_openstack_client.heat.return_value = mock_heat_client
# Create the default worker
self.worker.create()
self.cluster.status = cluster_status.CREATE_FAILED
self.assertRaises(exception.NotSupported, self.handler.cluster_resize,
self.context, self.cluster, 2, [])
self.context, self.cluster, 2, [], self.worker)
notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
@ -589,3 +637,6 @@ class TestHandler(db_base.DbTestCase):
cluster = objects.Cluster.get(self.context, self.cluster.uuid)
self.assertEqual(1, cluster.node_count)
nodegroup = objects.NodeGroup.get_by_uuid(
self.context, self.cluster.uuid, self.worker.uuid)
self.assertEqual(1, nodegroup.node_count)


@ -52,6 +52,12 @@ class MonitorsTestCase(base.TestCase):
api_address='https://5.6.7.8:2376',
master_addresses=['10.0.0.6'])
self.cluster = objects.Cluster(self.context, **cluster)
nodegroups = utils.get_nodegroups_for_cluster(
node_addresses=['1.2.3.4'], master_addresses=['10.0.0.6'])
self.nodegroups = [
objects.NodeGroup(self.context, **nodegroups['master']),
objects.NodeGroup(self.context, **nodegroups['worker'])
]
self.monitor = swarm_monitor.SwarmMonitor(self.context, self.cluster)
self.v2_monitor = swarm_v2_monitor.SwarmMonitor(self.context,
self.cluster)


@ -28,6 +28,7 @@ class RPCAPITestCase(base.DbTestCase):
def setUp(self):
super(RPCAPITestCase, self).setUp()
self.fake_cluster = dbutils.get_test_cluster(driver='fake-driver')
self.fake_nodegroups = dbutils.get_nodegroups_for_cluster()
self.fake_certificate = objects.Certificate.from_db_cluster(
self.fake_cluster)
self.fake_certificate.csr = 'fake-csr'
@ -78,6 +79,8 @@ class RPCAPITestCase(base.DbTestCase):
'call',
version='1.0',
cluster=self.fake_cluster,
master_count=3,
node_count=4,
create_timeout=15)
def test_cluster_delete(self):
@ -95,7 +98,8 @@ class RPCAPITestCase(base.DbTestCase):
self._test_rpcapi('cluster_update',
'call',
version='1.1',
cluster=self.fake_cluster['name'])
cluster=self.fake_cluster['name'],
node_count=2)
def test_ping_conductor(self):
self._test_rpcapi('ping_conductor',


@ -14,6 +14,7 @@
# under the License.
"""Magnum test utilities."""
from oslo_utils import uuidutils
from magnum.db import api as db_api
@ -300,7 +301,45 @@ def create_test_nodegroup(**kw):
"""
nodegroup = get_test_nodegroup(**kw)
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
if 'id' in nodegroup:
del nodegroup['id']
dbapi = db_api.get_instance()
return dbapi.create_nodegroup(nodegroup)
def get_nodegroups_for_cluster(**kw):
# get workers nodegroup
worker = get_test_nodegroup(
role='worker',
name=kw.get('worker_name', 'test-worker'),
uuid=kw.get('worker_uuid', uuidutils.generate_uuid()),
cluster_id=kw.get('cluster_id',
'5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
project_id=kw.get('project_id', 'fake_project'),
node_addresses=kw.get('node_addresses', ['172.17.2.4']),
node_count=kw.get('node_count', 3)
)
# get masters nodegroup
master = get_test_nodegroup(
role='master',
name=kw.get('master_name', 'test-master'),
uuid=kw.get('master_uuid', uuidutils.generate_uuid()),
cluster_id=kw.get('cluster_id',
'5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
project_id=kw.get('project_id', 'fake_project'),
node_addresses=kw.get('master_addresses', ['172.17.2.18']),
node_count=kw.get('master_count', 3)
)
return {'master': master, 'worker': worker}
def create_nodegroups_for_cluster(**kw):
nodegroups = get_nodegroups_for_cluster(**kw)
# Create workers nodegroup
worker = nodegroups['worker']
create_test_nodegroup(**worker)
# Create masters nodegroup
master = nodegroups['master']
create_test_nodegroup(**master)


@ -28,6 +28,7 @@ class TestClusterObject(base.DbTestCase):
def setUp(self):
super(TestClusterObject, self).setUp()
self.fake_cluster = utils.get_test_cluster()
self.fake_nodegroups = utils.get_nodegroups_for_cluster()
self.fake_cluster['trust_id'] = 'trust_id'
self.fake_cluster['trustee_username'] = 'trustee_user'
self.fake_cluster['trustee_user_id'] = 'trustee_user_id'


@ -355,7 +355,7 @@ class TestObject(test_base.TestCase, _TestObject):
# For more information on object version testing, read
# https://docs.openstack.org/magnum/latest/contributor/objects.html
object_data = {
'Cluster': '1.18-9f0dfcc3e898eef2b9a09647b612adb6',
'Cluster': '1.19-9f0dfcc3e898eef2b9a09647b612adb6',
'ClusterTemplate': '1.19-3b0b2b3933d0955abf3ab40111744960',
'Certificate': '1.1-1924dc077daa844f0f9076332ef96815',
'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd',


@ -86,6 +86,8 @@ def create_test_cluster(context, **kw):
create_test_cluster_template(context, uuid=cluster['cluster_template_id'],
coe=kw.get('coe', 'swarm'),
tls_disabled=kw.get('tls_disabled'))
kw.update({'cluster_id': cluster['uuid']})
db_utils.create_nodegroups_for_cluster(**kw)
cluster.create()
return cluster


@ -14,6 +14,8 @@
import mock
from oslo_utils import uuidutils
from magnum.common import context
from magnum.common.rpc_service import CONF
from magnum.db.sqlalchemy import api as dbapi
@ -34,6 +36,17 @@ class fake_stack(object):
for key, val in kw.items():
setattr(self, key, val)
# This dictionary will be populated by setUp to help mock
# the nodegroup listing in magnum.db.api (list_cluster_nodegroups).
cluster_ngs = {}
def mock_nodegroup_list(cls, dummy_context, cluster_id, **kwargs):
try:
return cluster_ngs[cluster_id]
except KeyError:
return []
class PeriodicTestCase(base.TestCase):
@ -49,26 +62,57 @@ class PeriodicTestCase(base.TestCase):
'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
}
trust_attrs.update({'id': 1, 'stack_id': '11',
uuid = uuidutils.generate_uuid()
trust_attrs.update({'id': 1, 'stack_id': '11', 'uuid': uuid,
'status': cluster_status.CREATE_IN_PROGRESS,
'status_reason': 'no change'})
cluster1 = utils.get_test_cluster(**trust_attrs)
trust_attrs.update({'id': 2, 'stack_id': '22',
ngs1 = utils.get_nodegroups_for_cluster()
uuid = uuidutils.generate_uuid()
trust_attrs.update({'id': 2, 'stack_id': '22', 'uuid': uuid,
'status': cluster_status.DELETE_IN_PROGRESS,
'status_reason': 'no change'})
cluster2 = utils.get_test_cluster(**trust_attrs)
trust_attrs.update({'id': 3, 'stack_id': '33',
ngs2 = utils.get_nodegroups_for_cluster()
uuid = uuidutils.generate_uuid()
trust_attrs.update({'id': 3, 'stack_id': '33', 'uuid': uuid,
'status': cluster_status.UPDATE_IN_PROGRESS,
'status_reason': 'no change'})
cluster3 = utils.get_test_cluster(**trust_attrs)
trust_attrs.update({'id': 4, 'stack_id': '44',
ngs3 = utils.get_nodegroups_for_cluster()
uuid = uuidutils.generate_uuid()
trust_attrs.update({'id': 4, 'stack_id': '44', 'uuid': uuid,
'status': cluster_status.DELETE_IN_PROGRESS,
'status_reason': 'no change'})
cluster4 = utils.get_test_cluster(**trust_attrs)
trust_attrs.update({'id': 5, 'stack_id': '55',
ngs4 = utils.get_nodegroups_for_cluster()
uuid = uuidutils.generate_uuid()
trust_attrs.update({'id': 5, 'stack_id': '55', 'uuid': uuid,
'status': cluster_status.ROLLBACK_IN_PROGRESS,
'status_reason': 'no change'})
cluster5 = utils.get_test_cluster(**trust_attrs)
ngs5 = utils.get_nodegroups_for_cluster()
self.nodegroups1 = [
objects.NodeGroup(self.context, **ngs1['master']),
objects.NodeGroup(self.context, **ngs1['worker'])
]
self.nodegroups2 = [
objects.NodeGroup(self.context, **ngs2['master']),
objects.NodeGroup(self.context, **ngs2['worker'])
]
self.nodegroups3 = [
objects.NodeGroup(self.context, **ngs3['master']),
objects.NodeGroup(self.context, **ngs3['worker'])
]
self.nodegroups4 = [
objects.NodeGroup(self.context, **ngs4['master']),
objects.NodeGroup(self.context, **ngs4['worker'])
]
self.nodegroups5 = [
objects.NodeGroup(self.context, **ngs5['master']),
objects.NodeGroup(self.context, **ngs5['worker'])
]
self.cluster1 = objects.Cluster(self.context, **cluster1)
self.cluster2 = objects.Cluster(self.context, **cluster2)
@ -76,6 +120,18 @@ class PeriodicTestCase(base.TestCase):
self.cluster4 = objects.Cluster(self.context, **cluster4)
self.cluster5 = objects.Cluster(self.context, **cluster5)
# This is used to mock list_cluster_nodegroups from magnum.db.api.
# It's not the greatest way to do it, but we have to populate the
# dictionary at runtime (or have statically defined uuids per NG).
global cluster_ngs
cluster_ngs = {
self.cluster1.uuid: self.nodegroups1,
self.cluster2.uuid: self.nodegroups2,
self.cluster3.uuid: self.nodegroups3,
self.cluster4.uuid: self.nodegroups4,
self.cluster5.uuid: self.nodegroups5
}
# these tests are based on the basic behavior of our standard
# Heat-based drivers, but drivers based on other orchestration
# methods should generally behave in a similar fashion as far
@ -131,25 +187,33 @@ class PeriodicTestCase(base.TestCase):
new=fakes.FakeLoopingCall)
@mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster')
@mock.patch('magnum.objects.Cluster.list')
@mock.patch.object(dbapi.Connection, 'destroy_nodegroup')
@mock.patch.object(dbapi.Connection, 'destroy_cluster')
def test_sync_cluster_status_changes(self, mock_db_destroy,
mock_cluster_list, mock_get_driver):
mock_ng_destroy,
mock_cluster_list,
mock_get_driver):
mock_cluster_list.return_value = [self.cluster1, self.cluster2,
self.cluster3, self.cluster4,
self.cluster5]
mock_get_driver.return_value = self.mock_driver
with mock.patch.object(dbapi.Connection, 'list_cluster_nodegroups',
mock_nodegroup_list):
periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None)
self.assertEqual(cluster_status.CREATE_COMPLETE, self.cluster1.status)
self.assertEqual(cluster_status.CREATE_COMPLETE,
self.cluster1.status)
self.assertEqual('fake_reason_11', self.cluster1.status_reason)
# make sure cluster 2 didn't change
self.assertEqual(cluster_status.DELETE_IN_PROGRESS,
self.cluster2.status)
self.assertEqual('no change', self.cluster2.status_reason)
self.assertEqual(cluster_status.UPDATE_COMPLETE, self.cluster3.status)
self.assertEqual(cluster_status.UPDATE_COMPLETE,
self.cluster3.status)
self.assertEqual('fake_reason_33', self.cluster3.status_reason)
self.assertEqual(2, mock_ng_destroy.call_count)
mock_db_destroy.assert_called_once_with(self.cluster4.uuid)
self.assertEqual(cluster_status.ROLLBACK_COMPLETE,
self.cluster5.status)
@ -171,6 +235,7 @@ class PeriodicTestCase(base.TestCase):
mock_cluster_list.return_value = [self.cluster1, self.cluster2,
self.cluster3, self.cluster5]
mock_get_driver.return_value = self.mock_driver
periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None)
self.assertEqual(cluster_status.CREATE_IN_PROGRESS,
@ -193,7 +258,9 @@ class PeriodicTestCase(base.TestCase):
@mock.patch('magnum.drivers.common.driver.Driver.get_driver_for_cluster')
@mock.patch('magnum.objects.Cluster.list')
@mock.patch.object(dbapi.Connection, 'destroy_cluster')
def test_sync_cluster_status_heat_not_found(self, mock_db_destroy,
@mock.patch.object(dbapi.Connection, 'destroy_nodegroup')
def test_sync_cluster_status_heat_not_found(self, mock_ng_destroy,
mock_db_destroy,
mock_cluster_list,
mock_get_driver):
self.get_stacks.clear()
@ -202,13 +269,18 @@ class PeriodicTestCase(base.TestCase):
self.cluster3, self.cluster4,
self.cluster5]
with mock.patch.object(dbapi.Connection, 'list_cluster_nodegroups',
mock_nodegroup_list):
periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None)
self.assertEqual(cluster_status.CREATE_FAILED, self.cluster1.status)
self.assertEqual(cluster_status.CREATE_FAILED,
self.cluster1.status)
self.assertEqual('Stack 11 not found', self.cluster1.status_reason)
self.assertEqual(cluster_status.UPDATE_FAILED, self.cluster3.status)
self.assertEqual(cluster_status.UPDATE_FAILED,
self.cluster3.status)
self.assertEqual('Stack 33 not found', self.cluster3.status_reason)
self.assertEqual(cluster_status.ROLLBACK_FAILED, self.cluster5.status)
self.assertEqual(cluster_status.ROLLBACK_FAILED,
self.cluster5.status)
self.assertEqual('Stack 55 not found', self.cluster5.status_reason)
mock_db_destroy.assert_has_calls([
mock.call(self.cluster2.uuid),