ng-8: APIs for nodegroup CRUD operations
This adds the changes needed in the API and conductor level to support creating updating and deleting nodegroups. Change-Id: I4ad60994ad6b4cb9cac18129557e1e87e61ae98c
This commit is contained in:
parent
d4a52719f1
commit
5027e0daf8
@ -32,7 +32,6 @@ from magnum.api.controllers.v1 import types
|
||||
from magnum.api import expose
|
||||
from magnum.api import utils as api_utils
|
||||
from magnum.api import validation
|
||||
from magnum.common import clients
|
||||
from magnum.common import exception
|
||||
from magnum.common import name_generator
|
||||
from magnum.common import policy
|
||||
@ -397,22 +396,11 @@ class ClustersController(base.Controller):
|
||||
|
||||
and store them into cluster.faults.
|
||||
"""
|
||||
osc = clients.OpenStackClients(context)
|
||||
filters = {'status': 'FAILED'}
|
||||
try:
|
||||
failed_resources = osc.heat().resources.list(
|
||||
cluster.stack_id, nested_depth=2, filters=filters)
|
||||
except Exception as e:
|
||||
failed_resources = []
|
||||
LOG.warning("Failed to retrieve failed resources for "
|
||||
"cluster %(cluster)s from Heat stack "
|
||||
"%(stack)s due to error: %(e)s",
|
||||
{'cluster': cluster.uuid,
|
||||
'stack': cluster.stack_id, 'e': e},
|
||||
exc_info=True)
|
||||
|
||||
return {res.resource_name: res.resource_status_reason
|
||||
for res in failed_resources}
|
||||
# Gather fault info from the cluster nodegroups.
|
||||
return {
|
||||
ng.name: ng.status_reason for ng in cluster.nodegroups
|
||||
if ng.status.endswith('FAILED')
|
||||
}
|
||||
|
||||
@expose.expose(Cluster, types.uuid_or_name)
|
||||
def get_one(self, cluster_ident):
|
||||
@ -437,12 +425,12 @@ class ClustersController(base.Controller):
|
||||
policy.enforce(context, 'cluster:get', cluster.as_dict(),
|
||||
action='cluster:get')
|
||||
|
||||
cluster = Cluster.convert_with_links(cluster)
|
||||
api_cluster = Cluster.convert_with_links(cluster)
|
||||
|
||||
if cluster.status in fields.ClusterStatus.STATUS_FAILED:
|
||||
cluster.faults = self._collect_fault_info(context, cluster)
|
||||
if api_cluster.status in fields.ClusterStatus.STATUS_FAILED:
|
||||
api_cluster.faults = self._collect_fault_info(context, cluster)
|
||||
|
||||
return cluster
|
||||
return api_cluster
|
||||
|
||||
def _check_cluster_quota_limit(self, context):
|
||||
try:
|
||||
|
@ -14,6 +14,8 @@
|
||||
# under the License.
|
||||
|
||||
import pecan
|
||||
import six
|
||||
import uuid
|
||||
import wsme
|
||||
from wsme import types as wtypes
|
||||
|
||||
@ -23,8 +25,34 @@ from magnum.api.controllers.v1 import collection
|
||||
from magnum.api.controllers.v1 import types
|
||||
from magnum.api import expose
|
||||
from magnum.api import utils as api_utils
|
||||
from magnum.common import exception
|
||||
from magnum.common import policy
|
||||
from magnum import objects
|
||||
from magnum.objects import fields
|
||||
|
||||
|
||||
def _validate_node_count(ng):
|
||||
if ng.max_node_count:
|
||||
if ng.max_node_count < ng.min_node_count:
|
||||
expl = ("min_node_count (%s) should be less or equal to "
|
||||
"max_node_count (%s)" % (ng.min_node_count,
|
||||
ng.max_node_count))
|
||||
raise exception.NodeGroupInvalidInput(attr='max_node_count',
|
||||
nodegroup=ng.name,
|
||||
expl=expl)
|
||||
if ng.node_count > ng.max_node_count:
|
||||
expl = ("node_count (%s) should be less or equal to "
|
||||
"max_node_count (%s)" % (ng.node_count,
|
||||
ng.max_node_count))
|
||||
raise exception.NodeGroupInvalidInput(attr='max_node_count',
|
||||
nodegroup=ng.name,
|
||||
expl=expl)
|
||||
if ng.min_node_count > ng.node_count:
|
||||
expl = ('min_node_count (%s) should be less or equal to '
|
||||
'node_count (%s)' % (ng.min_node_count, ng.node_count))
|
||||
raise exception.NodeGroupInvalidInput(attr='min_node_count',
|
||||
nodegroup=ng.name,
|
||||
expl=expl)
|
||||
|
||||
|
||||
class NodeGroup(base.APIBase):
|
||||
@ -52,7 +80,10 @@ class NodeGroup(base.APIBase):
|
||||
docker_volume_size = wtypes.IntegerType(minimum=1)
|
||||
"""The size in GB of the docker volume"""
|
||||
|
||||
labels = wtypes.DictType(str, str)
|
||||
labels = wtypes.DictType(wtypes.text, types.MultiType(wtypes.text,
|
||||
six.integer_types,
|
||||
bool,
|
||||
float))
|
||||
"""One or more key/value pairs"""
|
||||
|
||||
links = wsme.wsattr([link.Link], readonly=True)
|
||||
@ -70,7 +101,8 @@ class NodeGroup(base.APIBase):
|
||||
node_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1)
|
||||
"""The node count for this nodegroup. Default to 1 if not set"""
|
||||
|
||||
role = wtypes.StringType(min_length=1, max_length=255)
|
||||
role = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
|
||||
default='worker')
|
||||
"""The role of the nodes included in this nodegroup"""
|
||||
|
||||
min_node_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1)
|
||||
@ -82,6 +114,18 @@ class NodeGroup(base.APIBase):
|
||||
is_default = types.BooleanType()
|
||||
"""Specifies is a nodegroup was created by default or not"""
|
||||
|
||||
stack_id = wsme.wsattr(wtypes.text, readonly=True)
|
||||
"""Stack id of the heat stack"""
|
||||
|
||||
status = wtypes.Enum(wtypes.text, *fields.ClusterStatus.ALL)
|
||||
"""Status of the nodegroup from the heat stack"""
|
||||
|
||||
status_reason = wtypes.text
|
||||
"""Status reason of the nodegroup from the heat stack"""
|
||||
|
||||
version = wtypes.text
|
||||
"""Version of the nodegroup"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(NodeGroup, self).__init__()
|
||||
self.fields = []
|
||||
@ -101,7 +145,8 @@ class NodeGroup(base.APIBase):
|
||||
ng = NodeGroup(**nodegroup.as_dict())
|
||||
if not expand:
|
||||
ng.unset_fields_except(["uuid", "name", "flavor_id", "node_count",
|
||||
"role", "is_default", "image_id"])
|
||||
"role", "is_default", "image_id", "status",
|
||||
"stack_id"])
|
||||
else:
|
||||
ng.links = [link.Link.make_link('self', url, cluster_path,
|
||||
nodegroup_path),
|
||||
@ -111,6 +156,20 @@ class NodeGroup(base.APIBase):
|
||||
return ng
|
||||
|
||||
|
||||
class NodeGroupPatchType(types.JsonPatchType):
|
||||
_api_base = NodeGroup
|
||||
|
||||
@staticmethod
|
||||
def internal_attrs():
|
||||
# Allow updating only min/max_node_count
|
||||
internal_attrs = ["/name", "/cluster_id", "/project_id",
|
||||
"/docker_volume_size", "/labels", "/flavor_id",
|
||||
"/image_id", "/node_addresses", "/node_count",
|
||||
"/role", "/is_default", "/stack_id", "/status",
|
||||
"/status_reason", "/version"]
|
||||
return types.JsonPatchType.internal_attrs() + internal_attrs
|
||||
|
||||
|
||||
class NodeGroupCollection(collection.Collection):
|
||||
"""API representation of a collection of Node Groups."""
|
||||
|
||||
@ -145,14 +204,14 @@ class NodeGroupController(base.Controller):
|
||||
|
||||
marker_obj = None
|
||||
if marker:
|
||||
marker_obj = objects.NodeGroup.list(pecan.request.context,
|
||||
cluster_id,
|
||||
marker)
|
||||
marker_obj = objects.NodeGroup.get(pecan.request.context,
|
||||
cluster_id,
|
||||
marker)
|
||||
|
||||
nodegroups = objects.NodeGroup.list(pecan.request.context,
|
||||
cluster_id,
|
||||
limit,
|
||||
marker_obj,
|
||||
limit=limit,
|
||||
marker=marker_obj,
|
||||
sort_key=sort_key,
|
||||
sort_dir=sort_dir,
|
||||
filters=filters)
|
||||
@ -217,3 +276,98 @@ class NodeGroupController(base.Controller):
|
||||
cluster = api_utils.get_resource('Cluster', cluster_id)
|
||||
nodegroup = objects.NodeGroup.get(context, cluster.uuid, nodegroup_id)
|
||||
return NodeGroup.convert(nodegroup)
|
||||
|
||||
@expose.expose(NodeGroup, types.uuid_or_name, NodeGroup, body=NodeGroup,
|
||||
status_code=202)
|
||||
def post(self, cluster_id, nodegroup):
|
||||
"""Create NodeGroup.
|
||||
|
||||
:param nodegroup: a json document to create this NodeGroup.
|
||||
"""
|
||||
|
||||
context = pecan.request.context
|
||||
policy.enforce(context, 'nodegroup:create', action='nodegroup:create')
|
||||
|
||||
cluster = api_utils.get_resource('Cluster', cluster_id)
|
||||
cluster_ngs = [ng.name for ng in cluster.nodegroups]
|
||||
if nodegroup.name in cluster_ngs:
|
||||
raise exception.NodeGroupAlreadyExists(name=nodegroup.name,
|
||||
cluster_id=cluster.name)
|
||||
_validate_node_count(nodegroup)
|
||||
|
||||
if nodegroup.role == "master":
|
||||
# Currently we don't support adding master nodegroups.
|
||||
# Keep this until we start supporting it.
|
||||
raise exception.CreateMasterNodeGroup()
|
||||
if nodegroup.image_id is None or nodegroup.image_id == wtypes.Unset:
|
||||
nodegroup.image_id = cluster.cluster_template.image_id
|
||||
if nodegroup.flavor_id is None or nodegroup.flavor_id == wtypes.Unset:
|
||||
nodegroup.flavor_id = cluster.flavor_id
|
||||
if nodegroup.labels is None or nodegroup.labels == wtypes.Unset:
|
||||
nodegroup.labels = cluster.labels
|
||||
|
||||
nodegroup_dict = nodegroup.as_dict()
|
||||
nodegroup_dict['cluster_id'] = cluster.uuid
|
||||
nodegroup_dict['project_id'] = context.project_id
|
||||
|
||||
new_obj = objects.NodeGroup(context, **nodegroup_dict)
|
||||
new_obj.uuid = uuid.uuid4()
|
||||
pecan.request.rpcapi.nodegroup_create_async(cluster, new_obj)
|
||||
return NodeGroup.convert(new_obj)
|
||||
|
||||
@expose.expose(NodeGroup, types.uuid_or_name, types.uuid_or_name,
|
||||
body=[NodeGroupPatchType], status_code=202)
|
||||
def patch(self, cluster_id, nodegroup_id, patch):
|
||||
"""Update NodeGroup.
|
||||
|
||||
:param cluster_id: cluster id.
|
||||
:param : resource name.
|
||||
:param values: a json document to update a nodegroup.
|
||||
"""
|
||||
cluster = api_utils.get_resource('Cluster', cluster_id)
|
||||
nodegroup = self._patch(cluster.uuid, nodegroup_id, patch)
|
||||
pecan.request.rpcapi.nodegroup_update_async(cluster, nodegroup)
|
||||
return NodeGroup.convert(nodegroup)
|
||||
|
||||
@expose.expose(None, types.uuid_or_name, types.uuid_or_name,
|
||||
status_code=204)
|
||||
def delete(self, cluster_id, nodegroup_id):
|
||||
"""Delete NodeGroup for a given project_id and resource.
|
||||
|
||||
:param cluster_id: cluster id.
|
||||
:param nodegroup_id: resource name.
|
||||
"""
|
||||
context = pecan.request.context
|
||||
policy.enforce(context, 'nodegroup:delete', action='nodegroup:delete')
|
||||
cluster = api_utils.get_resource('Cluster', cluster_id)
|
||||
nodegroup = objects.NodeGroup.get(context, cluster.uuid, nodegroup_id)
|
||||
if nodegroup.is_default:
|
||||
raise exception.DeletingDefaultNGNotSupported()
|
||||
pecan.request.rpcapi.nodegroup_delete_async(cluster, nodegroup)
|
||||
|
||||
def _patch(self, cluster_uuid, nodegroup_id, patch):
|
||||
context = pecan.request.context
|
||||
policy.enforce(context, 'nodegroup:update', action='nodegroup:update')
|
||||
nodegroup = objects.NodeGroup.get(context, cluster_uuid, nodegroup_id)
|
||||
|
||||
try:
|
||||
ng_dict = nodegroup.as_dict()
|
||||
new_nodegroup = NodeGroup(**api_utils.apply_jsonpatch(ng_dict,
|
||||
patch))
|
||||
except api_utils.JSONPATCH_EXCEPTIONS as e:
|
||||
raise exception.PatchError(patch=patch, reason=e)
|
||||
|
||||
# Update only the fields that have changed
|
||||
for field in objects.NodeGroup.fields:
|
||||
try:
|
||||
patch_val = getattr(new_nodegroup, field)
|
||||
except AttributeError:
|
||||
# Ignore fields that aren't exposed in the API
|
||||
continue
|
||||
if patch_val == wtypes.Unset:
|
||||
patch_val = None
|
||||
if nodegroup[field] != patch_val:
|
||||
nodegroup[field] = patch_val
|
||||
_validate_node_count(nodegroup)
|
||||
|
||||
return nodegroup
|
||||
|
@ -30,6 +30,7 @@ from magnum.conductor.handlers import cluster_conductor
|
||||
from magnum.conductor.handlers import conductor_listener
|
||||
from magnum.conductor.handlers import federation_conductor
|
||||
from magnum.conductor.handlers import indirection_api
|
||||
from magnum.conductor.handlers import nodegroup_conductor
|
||||
import magnum.conf
|
||||
from magnum import version
|
||||
|
||||
@ -53,6 +54,7 @@ def main():
|
||||
conductor_listener.Handler(),
|
||||
ca_conductor.Handler(),
|
||||
federation_conductor.Handler(),
|
||||
nodegroup_conductor.Handler(),
|
||||
]
|
||||
|
||||
server = rpc_service.Service.create(CONF.conductor.topic,
|
||||
|
@ -437,5 +437,22 @@ class MasterNGResizeNotSupported(NotSupported):
|
||||
|
||||
class NGResizeOutBounds(Invalid):
|
||||
message = _("Resizing %(nodegroup)s outside the allowed range: "
|
||||
"min_node_count = %(min_node_count)s, "
|
||||
"max_node_count = %(max_node_count)s")
|
||||
"min_node_count = %(min_nc)s, "
|
||||
"max_node_count = %(max_nc)s")
|
||||
|
||||
|
||||
class DeletingDefaultNGNotSupported(NotSupported):
|
||||
message = _("Deleting a default nodegroup is not supported.")
|
||||
|
||||
|
||||
class NodeGroupInvalidInput(Conflict):
|
||||
message = _("%(attr)s for %(nodegroup)s is invalid (%(expl)s).")
|
||||
|
||||
|
||||
class CreateMasterNodeGroup(NotSupported):
|
||||
message = _("Creating master nodegroups is currently not supported.")
|
||||
|
||||
|
||||
class NgOperationInProgress(Invalid):
|
||||
message = _("Nodegroup %(nodegroup)s already has an operation in "
|
||||
"progress.")
|
||||
|
@ -66,6 +66,39 @@ rules = [
|
||||
}
|
||||
]
|
||||
),
|
||||
policy.DocumentedRuleDefault(
|
||||
name=NODEGROUP % 'create',
|
||||
check_str=base.RULE_ADMIN_OR_OWNER,
|
||||
description='Create a new nodegroup.',
|
||||
operations=[
|
||||
{
|
||||
'path': '/v1/clusters/{cluster_id}/nodegroups/',
|
||||
'method': 'POST'
|
||||
}
|
||||
]
|
||||
),
|
||||
policy.DocumentedRuleDefault(
|
||||
name=NODEGROUP % 'delete',
|
||||
check_str=base.RULE_ADMIN_OR_OWNER,
|
||||
description='Delete a nodegroup.',
|
||||
operations=[
|
||||
{
|
||||
'path': '/v1/clusters/{cluster_id}/nodegroups/{nodegroup}',
|
||||
'method': 'DELETE'
|
||||
}
|
||||
]
|
||||
),
|
||||
policy.DocumentedRuleDefault(
|
||||
name=NODEGROUP % 'update',
|
||||
check_str=base.RULE_ADMIN_OR_OWNER,
|
||||
description='Update an existing nodegroup.',
|
||||
operations=[
|
||||
{
|
||||
'path': '/v1/clusters/{cluster_id}/nodegroups/{nodegroup}',
|
||||
'method': 'PATCH'
|
||||
}
|
||||
]
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
|
@ -144,6 +144,29 @@ class API(rpc_service.API):
|
||||
return self._client.call(context, 'object_backport', objinst=objinst,
|
||||
target_version=target_version)
|
||||
|
||||
# NodeGroup Operations
|
||||
|
||||
def nodegroup_create(self, cluster, nodegroup):
|
||||
return self._call('nodegroup_create', cluster=cluster,
|
||||
nodegroup=nodegroup)
|
||||
|
||||
def nodegroup_create_async(self, cluster, nodegroup):
|
||||
self._cast('nodegroup_create', cluster=cluster, nodegroup=nodegroup)
|
||||
|
||||
def nodegroup_delete(self, cluster, nodegroup):
|
||||
return self._call('nodegroup_delete', cluster=cluster,
|
||||
nodegroup=nodegroup)
|
||||
|
||||
def nodegroup_delete_async(self, cluster, nodegroup):
|
||||
self._cast('nodegroup_delete', cluster=cluster, nodegroup=nodegroup)
|
||||
|
||||
def nodegroup_update(self, cluster, nodegroup):
|
||||
return self._call('nodegroup_update', cluster=cluster,
|
||||
nodegroup=nodegroup)
|
||||
|
||||
def nodegroup_update_async(self, cluster, nodegroup):
|
||||
self._cast('nodegroup_update', cluster=cluster, nodegroup=nodegroup)
|
||||
|
||||
|
||||
@profiler.trace_cls("rpc")
|
||||
class ListenerAPI(rpc_service.API):
|
||||
|
152
magnum/conductor/handlers/nodegroup_conductor.py
Normal file
152
magnum/conductor/handlers/nodegroup_conductor.py
Normal file
@ -0,0 +1,152 @@
|
||||
# Copyright (c) 2018 European Organization for Nuclear Research.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import functools
|
||||
|
||||
from heatclient import exc
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
|
||||
from magnum.common import exception
|
||||
from magnum.common import profiler
|
||||
import magnum.conf
|
||||
from magnum.drivers.common import driver
|
||||
from magnum.i18n import _
|
||||
from magnum.objects import fields
|
||||
|
||||
CONF = magnum.conf.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# TODO(ttsiouts): notifications about nodegroup operations will be
|
||||
# added in later commit.
|
||||
|
||||
|
||||
ALLOWED_NODEGROUP_STATES = (
|
||||
fields.ClusterStatus.CREATE_COMPLETE,
|
||||
fields.ClusterStatus.UPDATE_COMPLETE,
|
||||
fields.ClusterStatus.UPDATE_IN_PROGRESS,
|
||||
fields.ClusterStatus.UPDATE_FAILED,
|
||||
fields.ClusterStatus.RESUME_COMPLETE,
|
||||
fields.ClusterStatus.RESTORE_COMPLETE,
|
||||
fields.ClusterStatus.ROLLBACK_COMPLETE,
|
||||
fields.ClusterStatus.SNAPSHOT_COMPLETE,
|
||||
fields.ClusterStatus.CHECK_COMPLETE,
|
||||
fields.ClusterStatus.ADOPT_COMPLETE
|
||||
)
|
||||
|
||||
|
||||
def allowed_operation(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(self, context, cluster, nodegroup, *args, **kwargs):
|
||||
# Before we begin we need to check the status
|
||||
# of the cluster. If the cluster is in a status
|
||||
# that does not allow nodegroup creation we just
|
||||
# fail.
|
||||
if ('status' in nodegroup
|
||||
and nodegroup.status not in ALLOWED_NODEGROUP_STATES):
|
||||
operation = _(
|
||||
'%(fname)s when nodegroup status is "%(status)s"'
|
||||
) % {'fname': func.__name__, 'status': cluster.status}
|
||||
raise exception.NotSupported(operation=operation)
|
||||
return func(self, context, cluster, nodegroup, *args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
@profiler.trace_cls("rpc")
|
||||
class Handler(object):
|
||||
|
||||
@allowed_operation
|
||||
def nodegroup_create(self, context, cluster, nodegroup):
|
||||
LOG.debug("nodegroup_conductor nodegroup_create")
|
||||
cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
|
||||
cluster.save()
|
||||
nodegroup.status = fields.ClusterStatus.CREATE_IN_PROGRESS
|
||||
nodegroup.create()
|
||||
|
||||
try:
|
||||
cluster_driver = driver.Driver.get_driver_for_cluster(context,
|
||||
cluster)
|
||||
cluster_driver.create_nodegroup(context, cluster, nodegroup)
|
||||
nodegroup.save()
|
||||
except Exception as e:
|
||||
nodegroup.status = fields.ClusterStatus.CREATE_FAILED
|
||||
nodegroup.status_reason = six.text_type(e)
|
||||
nodegroup.save()
|
||||
cluster.status = fields.ClusterStatus.UPDATE_FAILED
|
||||
cluster.save()
|
||||
if isinstance(e, exc.HTTPBadRequest):
|
||||
e = exception.InvalidParameterValue(message=six.text_type(e))
|
||||
raise e
|
||||
raise
|
||||
return nodegroup
|
||||
|
||||
@allowed_operation
|
||||
def nodegroup_update(self, context, cluster, nodegroup):
|
||||
LOG.debug("nodegroup_conductor nodegroup_update")
|
||||
cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
|
||||
cluster.save()
|
||||
nodegroup.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
|
||||
|
||||
try:
|
||||
cluster_driver = driver.Driver.get_driver_for_cluster(context,
|
||||
cluster)
|
||||
cluster_driver.update_nodegroup(context, cluster, nodegroup)
|
||||
nodegroup.save()
|
||||
except Exception as e:
|
||||
nodegroup.status = fields.ClusterStatus.UPDATE_FAILED
|
||||
nodegroup.status_reason = six.text_type(e)
|
||||
nodegroup.save()
|
||||
cluster.status = fields.ClusterStatus.UPDATE_FAILED
|
||||
cluster.save()
|
||||
if isinstance(e, exc.HTTPBadRequest):
|
||||
e = exception.InvalidParameterValue(message=six.text_type(e))
|
||||
raise e
|
||||
raise
|
||||
|
||||
return nodegroup
|
||||
|
||||
def nodegroup_delete(self, context, cluster, nodegroup):
|
||||
LOG.debug("nodegroup_conductor nodegroup_delete")
|
||||
cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
|
||||
cluster.save()
|
||||
nodegroup.status = fields.ClusterStatus.DELETE_IN_PROGRESS
|
||||
|
||||
try:
|
||||
cluster_driver = driver.Driver.get_driver_for_cluster(context,
|
||||
cluster)
|
||||
cluster_driver.delete_nodegroup(context, cluster, nodegroup)
|
||||
except exc.HTTPNotFound:
|
||||
LOG.info('The nodegroup %s was not found during nodegroup'
|
||||
' deletion.', nodegroup.uuid)
|
||||
try:
|
||||
nodegroup.destroy()
|
||||
except exception.NodeGroupNotFound:
|
||||
LOG.info('The nodegroup %s has been deleted by others.',
|
||||
nodegroup.uuid)
|
||||
return None
|
||||
except exc.HTTPConflict:
|
||||
raise exception.NgOperationInProgress(nodegroup=nodegroup.name)
|
||||
except Exception as e:
|
||||
nodegroup.status = fields.ClusterStatus.DELETE_FAILED
|
||||
nodegroup.status_reason = six.text_type(e)
|
||||
nodegroup.save()
|
||||
cluster.status = fields.ClusterStatus.UPDATE_FAILED
|
||||
cluster.save()
|
||||
raise
|
||||
return None
|
@ -23,6 +23,7 @@ from magnum.common import clients
|
||||
from magnum.common import rpc
|
||||
from magnum.objects import cluster
|
||||
from magnum.objects import cluster_template
|
||||
from magnum.objects import fields
|
||||
from magnum.objects import nodegroup
|
||||
|
||||
|
||||
@ -184,4 +185,5 @@ def _get_nodegroup_object(context, cluster, node_count, is_master=False):
|
||||
ng.role = "worker"
|
||||
ng.name = "default-%s" % ng.role
|
||||
ng.is_default = True
|
||||
ng.status = fields.ClusterStatus.CREATE_IN_PROGRESS
|
||||
return ng
|
||||
|
@ -209,6 +209,21 @@ class Driver(object):
|
||||
raise NotImplementedError("Subclasses must implement "
|
||||
"'delete_federation'.")
|
||||
|
||||
@abc.abstractmethod
|
||||
def create_nodegroup(self, context, cluster, nodegroup):
|
||||
raise NotImplementedError("Subclasses must implement "
|
||||
"'create_nodegroup'.")
|
||||
|
||||
@abc.abstractmethod
|
||||
def update_nodegroup(self, context, cluster, nodegroup):
|
||||
raise NotImplementedError("Subclasses must implement "
|
||||
"'update_nodegroup'.")
|
||||
|
||||
@abc.abstractmethod
|
||||
def delete_nodegroup(self, context, cluster, nodegroup):
|
||||
raise NotImplementedError("Subclasses must implement "
|
||||
"'delete_nodegroup'.")
|
||||
|
||||
def get_monitor(self, context, cluster):
|
||||
"""return the monitor with container data for this driver."""
|
||||
|
||||
|
@ -96,6 +96,15 @@ class HeatDriver(driver.Driver):
|
||||
def delete_federation(self, context, federation):
|
||||
return NotImplementedError("Must implement 'delete_federation'")
|
||||
|
||||
def create_nodegroup(self, context, cluster, nodegroup):
|
||||
raise NotImplementedError("Must implement 'create_nodegroup'.")
|
||||
|
||||
def update_nodegroup(self, context, cluster, nodegroup):
|
||||
raise NotImplementedError("Must implement 'update_nodegroup'.")
|
||||
|
||||
def delete_nodegroup(self, context, cluster, nodegroup):
|
||||
raise NotImplementedError("Must implement 'delete_nodegroup'.")
|
||||
|
||||
def update_cluster_status(self, context, cluster):
|
||||
if cluster.stack_id is None:
|
||||
# NOTE(mgoddard): During cluster creation it is possible to poll
|
||||
|
@ -89,30 +89,15 @@ class TestListCluster(api_base.FunctionalTest):
|
||||
self.assertEqual(cluster.uuid, response['uuid'])
|
||||
self._verify_attrs(self._expand_cluster_attrs, response)
|
||||
|
||||
@mock.patch('magnum.common.clients.OpenStackClients.heat')
|
||||
def test_get_one_failed_cluster(self, mock_heat):
|
||||
fake_resources = mock.MagicMock()
|
||||
fake_resources.resource_name = 'fake_name'
|
||||
fake_resources.resource_status_reason = 'fake_reason'
|
||||
|
||||
ht = mock.MagicMock()
|
||||
ht.resources.list.return_value = [fake_resources]
|
||||
mock_heat.return_value = ht
|
||||
|
||||
def test_get_one_failed_cluster(self):
|
||||
cluster = obj_utils.create_test_cluster(self.context,
|
||||
status='CREATE_FAILED')
|
||||
status='CREATE_FAILED',
|
||||
master_status='CREATE_FAILED',
|
||||
master_reason='fake_reason')
|
||||
response = self.get_json('/clusters/%s' % cluster['uuid'])
|
||||
expected_faults = {cluster.default_ng_master.name: 'fake_reason'}
|
||||
self.assertEqual(cluster.uuid, response['uuid'])
|
||||
self.assertEqual({'fake_name': 'fake_reason'}, response['faults'])
|
||||
|
||||
@mock.patch('magnum.common.clients.OpenStackClients.heat')
|
||||
def test_get_one_failed_cluster_heatclient_exception(self, mock_heat):
|
||||
mock_heat.resources.list.side_effect = Exception('fake')
|
||||
cluster = obj_utils.create_test_cluster(self.context,
|
||||
status='CREATE_FAILED')
|
||||
response = self.get_json('/clusters/%s' % cluster['uuid'])
|
||||
self.assertEqual(cluster.uuid, response['uuid'])
|
||||
self.assertEqual({}, response['faults'])
|
||||
self.assertEqual(expected_faults, response['faults'])
|
||||
|
||||
def test_get_one_by_name(self):
|
||||
cluster = obj_utils.create_test_cluster(self.context)
|
||||
|
@ -13,11 +13,14 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
import mock
|
||||
|
||||
from oslo_utils import timeutils
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from magnum.api.controllers.v1 import nodegroup as api_nodegroup
|
||||
from magnum.conductor import api as rpcapi
|
||||
import magnum.conf
|
||||
from magnum import objects
|
||||
from magnum.tests import base
|
||||
@ -86,6 +89,13 @@ class TestListNodegroups(api_base.FunctionalTest):
|
||||
expected = [ng.uuid for ng in self.cluster.nodegroups]
|
||||
self._test_list_nodegroups(self.cluster.name, expected=expected)
|
||||
|
||||
def test_get_all_with_pagination_marker(self):
|
||||
ng_uuid = self.cluster.default_ng_master.uuid
|
||||
url = '/clusters/%s/nodegroups?limit=1&marker=1' % (self.cluster_uuid)
|
||||
response = self.get_json(url)
|
||||
self.assertEqual(1, len(response['nodegroups']))
|
||||
self.assertEqual(ng_uuid, response['nodegroups'][0]['uuid'])
|
||||
|
||||
def test_get_all_by_role(self):
|
||||
filters = {'role': 'master'}
|
||||
expected = [self.cluster.default_ng_master.uuid]
|
||||
@ -147,6 +157,403 @@ class TestListNodegroups(api_base.FunctionalTest):
|
||||
self._verify_attrs(self._expanded_attrs, response)
|
||||
|
||||
|
||||
class TestPost(api_base.FunctionalTest):
|
||||
def setUp(self):
|
||||
super(TestPost, self).setUp()
|
||||
self.cluster_template = obj_utils.create_test_cluster_template(
|
||||
self.context)
|
||||
self.cluster = obj_utils.create_test_cluster(self.context)
|
||||
self.cluster.refresh()
|
||||
p = mock.patch.object(rpcapi.API, 'nodegroup_create_async')
|
||||
self.mock_ng_create = p.start()
|
||||
self.mock_ng_create.side_effect = self._simulate_nodegroup_create
|
||||
self.addCleanup(p.stop)
|
||||
self.url = "/clusters/%s/nodegroups" % self.cluster.uuid
|
||||
|
||||
def _simulate_nodegroup_create(self, cluster, nodegroup):
|
||||
nodegroup.create()
|
||||
return nodegroup
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.utcnow')
|
||||
def test_create_nodegroup(self, mock_utcnow):
|
||||
ng_dict = apiutils.nodegroup_post_data()
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
|
||||
response = self.post_json(self.url, ng_dict)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(202, response.status_int)
|
||||
self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))
|
||||
self.assertFalse(response.json['is_default'])
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.utcnow')
|
||||
def test_create_nodegroup_without_node_count(self, mock_utcnow):
|
||||
ng_dict = apiutils.nodegroup_post_data()
|
||||
del ng_dict['node_count']
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
|
||||
response = self.post_json(self.url, ng_dict)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(202, response.status_int)
|
||||
# Verify node_count defaults to 1
|
||||
self.assertEqual(1, response.json['node_count'])
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.utcnow')
|
||||
def test_create_nodegroup_with_max_node_count(self, mock_utcnow):
|
||||
ng_dict = apiutils.nodegroup_post_data(max_node_count=5)
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
|
||||
response = self.post_json(self.url, ng_dict)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(202, response.status_int)
|
||||
self.assertEqual(5, response.json['max_node_count'])
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.utcnow')
|
||||
def test_create_nodegroup_with_role(self, mock_utcnow):
|
||||
ng_dict = apiutils.nodegroup_post_data(role='test-role')
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
|
||||
response = self.post_json(self.url, ng_dict)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(202, response.status_int)
|
||||
self.assertEqual('test-role', response.json['role'])
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.utcnow')
|
||||
def test_create_nodegroup_with_labels(self, mock_utcnow):
|
||||
labels = {'label1': 'value1'}
|
||||
ng_dict = apiutils.nodegroup_post_data(labels=labels)
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
|
||||
response = self.post_json(self.url, ng_dict)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(202, response.status_int)
|
||||
self.assertEqual(labels, response.json['labels'])
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.utcnow')
|
||||
def test_create_nodegroup_with_image_id(self, mock_utcnow):
|
||||
ng_dict = apiutils.nodegroup_post_data(image_id='test_image')
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
|
||||
response = self.post_json(self.url, ng_dict)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(202, response.status_int)
|
||||
self.assertEqual('test_image', response.json['image_id'])
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.utcnow')
|
||||
def test_create_nodegroup_with_flavor(self, mock_utcnow):
|
||||
ng_dict = apiutils.nodegroup_post_data(flavor_id='test_flavor')
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
|
||||
response = self.post_json(self.url, ng_dict)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(202, response.status_int)
|
||||
self.assertEqual('test_flavor', response.json['flavor_id'])
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.utcnow')
|
||||
def test_create_nodegroup_only_name(self, mock_utcnow):
|
||||
ng_dict = {'name': 'test_ng'}
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
mock_utcnow.return_value = test_time
|
||||
|
||||
response = self.post_json(self.url, ng_dict)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(202, response.status_int)
|
||||
self.assertEqual('worker', response.json['role'])
|
||||
self.assertEqual(self.cluster_template.image_id,
|
||||
response.json['image_id'])
|
||||
self.assertEqual(self.cluster.flavor_id, response.json['flavor_id'])
|
||||
self.assertEqual(self.cluster.uuid, response.json['cluster_id'])
|
||||
self.assertEqual(self.cluster.project_id, response.json['project_id'])
|
||||
self.assertEqual(self.cluster.labels, response.json['labels'])
|
||||
self.assertEqual('worker', response.json['role'])
|
||||
self.assertEqual(1, response.json['min_node_count'])
|
||||
self.assertEqual(1, response.json['node_count'])
|
||||
self.assertIsNone(response.json['max_node_count'])
|
||||
|
||||
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_nodegroup_invalid_node_count(self, mock_utcnow):
    """node_count outside [min_node_count, max_node_count] yields 409."""
    mock_utcnow.return_value = datetime.datetime(2000, 1, 1, 0, 0)

    # Case 1: node_count above max_node_count.
    over_max = apiutils.nodegroup_post_data(node_count=7, max_node_count=5)
    response = self.post_json(self.url, over_max, expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(409, response.status_int)

    # Case 2: node_count below min_node_count.
    under_min = apiutils.nodegroup_post_data(node_count=2, min_node_count=3)
    response = self.post_json(self.url, under_min, expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(409, response.status_int)
|
||||
|
||||
def test_create_master_ng(self):
    """Creating a 'master' role nodegroup through the API is a 400.

    The previous version patched oslo_utils.timeutils.utcnow but never
    configured or consulted it; the unused mock has been dropped.
    """
    ng_dict = apiutils.nodegroup_post_data(role='master')
    response = self.post_json(self.url, ng_dict, expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(400, response.status_int)
|
||||
|
||||
def test_create_ng_same_name(self):
    """A nodegroup name colliding with an existing one yields 409.

    The previous version patched oslo_utils.timeutils.utcnow but never
    configured or consulted it; the unused mock has been dropped.
    """
    existing_name = self.cluster.default_ng_master.name
    ng_dict = apiutils.nodegroup_post_data(name=existing_name)
    response = self.post_json(self.url, ng_dict, expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(409, response.status_int)
|
||||
|
||||
|
||||
class TestDelete(api_base.FunctionalTest):
    """Exercise DELETE /clusters/<cluster>/nodegroups/<nodegroup>."""

    def setUp(self):
        super(TestDelete, self).setUp()
        # A cluster (and its template) to own the nodegroup under test.
        self.cluster_template = obj_utils.create_test_cluster_template(
            self.context)
        self.cluster = obj_utils.create_test_cluster(self.context)
        self.cluster.refresh()
        # Must be non-default: default nodegroups cannot be deleted.
        self.nodegroup = obj_utils.create_test_nodegroup(
            self.context, cluster_id=self.cluster.uuid, is_default=False)
        # Short-circuit the conductor RPC and destroy the object directly.
        patcher = mock.patch.object(rpcapi.API, 'nodegroup_delete_async')
        self.mock_ng_delete = patcher.start()
        self.mock_ng_delete.side_effect = self._simulate_nodegroup_delete
        self.addCleanup(patcher.stop)
        self.url = "/clusters/%s/nodegroups/" % self.cluster.uuid

    def _simulate_nodegroup_delete(self, cluster, nodegroup):
        # Stand-in for the asynchronous conductor call.
        nodegroup.destroy()

    def _assert_deleted(self, ident):
        # Delete by uuid or name, then verify a follow-up GET 404s.
        delete_resp = self.delete(self.url + ident)
        self.assertEqual(204, delete_resp.status_int)
        get_resp = self.get_json(self.url + ident, expect_errors=True)
        self.assertEqual(404, get_resp.status_int)
        self.assertEqual('application/json', get_resp.content_type)
        self.assertIsNotNone(get_resp.json['errors'])

    def test_delete_nodegroup(self):
        self._assert_deleted(self.nodegroup.uuid)

    def test_delete_nodegroup_by_name(self):
        self._assert_deleted(self.nodegroup.name)

    def test_delete_not_found(self):
        missing = uuidutils.generate_uuid()
        response = self.delete(self.url + missing, expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIsNotNone(response.json['errors'])

    def test_delete_by_name_not_found(self):
        response = self.delete(self.url + "not-there", expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIsNotNone(response.json['errors'])

    def test_delete_default_nodegroup(self):
        # Default nodegroups are protected from deletion.
        response = self.delete(self.url + self.cluster.default_ng_master.uuid,
                               expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertIsNotNone(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_delete_nodegroup_as_admin(self, mock_context, mock_policy):
        # An admin may delete a nodegroup owned by another project.
        cluster_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(self.context, uuid=cluster_uuid,
                                      project_id='fake', name='test-fake')
        ng_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_nodegroup(self.context, uuid=ng_uuid,
                                        cluster_id=cluster_uuid,
                                        is_default=False,
                                        project_id='fake', id=50)
        self.context.is_admin = True
        url = '/clusters/%s/nodegroups/%s' % (cluster_uuid, ng_uuid)
        response = self.delete(url)
        self.assertEqual(204, response.status_int)
|
||||
|
||||
|
||||
class TestPatch(api_base.FunctionalTest):
    """Exercise PATCH /clusters/<cluster>/nodegroups/<nodegroup>."""

    def setUp(self):
        super(TestPatch, self).setUp()
        self.cluster_template = obj_utils.create_test_cluster_template(
            self.context)
        self.cluster = obj_utils.create_test_cluster(self.context)
        self.cluster.refresh()
        # Bounds [2, 5] leave room to violate either limit in the tests.
        self.nodegroup = obj_utils.create_test_nodegroup(
            self.context, cluster_id=self.cluster.uuid, is_default=False,
            min_node_count=2, max_node_count=5, node_count=2)
        # Short-circuit the conductor RPC and persist the update directly.
        patcher = mock.patch.object(rpcapi.API, 'nodegroup_update_async')
        self.mock_ng_update = patcher.start()
        self.mock_ng_update.side_effect = self._simulate_nodegroup_update
        self.addCleanup(patcher.stop)
        self.url = "/clusters/%s/nodegroups/" % self.cluster.uuid

    def _simulate_nodegroup_update(self, cluster, nodegroup):
        # Stand-in for the asynchronous conductor call.
        nodegroup.save()
        return nodegroup

    def _patch(self, ident, patch_doc, expect_errors=False):
        # Issue a single-operation JSON-patch against one nodegroup.
        return self.patch_json(self.url + ident, [patch_doc],
                               expect_errors=expect_errors)

    def _assert_updated_at(self, ng_json, expected):
        # updated_at comes back as an ISO string; compare it naive.
        updated = timeutils.parse_isotime(
            ng_json['updated_at']).replace(tzinfo=None)
        self.assertEqual(expected, updated)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_replace_ok(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self._patch(self.nodegroup.uuid,
                               {'path': '/max_node_count', 'value': 4,
                                'op': 'replace'})
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        result = self.get_json(self.url + self.nodegroup.uuid)
        self.assertEqual(4, result['max_node_count'])
        self._assert_updated_at(result, test_time)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_replace_ok_by_name(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        # Same as test_replace_ok but addressing the nodegroup by name.
        response = self._patch(self.nodegroup.name,
                               {'path': '/max_node_count', 'value': 4,
                                'op': 'replace'})
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        result = self.get_json(self.url + self.nodegroup.uuid)
        self.assertEqual(4, result['max_node_count'])
        self._assert_updated_at(result, test_time)

    def test_replace_node_count_failed(self):
        # node_count is managed by the service; replacing it is a 400.
        response = self._patch(self.nodegroup.name,
                               {'path': '/node_count', 'value': 3,
                                'op': 'replace'},
                               expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertIsNotNone(response.json['errors'])

    def test_replace_max_node_count_failed(self):
        # min_node_count is 2; a max below the min must be rejected.
        response = self._patch(self.nodegroup.name,
                               {'path': '/max_node_count', 'value': 1,
                                'op': 'replace'},
                               expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(409, response.status_code)
        self.assertIsNotNone(response.json['errors'])

    def test_replace_min_node_count_failed(self):
        # node_count is 2; raising the min above it must be rejected.
        response = self._patch(self.nodegroup.name,
                               {'path': '/min_node_count', 'value': 3,
                                'op': 'replace'},
                               expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(409, response.status_code)
        self.assertIsNotNone(response.json['errors'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_remove_ok(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self._patch(self.nodegroup.name,
                               {'path': '/max_node_count', 'op': 'remove'})
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        result = self.get_json(self.url + self.nodegroup.uuid)
        self.assertIsNone(result['max_node_count'])
        self._assert_updated_at(result, test_time)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_remove_min_node_count(self, mock_utcnow):
        test_time = datetime.datetime(2000, 1, 1, 0, 0)
        mock_utcnow.return_value = test_time

        response = self._patch(self.nodegroup.name,
                               {'path': '/min_node_count', 'op': 'remove'})
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)

        result = self.get_json(self.url + self.nodegroup.uuid)
        # Removing min_node_count just restores the default value.
        self.assertEqual(1, result['min_node_count'])
        self._assert_updated_at(result, test_time)

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_remove_internal_attr(self, mock_utcnow):
        mock_utcnow.return_value = datetime.datetime(2000, 1, 1, 0, 0)

        # node_count is internal; it may not be removed.
        response = self._patch(self.nodegroup.name,
                               {'path': '/node_count', 'op': 'remove'},
                               expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertIsNotNone(response.json['errors'])

    @mock.patch('oslo_utils.timeutils.utcnow')
    def test_remove_non_existent_property(self, mock_utcnow):
        mock_utcnow.return_value = datetime.datetime(2000, 1, 1, 0, 0)

        response = self._patch(self.nodegroup.name,
                               {'path': '/not_there', 'op': 'remove'},
                               expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertIsNotNone(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_update_nodegroup_as_admin(self, mock_context, mock_policy):
        # An admin may patch a nodegroup owned by another project.
        cluster_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster(self.context, uuid=cluster_uuid,
                                      project_id='fake', name='test-fake')
        ng_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_nodegroup(self.context, uuid=ng_uuid,
                                        cluster_id=cluster_uuid,
                                        is_default=False,
                                        project_id='fake', id=50)
        self.context.is_admin = True
        url = '/clusters/%s/nodegroups/%s' % (cluster_uuid, ng_uuid)
        response = self.patch_json(url,
                                   [{'path': '/max_node_count',
                                     'value': 4,
                                     'op': 'replace'}])
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(202, response.status_code)
|
||||
|
||||
|
||||
class TestNodeGroupPolicyEnforcement(api_base.FunctionalTest):
|
||||
def setUp(self):
|
||||
super(TestNodeGroupPolicyEnforcement, self).setUp()
|
||||
|
@ -98,5 +98,8 @@ def federation_post_data(**kw):
|
||||
|
||||
|
||||
def nodegroup_post_data(**kw):
    """Return a nodegroup dict suitable for POSTing to the API.

    Builds a test nodegroup from **kw overrides and strips the fields
    that are server-populated (and therefore rejected on create).
    """
    internal = ['/cluster_id', '/project_id', '/node_addresses', '/is_default',
                '/created_at', '/updated_at', '/status', '/status_reason',
                '/version', '/stack_id']
    nodegroup = utils.get_test_nodegroup(**kw)
    # Bug fix: an early "return nodegroup" made the line below unreachable,
    # so internal fields were never stripped from the POST payload.
    return remove_internal(nodegroup, internal)
|
||||
|
218
magnum/tests/unit/conductor/handlers/test_nodegroup_conductor.py
Normal file
218
magnum/tests/unit/conductor/handlers/test_nodegroup_conductor.py
Normal file
@ -0,0 +1,218 @@
|
||||
# Copyright (c) 2018 European Organization for Nuclear Research.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
from mock import patch
|
||||
|
||||
from heatclient import exc
|
||||
|
||||
from magnum.common import exception
|
||||
from magnum.conductor.handlers import nodegroup_conductor
|
||||
from magnum.objects import fields
|
||||
from magnum.tests.unit.db import base as db_base
|
||||
from magnum.tests.unit.objects import utils as obj_utils
|
||||
|
||||
|
||||
class TestHandler(db_base.DbTestCase):
|
||||
|
||||
def setUp(self):
    """Create the conductor handler and a cluster/nodegroup to act on."""
    super(TestHandler, self).setUp()
    # Handler under test: the nodegroup conductor entry points.
    self.handler = nodegroup_conductor.Handler()
    # DB-backed fixtures; the nodegroup belongs to the cluster.
    self.cluster = obj_utils.create_test_cluster(self.context)
    self.nodegroup = obj_utils.create_test_nodegroup(
        self.context, cluster_id=self.cluster.uuid)
|
||||
|
||||
@patch('magnum.drivers.common.driver.Driver.get_driver')
def test_nodegroup_create(self, mock_get_driver):
    """Successful create: driver invoked, statuses move to IN_PROGRESS."""
    mock_driver = mock.MagicMock()
    mock_get_driver.return_value = mock_driver
    nodegroup = mock.MagicMock()

    self.handler.nodegroup_create(self.context, self.cluster, nodegroup)

    mock_driver.create_nodegroup.assert_called_once_with(
        self.context, self.cluster, nodegroup)
    nodegroup.create.assert_called_once()
    nodegroup.save.assert_called_once()
    # The cluster reports an ongoing update while the group builds.
    self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                     self.cluster.status)
    self.assertEqual(fields.ClusterStatus.CREATE_IN_PROGRESS,
                     nodegroup.status)
|
||||
|
||||
@patch('magnum.drivers.common.driver.Driver.get_driver')
def test_nodegroup_create_failed(self, mock_get_driver):
    """Driver error propagates; cluster and nodegroup end up FAILED."""
    mock_driver = mock.MagicMock()
    mock_get_driver.return_value = mock_driver
    mock_driver.create_nodegroup.side_effect = NotImplementedError(
        "Test failure")
    nodegroup = mock.MagicMock()

    self.assertRaises(NotImplementedError, self.handler.nodegroup_create,
                      self.context, self.cluster, nodegroup)

    mock_driver.create_nodegroup.assert_called_once_with(
        self.context, self.cluster, nodegroup)
    nodegroup.create.assert_called_once()
    nodegroup.save.assert_called_once()
    self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                     self.cluster.status)
    self.assertEqual(fields.ClusterStatus.CREATE_FAILED, nodegroup.status)
    # The exception text is recorded as the failure reason.
    self.assertEqual("Test failure", nodegroup.status_reason)
|
||||
|
||||
@patch('magnum.drivers.common.driver.Driver.get_driver')
def test_nodegroup_create_failed_bad_request(self, mock_get_driver):
    """Heat HTTPBadRequest is translated to InvalidParameterValue."""
    mock_driver = mock.MagicMock()
    mock_get_driver.return_value = mock_driver
    mock_driver.create_nodegroup.side_effect = exc.HTTPBadRequest(
        "Bad request")
    nodegroup = mock.MagicMock()

    self.assertRaises(exception.InvalidParameterValue,
                      self.handler.nodegroup_create,
                      self.context, self.cluster, nodegroup)

    mock_driver.create_nodegroup.assert_called_once_with(
        self.context, self.cluster, nodegroup)
    nodegroup.create.assert_called_once()
    nodegroup.save.assert_called_once()
    self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                     self.cluster.status)
    self.assertEqual(fields.ClusterStatus.CREATE_FAILED, nodegroup.status)
    # heatclient prefixes its message with "ERROR: ".
    self.assertEqual("ERROR: Bad request", nodegroup.status_reason)
|
||||
|
||||
@patch('magnum.drivers.common.driver.Driver.get_driver')
def test_nodegroup_udpate(self, mock_get_driver):
    # NOTE(review): "udpate" is a typo in the test name; kept so the
    # externally visible test id does not change.
    """Successful update: both statuses become UPDATE_IN_PROGRESS."""
    mock_driver = mock.MagicMock()
    mock_get_driver.return_value = mock_driver

    self.handler.nodegroup_update(self.context, self.cluster,
                                  self.nodegroup)

    mock_driver.update_nodegroup.assert_called_once_with(
        self.context, self.cluster, self.nodegroup)
    self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                     self.cluster.status)
    self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                     self.nodegroup.status)
|
||||
|
||||
@patch('magnum.drivers.common.driver.Driver.get_driver')
def test_nodegroup_update_failed(self, mock_get_driver):
    """Driver error propagates; both statuses become UPDATE_FAILED."""
    mock_driver = mock.MagicMock()
    mock_get_driver.return_value = mock_driver
    mock_driver.update_nodegroup.side_effect = NotImplementedError(
        "Update failed")

    self.assertRaises(NotImplementedError, self.handler.nodegroup_update,
                      self.context, self.cluster, self.nodegroup)

    mock_driver.update_nodegroup.assert_called_once_with(
        self.context, self.cluster, self.nodegroup)
    self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                     self.cluster.status)
    self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                     self.nodegroup.status)
    # The exception text is recorded as the failure reason.
    self.assertEqual("Update failed", self.nodegroup.status_reason)
|
||||
|
||||
@patch('magnum.drivers.common.driver.Driver.get_driver')
def test_nodegroup_update_failed_bad_request(self, mock_get_driver):
    """Heat HTTPBadRequest on update maps to InvalidParameterValue."""
    mock_driver = mock.MagicMock()
    mock_get_driver.return_value = mock_driver
    mock_driver.update_nodegroup.side_effect = exc.HTTPBadRequest(
        "Bad request")

    self.assertRaises(exception.InvalidParameterValue,
                      self.handler.nodegroup_update,
                      self.context, self.cluster, self.nodegroup)

    mock_driver.update_nodegroup.assert_called_once_with(
        self.context, self.cluster, self.nodegroup)
    self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                     self.cluster.status)
    self.assertEqual(fields.ClusterStatus.UPDATE_FAILED,
                     self.nodegroup.status)
    # heatclient prefixes its message with "ERROR: ".
    self.assertEqual("ERROR: Bad request", self.nodegroup.status_reason)
|
||||
|
||||
@patch('magnum.drivers.common.driver.Driver.get_driver')
def test_nodegroup_delete(self, mock_get_driver):
    """Successful delete: driver invoked, nodegroup DELETE_IN_PROGRESS."""
    mock_driver = mock.MagicMock()
    mock_get_driver.return_value = mock_driver

    self.handler.nodegroup_delete(self.context, self.cluster,
                                  self.nodegroup)

    mock_driver.delete_nodegroup.assert_called_once_with(
        self.context, self.cluster, self.nodegroup)
    self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                     self.cluster.status)
    self.assertEqual(fields.ClusterStatus.DELETE_IN_PROGRESS,
                     self.nodegroup.status)
|
||||
|
||||
@patch('magnum.drivers.common.driver.Driver.get_driver')
def test_nodegroup_delete_stack_not_found(self, mock_get_driver):
    """If the driver reports 404, the nodegroup object is destroyed."""
    mock_driver = mock.MagicMock()
    mock_get_driver.return_value = mock_driver
    mock_driver.delete_nodegroup.side_effect = exc.HTTPNotFound()
    nodegroup = mock.MagicMock()

    self.handler.nodegroup_delete(self.context, self.cluster, nodegroup)

    mock_driver.delete_nodegroup.assert_called_once_with(
        self.context, self.cluster, nodegroup)
    self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                     self.cluster.status)
    # Nothing left to tear down, so the DB record goes away immediately.
    nodegroup.destroy.assert_called_once()
|
||||
|
||||
@patch('magnum.drivers.common.driver.Driver.get_driver')
def test_nodegroup_delete_stack_and_ng_not_found(self, mock_get_driver):
    """Stack 404 plus an already-gone nodegroup record is tolerated."""
    mock_driver = mock.MagicMock()
    mock_get_driver.return_value = mock_driver
    mock_driver.delete_nodegroup.side_effect = exc.HTTPNotFound()
    nodegroup = mock.MagicMock()
    nodegroup.destroy.side_effect = exception.NodeGroupNotFound()

    # Must not raise even though destroy() says the record is gone.
    self.handler.nodegroup_delete(self.context, self.cluster, nodegroup)

    mock_driver.delete_nodegroup.assert_called_once_with(
        self.context, self.cluster, nodegroup)
    self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                     self.cluster.status)
    nodegroup.destroy.assert_called_once()
|
||||
|
||||
@patch('magnum.drivers.common.driver.Driver.get_driver')
def test_nodegroup_delete_stack_operation_ongoing(self, mock_get_driver):
    """A driver 409 (conflict) surfaces as NgOperationInProgress."""
    mock_driver = mock.MagicMock()
    mock_get_driver.return_value = mock_driver
    mock_driver.delete_nodegroup.side_effect = exc.HTTPConflict()

    self.assertRaises(exception.NgOperationInProgress,
                      self.handler.nodegroup_delete,
                      self.context, self.cluster, self.nodegroup)

    mock_driver.delete_nodegroup.assert_called_once_with(
        self.context, self.cluster, self.nodegroup)
    self.assertEqual(fields.ClusterStatus.UPDATE_IN_PROGRESS,
                     self.cluster.status)
    self.assertEqual(fields.ClusterStatus.DELETE_IN_PROGRESS,
                     self.nodegroup.status)
|
||||
|
||||
@patch('magnum.drivers.common.driver.Driver.get_driver')
|
||||
def test_nodegroup_delete_failed(self, mock_get_driver):
|
||||
mock_driver = mock.MagicMock()
|
||||
mock_get_driver.return_value = mock_driver
|
||||
side_effect = NotImplementedError("Delete failed")
|
||||
mock_driver.dele |