Rename Bay to Cluster in api

This is the first of several patches to add new Cluster commands
that will replace the Bay terminology in Magnum. This patch adds
the new Cluster and ClusterTemplate commands in addition to the
existing Bay and Baymodel commands. Follow-up patches will cover
the client, docs, and additional functional tests.

Change-Id: Ie686281a6f98a1a9931158d2a79eee6ac21ed9a1
Implements: blueprint rename-bay-to-cluster
Jaycen Grant 2016-08-10 13:29:31 -07:00
parent 570173d999
commit eaddb942fd
34 changed files with 4352 additions and 155 deletions
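
At a glance, the patch introduces the following parallel names. The mapping below is an illustrative summary for reviewers, drawn from the diff, and is not itself part of the change:

# Illustrative summary of the Bay -> Cluster rename (not part of the diff).
RENAMED = {
    # REST resources under /v1
    'bays': 'clusters',
    'baymodels': 'clustertemplates',
    # exceptions in magnum.common.exception
    'BayNotFound': 'ClusterNotFound',
    'BayAlreadyExists': 'ClusterAlreadyExists',
    'BayModelNotFound': 'ClusterTemplateNotFound',
    'BayModelAlreadyExists': 'ClusterTemplateAlreadyExists',
    'BayModelReferenced': 'ClusterTemplateReferenced',
    'BaymodelPublishDenied': 'ClusterTemplatePublishDenied',
}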

View File

@@ -20,6 +20,21 @@
"baymodel:update": "rule:default",
"baymodel:publish": "rule:admin_or_owner",
+"cluster:create": "rule:default",
+"cluster:delete": "rule:default",
+"cluster:detail": "rule:default",
+"cluster:get": "rule:default",
+"cluster:get_all": "rule:default",
+"cluster:update": "rule:default",
+"clustertemplate:create": "rule:default",
+"clustertemplate:delete": "rule:default",
+"clustertemplate:detail": "rule:default",
+"clustertemplate:get": "rule:default",
+"clustertemplate:get_all": "rule:default",
+"clustertemplate:update": "rule:default",
+"clustertemplate:publish": "rule:admin_or_owner",
"rc:create": "rule:default",
"rc:delete": "rule:default",
"rc:detail": "rule:default",

View File

@@ -69,7 +69,7 @@ class Root(base.APIBase):
root = Root()
root.name = "OpenStack Magnum API"
root.description = ("Magnum is an OpenStack project which aims to "
-"provide container management.")
+"provide container cluster management.")
root.versions = [Version.convert('v1', "CURRENT",
versions.CURRENT_MAX_VER,
versions.BASE_VER)]

View File

@@ -27,6 +27,8 @@ from magnum.api.controllers import link
from magnum.api.controllers.v1 import bay
from magnum.api.controllers.v1 import baymodel
from magnum.api.controllers.v1 import certificate
+from magnum.api.controllers.v1 import cluster
+from magnum.api.controllers.v1 import cluster_template
from magnum.api.controllers.v1 import magnum_services
from magnum.api.controllers import versions as ver
from magnum.api import expose
@@ -77,6 +79,12 @@ class V1(controllers_base.APIBase):
bays = [link.Link]
"""Links to the bays resource"""
+clustertemplates = [link.Link]
+"""Links to the clustertemplates resource"""
+clusters = [link.Link]
+"""Links to the clusters resource"""
certificates = [link.Link]
"""Links to the certificates resource"""
@@ -108,6 +116,19 @@ class V1(controllers_base.APIBase):
pecan.request.host_url,
'bays', '',
bookmark=True)]
+v1.clustertemplates = [link.Link.make_link('self',
+pecan.request.host_url,
+'clustertemplates', ''),
+link.Link.make_link('bookmark',
+pecan.request.host_url,
+'clustertemplates', '',
+bookmark=True)]
+v1.clusters = [link.Link.make_link('self', pecan.request.host_url,
+'clusters', ''),
+link.Link.make_link('bookmark',
+pecan.request.host_url,
+'clusters', '',
+bookmark=True)]
v1.certificates = [link.Link.make_link('self', pecan.request.host_url,
'certificates', ''),
link.Link.make_link('bookmark',
@@ -128,6 +149,8 @@ class Controller(controllers_base.Controller):
bays = bay.BaysController()
baymodels = baymodel.BayModelsController()
+clusters = cluster.ClustersController()
+clustertemplates = cluster_template.ClusterTemplatesController()
certificates = certificate.CertificateController()
mservices = magnum_services.MagnumServiceController()

View File

@@ -80,7 +80,7 @@ class Bay(base.APIBase):
try:
baymodel = api_utils.get_resource('BayModel', value)
self._baymodel_id = baymodel.uuid
-except exception.BayModelNotFound as e:
+except exception.ClusterTemplateNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
# response for a POST request to create a Bay
e.code = 400 # BadRequest

View File

@@ -343,7 +343,7 @@ class BayModelsController(base.Controller):
if baymodel_dict['public']:
if not policy.enforce(context, "baymodel:publish", None,
do_raise=False):
-raise exception.BaymodelPublishDenied()
+raise exception.ClusterTemplatePublishDenied()
# NOTE(yuywz): We will generate a random human-readable name for
# baymodel if the name is not spcified by user.
@@ -386,7 +386,7 @@
if baymodel.public != new_baymodel.public:
if not policy.enforce(context, "baymodel:publish", None,
do_raise=False):
-raise exception.BaymodelPublishDenied()
+raise exception.ClusterTemplatePublishDenied()
# Update only the fields that have changed
for field in objects.BayModel.fields:

View File

@@ -48,7 +48,7 @@ class Certificate(base.APIBase):
try:
self._bay = api_utils.get_resource('Bay', value)
self._bay_uuid = self._bay.uuid
-except exception.BayNotFound as e:
+except exception.ClusterNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
# response for a POST request to create a Bay
e.code = 400 # BadRequest

View File

@@ -0,0 +1,468 @@
# Copyright 2013 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_log import log as logging
from oslo_utils import timeutils
import pecan
import wsme
from wsme import types as wtypes
from magnum.api import attr_validator
from magnum.api.controllers import base
from magnum.api.controllers import link
from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api import expose
from magnum.api import utils as api_utils
from magnum.api.validation import validate_bay_properties
from magnum.common import clients
from magnum.common import exception
from magnum.common import name_generator
from magnum.common import policy
from magnum.i18n import _LW
from magnum import objects
from magnum.objects import fields
LOG = logging.getLogger(__name__)
class ClusterPatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return ['/cluster_template_id']
@staticmethod
def internal_attrs():
internal_attrs = ['/api_address', '/node_addresses',
'/master_addresses', '/stack_id',
'/ca_cert_ref', '/magnum_cert_ref',
'/trust_id', '/trustee_user_name',
'/trustee_password', '/trustee_user_id']
return types.JsonPatchType.internal_attrs() + internal_attrs
class ClusterID(wtypes.Base):
"""API representation of a cluster ID
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of a cluster
ID.
"""
uuid = types.uuid
"""Unique UUID for this cluster"""
def __init__(self, uuid):
self.uuid = uuid
class Cluster(base.APIBase):
"""API representation of a cluster.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of a cluster.
"""
_cluster_template_id = None
def _get_cluster_template_id(self):
return self._cluster_template_id
def _set_cluster_template_id(self, value):
if value and self._cluster_template_id != value:
try:
cluster_template = api_utils.get_resource('BayModel', value)
self._cluster_template_id = cluster_template.uuid
except exception.ClusterTemplateNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
# response for a POST request to create a Bay
e.code = 400 # BadRequest
raise
elif value == wtypes.Unset:
self._cluster_template_id = wtypes.Unset
uuid = types.uuid
"""Unique UUID for this cluster"""
name = wtypes.StringType(min_length=1, max_length=242,
pattern='^[a-zA-Z][a-zA-Z0-9_.-]*$')
"""Name of this cluster, max length is limited to 242 because of heat
stack requires max length limit to 255, and Magnum amend a uuid length"""
cluster_template_id = wsme.wsproperty(wtypes.text,
_get_cluster_template_id,
_set_cluster_template_id,
mandatory=True)
"""The cluster_template UUID"""
node_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1)
"""The node count for this cluster. Default to 1 if not set"""
master_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1)
"""The number of master nodes for this cluster. Default to 1 if not set"""
create_timeout = wsme.wsattr(wtypes.IntegerType(minimum=0), default=60)
"""Timeout for creating the cluster in minutes. Default to 60 if not set"""
links = wsme.wsattr([link.Link], readonly=True)
"""A list containing a self link and associated cluster links"""
stack_id = wsme.wsattr(wtypes.text, readonly=True)
"""Stack id of the heat stack"""
status = wtypes.Enum(str, *fields.BayStatus.ALL)
"""Status of the cluster from the heat stack"""
status_reason = wtypes.text
"""Status reason of the cluster from the heat stack"""
discovery_url = wtypes.text
"""Url used for cluster node discovery"""
api_address = wsme.wsattr(wtypes.text, readonly=True)
"""Api address of cluster master node"""
node_addresses = wsme.wsattr([wtypes.text], readonly=True)
"""IP addresses of cluster agent nodes"""
master_addresses = wsme.wsattr([wtypes.text], readonly=True)
"""IP addresses of cluster master nodes"""
faults = wsme.wsattr(wtypes.DictType(str, wtypes.text))
"""Fault info collected from the heat resources of this cluster"""
def __init__(self, **kwargs):
super(Cluster, self).__init__()
self.fields = []
for field in objects.Bay.fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
# Set the renamed attributes for clusters
self.fields.append('cluster_template_id')
if 'cluster_template_id' in kwargs.keys():
setattr(self, 'cluster_template_id',
kwargs.get('cluster_template_id', wtypes.Unset))
else:
setattr(self, 'cluster_template_id', kwargs.get('baymodel_id',
wtypes.Unset))
self.fields.append('create_timeout')
if 'create_timeout' in kwargs.keys():
setattr(self, 'create_timeout', kwargs.get('create_timeout',
wtypes.Unset))
else:
setattr(self, 'create_timeout', kwargs.get('bay_create_timeout',
wtypes.Unset))
self.fields.append('faults')
if 'faults' in kwargs.keys():
setattr(self, 'faults', kwargs.get('faults', wtypes.Unset))
else:
setattr(self, 'faults', kwargs.get('bay_faults', wtypes.Unset))
@staticmethod
def _convert_with_links(cluster, url, expand=True):
if not expand:
cluster.unset_fields_except(['uuid', 'name', 'cluster_template_id',
'node_count', 'status',
'create_timeout', 'master_count',
'stack_id'])
cluster.links = [link.Link.make_link('self', url,
'bays', cluster.uuid),
link.Link.make_link('bookmark', url,
'bays', cluster.uuid,
bookmark=True)]
return cluster
@classmethod
def convert_with_links(cls, rpc_bay, expand=True):
cluster = Cluster(**rpc_bay.as_dict())
return cls._convert_with_links(cluster, pecan.request.host_url, expand)
@classmethod
def sample(cls, expand=True):
temp_id = '4a96ac4b-2447-43f1-8ca6-9fd6f36d146d'
sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
name='example',
cluster_template_id=temp_id,
node_count=2,
master_count=1,
create_timeout=15,
stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63',
status=fields.BayStatus.CREATE_COMPLETE,
status_reason="CREATE completed successfully",
api_address='172.24.4.3',
node_addresses=['172.24.4.4', '172.24.4.5'],
created_at=timeutils.utcnow(),
updated_at=timeutils.utcnow())
return cls._convert_with_links(sample, 'http://localhost:9511', expand)
def as_dict(self):
"""Render this object as a dict of its fields."""
# Override this for updated cluster values
d = super(Cluster, self).as_dict()
if 'cluster_template_id' in d.keys():
d['baymodel_id'] = d['cluster_template_id']
del d['cluster_template_id']
if 'create_timeout' in d.keys():
d['bay_create_timeout'] = d['create_timeout']
del d['create_timeout']
if 'faults' in d.keys():
d['bay_faults'] = d['faults']
del d['faults']
return d
class ClusterCollection(collection.Collection):
"""API representation of a collection of clusters."""
clusters = [Cluster]
"""A list containing cluster objects"""
def __init__(self, **kwargs):
self._type = 'clusters'
@staticmethod
def convert_with_links(rpc_bays, limit, url=None, expand=False, **kwargs):
collection = ClusterCollection()
collection.clusters = [Cluster.convert_with_links(p, expand)
for p in rpc_bays]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
@classmethod
def sample(cls):
sample = cls()
sample.clusters = [Cluster.sample(expand=False)]
return sample
class ClustersController(base.Controller):
"""REST controller for Clusters."""
def __init__(self):
super(ClustersController, self).__init__()
_custom_actions = {
'detail': ['GET'],
}
def _generate_name_for_cluster(self, context):
"""Generate a random name like: zeta-22-bay."""
name_gen = name_generator.NameGenerator()
name = name_gen.generate()
return name + '-cluster'
def _get_clusters_collection(self, marker, limit,
sort_key, sort_dir, expand=False,
resource_url=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.Bay.get_by_uuid(pecan.request.context,
marker)
clusters = objects.Bay.list(pecan.request.context, limit,
marker_obj, sort_key=sort_key,
sort_dir=sort_dir)
return ClusterCollection.convert_with_links(clusters, limit,
url=resource_url,
expand=expand,
sort_key=sort_key,
sort_dir=sort_dir)
@expose.expose(ClusterCollection, types.uuid, int, wtypes.text,
wtypes.text)
def get_all(self, marker=None, limit=None, sort_key='id',
sort_dir='asc'):
"""Retrieve a list of clusters.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
context = pecan.request.context
policy.enforce(context, 'cluster:get_all',
action='cluster:get_all')
return self._get_clusters_collection(marker, limit, sort_key,
sort_dir)
@expose.expose(ClusterCollection, types.uuid, int, wtypes.text,
wtypes.text)
def detail(self, marker=None, limit=None, sort_key='id',
sort_dir='asc'):
"""Retrieve a list of clusters with detail.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
context = pecan.request.context
policy.enforce(context, 'cluster:detail',
action='cluster:detail')
# NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "clusters":
raise exception.HTTPNotFound
expand = True
resource_url = '/'.join(['clusters', 'detail'])
return self._get_clusters_collection(marker, limit,
sort_key, sort_dir, expand,
resource_url)
def _collect_fault_info(self, context, cluster):
"""Collect fault info from heat resources of given cluster
and store them into cluster.faults.
"""
osc = clients.OpenStackClients(context)
filters = {'status': 'FAILED'}
try:
failed_resources = osc.heat().resources.list(
cluster.stack_id, nested_depth=2, filters=filters)
except Exception as e:
failed_resources = []
LOG.warning(_LW("Failed to retrieve failed resources for "
"cluster %(cluster)s from Heat stack "
"%(stack)s due to error: %(e)s"),
{'cluster': cluster.uuid,
'stack': cluster.stack_id, 'e': e},
exc_info=True)
return {res.resource_name: res.resource_status_reason
for res in failed_resources}
@expose.expose(Cluster, types.uuid_or_name)
def get_one(self, bay_ident):
"""Retrieve information about the given bay.
:param bay_ident: UUID of a bay or logical name of the bay.
"""
context = pecan.request.context
cluster = api_utils.get_resource('Bay', bay_ident)
policy.enforce(context, 'cluster:get', cluster,
action='cluster:get')
cluster = Cluster.convert_with_links(cluster)
if cluster.status in fields.BayStatus.STATUS_FAILED:
cluster.faults = self._collect_fault_info(context, cluster)
return cluster
@expose.expose(ClusterID, body=Cluster, status_code=202)
def post(self, cluster):
"""Create a new cluster.
:param cluster: a cluster within the request body.
"""
context = pecan.request.context
policy.enforce(context, 'cluster:create',
action='cluster:create')
temp_id = cluster.cluster_template_id
cluster_template = objects.BayModel.get_by_uuid(context, temp_id)
cluster_dict = cluster.as_dict()
attr_validator.validate_os_resources(context,
cluster_template.as_dict())
attr_validator.validate_master_count(cluster_dict,
cluster_template.as_dict())
cluster_dict['project_id'] = context.project_id
cluster_dict['user_id'] = context.user_id
# NOTE(yuywz): We will generate a random human-readable name for
# cluster if the name is not specified by user.
name = cluster_dict.get('name') or \
self._generate_name_for_cluster(context)
cluster_dict['name'] = name
new_cluster = objects.Bay(context, **cluster_dict)
new_cluster.uuid = uuid.uuid4()
pecan.request.rpcapi.bay_create_async(new_cluster,
cluster.create_timeout)
return ClusterID(new_cluster.uuid)
@wsme.validate(types.uuid, [ClusterPatchType])
@expose.expose(ClusterID, types.uuid_or_name, body=[ClusterPatchType],
status_code=202)
def patch(self, cluster_ident, patch):
"""Update an existing bay.
:param cluster_ident: UUID or logical name of a bay.
:param patch: a json PATCH document to apply to this bay.
"""
context = pecan.request.context
cluster = api_utils.get_resource('Bay', cluster_ident)
policy.enforce(context, 'cluster:update', cluster,
action='cluster:update')
try:
cluster_dict = cluster.as_dict()
new_cluster = Cluster(**api_utils.apply_jsonpatch(cluster_dict,
patch))
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# Update only the fields that have changed
for field in objects.Bay.fields:
try:
patch_val = getattr(new_cluster, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if cluster[field] != patch_val:
cluster[field] = patch_val
delta = cluster.obj_what_changed()
validate_bay_properties(delta)
pecan.request.rpcapi.bay_update_async(cluster)
return ClusterID(cluster.uuid)
@expose.expose(None, types.uuid_or_name, status_code=204)
def delete(self, cluster_ident):
"""Delete a cluster.
:param cluster_ident: UUID of cluster or logical name of the cluster.
"""
context = pecan.request.context
cluster = api_utils.get_resource('Bay', cluster_ident)
policy.enforce(context, 'cluster:delete', cluster,
action='cluster:delete')
pecan.request.rpcapi.bay_delete_async(cluster.uuid)
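
To make the new controller concrete, a minimal create request body is sketched below as a Python dict; the values are placeholders and the field set simply mirrors the Cluster attributes above (cluster_template_id is mandatory, the counts and timeout have defaults):

# Hypothetical body for POST /v1/clusters; values are placeholders.
cluster_body = {
    'name': 'example-cluster',      # optional, a random name is generated if omitted
    'cluster_template_id': '4a96ac4b-2447-43f1-8ca6-9fd6f36d146d',  # mandatory, UUID or name
    'node_count': 2,                # defaults to 1
    'master_count': 1,              # defaults to 1
    'create_timeout': 15,           # minutes, defaults to 60
}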

View File

@@ -0,0 +1,434 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
import pecan
import wsme
from wsme import types as wtypes
from magnum.api import attr_validator
from magnum.api.controllers import base
from magnum.api.controllers import link
from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api import expose
from magnum.api import utils as api_utils
from magnum.api import validation
from magnum.common import clients
from magnum.common import exception
from magnum.common import name_generator
from magnum.common import policy
from magnum import objects
from magnum.objects import fields
class ClusterTemplatePatchType(types.JsonPatchType):
@staticmethod
def mandatory_attrs():
return ['/image_id', '/keypair_id', '/external_network_id', '/coe',
'/tls_disabled', '/public', '/registry_enabled',
'/server_type', '/cluster_distro', '/network_driver']
class ClusterTemplate(base.APIBase):
"""API representation of a clustertemplate.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of
a clustertemplate.
"""
uuid = types.uuid
"""Unique UUID for this clustertemplate"""
name = wtypes.StringType(min_length=1, max_length=255)
"""The name of the clustertemplate"""
coe = wtypes.Enum(str, *fields.BayType.ALL, mandatory=True)
"""The Container Orchestration Engine for this clustertemplate"""
image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
mandatory=True)
"""The image name or UUID to use as an image for this clustertemplate"""
flavor_id = wtypes.StringType(min_length=1, max_length=255)
"""The flavor of this clustertemplate"""
master_flavor_id = wtypes.StringType(min_length=1, max_length=255)
"""The flavor of the master node for this clustertemplate"""
dns_nameserver = wtypes.IPv4AddressType()
"""The DNS nameserver address"""
keypair_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
mandatory=True)
"""The name or id of the nova ssh keypair"""
external_network_id = wtypes.StringType(min_length=1, max_length=255)
"""The external network to attach the cluster"""
fixed_network = wtypes.StringType(min_length=1, max_length=255)
"""The fixed network name to attach the cluster"""
fixed_subnet = wtypes.StringType(min_length=1, max_length=255)
"""The fixed subnet name to attach the cluster"""
network_driver = wtypes.StringType(min_length=1, max_length=255)
"""The name of the driver used for instantiating container networks"""
apiserver_port = wtypes.IntegerType(minimum=1024, maximum=65535)
"""The API server port for k8s"""
docker_volume_size = wtypes.IntegerType(minimum=1)
"""The size in GB of the docker volume"""
cluster_distro = wtypes.StringType(min_length=1, max_length=255)
"""The Cluster distro for the cluster, ex - coreos, fedora-atomic."""
links = wsme.wsattr([link.Link], readonly=True)
"""A list containing a self link and associated clustertemplate links"""
http_proxy = wtypes.StringType(min_length=1, max_length=255)
"""Address of a proxy that will receive all HTTP requests and relay them.
The format is a URL including a port number.
"""
https_proxy = wtypes.StringType(min_length=1, max_length=255)
"""Address of a proxy that will receive all HTTPS requests and relay them.
The format is a URL including a port number.
"""
no_proxy = wtypes.StringType(min_length=1, max_length=255)
"""A comma separated list of ips for which proxies should not
used in the cluster
"""
volume_driver = wtypes.StringType(min_length=1, max_length=255)
"""The name of the driver used for instantiating container volume driver"""
registry_enabled = wsme.wsattr(types.boolean, default=False)
"""Indicates whether the docker registry is enabled"""
labels = wtypes.DictType(str, str)
"""One or more key/value pairs"""
tls_disabled = wsme.wsattr(types.boolean, default=False)
"""Indicates whether the TLS should be disabled"""
public = wsme.wsattr(types.boolean, default=False)
"""Indicates whether the clustertemplate is public or not."""
server_type = wsme.wsattr(wtypes.StringType(min_length=1,
max_length=255),
default='vm')
"""Server type for this clustertemplate """
insecure_registry = wtypes.StringType(min_length=1, max_length=255)
"""insecure registry url when create clustertemplate """
docker_storage_driver = wtypes.Enum(str, *fields.DockerStorageDriver.ALL)
"""Docker storage driver"""
master_lb_enabled = wsme.wsattr(types.boolean, default=False)
"""Indicates whether created bays should have a load balancer for master
nodes or not.
"""
floating_ip_enabled = wsme.wsattr(types.boolean, default=True)
"""Indicates whether created bays should have a floating ip or not."""
def __init__(self, **kwargs):
self.fields = []
for field in objects.BayModel.fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))
@staticmethod
def _convert_with_links(cluster_template, url):
cluster_template.links = [link.Link.make_link('self', url,
'clustertemplates',
cluster_template.uuid),
link.Link.make_link('bookmark', url,
'clustertemplates',
cluster_template.uuid,
bookmark=True)]
return cluster_template
@classmethod
def convert_with_links(cls, rpc_baymodel):
cluster_template = ClusterTemplate(**rpc_baymodel.as_dict())
return cls._convert_with_links(cluster_template,
pecan.request.host_url)
@classmethod
def sample(cls):
sample = cls(
uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
name='example',
image_id='Fedora-k8s',
flavor_id='m1.small',
master_flavor_id='m1.small',
dns_nameserver='8.8.1.1',
keypair_id='keypair1',
external_network_id='ffc44e4a-2319-4062-bce0-9ae1c38b05ba',
fixed_network='private',
fixed_subnet='private-subnet',
network_driver='libnetwork',
volume_driver='cinder',
apiserver_port=8080,
docker_volume_size=25,
docker_storage_driver='devicemapper',
cluster_distro='fedora-atomic',
coe=fields.BayType.KUBERNETES,
http_proxy='http://proxy.com:123',
https_proxy='https://proxy.com:123',
no_proxy='192.168.0.1,192.168.0.2,192.168.0.3',
labels={'key1': 'val1', 'key2': 'val2'},
server_type='vm',
insecure_registry='10.238.100.100:5000',
created_at=timeutils.utcnow(),
updated_at=timeutils.utcnow(),
public=False,
master_lb_enabled=False,
floating_ip_enabled=True)
return cls._convert_with_links(sample, 'http://localhost:9511')
class ClusterTemplateCollection(collection.Collection):
"""API representation of a collection of clustertemplates."""
clustertemplates = [ClusterTemplate]
"""A list containing clustertemplates objects"""
def __init__(self, **kwargs):
self._type = 'clustertemplates'
@staticmethod
def convert_with_links(rpc_baymodels, limit, url=None, **kwargs):
collection = ClusterTemplateCollection()
collection.clustertemplates = [ClusterTemplate.convert_with_links(p)
for p in rpc_baymodels]
collection.next = collection.get_next(limit, url=url, **kwargs)
return collection
@classmethod
def sample(cls):
sample = cls()
sample.clustertemplates = [ClusterTemplate.sample()]
return sample
class ClusterTemplatesController(base.Controller):
"""REST controller for ClusterTemplates."""
_custom_actions = {
'detail': ['GET'],
}
def _generate_name_for_cluster_template(self, context):
"""Generate a random name like: zeta-22-model."""
name_gen = name_generator.NameGenerator()
name = name_gen.generate()
return name + '-template'
def _get_cluster_templates_collection(self, marker, limit,
sort_key, sort_dir,
resource_url=None):
limit = api_utils.validate_limit(limit)
sort_dir = api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.BayModel.get_by_uuid(pecan.request.context,
marker)
cluster_templates = objects.BayModel.list(pecan.request.context, limit,
marker_obj,
sort_key=sort_key,
sort_dir=sort_dir)
return ClusterTemplateCollection.convert_with_links(cluster_templates,
limit,
url=resource_url,
sort_key=sort_key,
sort_dir=sort_dir)
@expose.expose(ClusterTemplateCollection, types.uuid, int, wtypes.text,
wtypes.text)
def get_all(self, marker=None, limit=None, sort_key='id',
sort_dir='asc'):
"""Retrieve a list of baymodels.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
context = pecan.request.context
policy.enforce(context, 'clustertemplate:get_all',
action='clustertemplate:get_all')
return self._get_cluster_templates_collection(marker, limit, sort_key,
sort_dir)
@expose.expose(ClusterTemplateCollection, types.uuid, int, wtypes.text,
wtypes.text)
def detail(self, marker=None, limit=None, sort_key='id',
sort_dir='asc'):
"""Retrieve a list of clustertemplates with detail.
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
context = pecan.request.context
policy.enforce(context, 'clustertemplate:detail',
action='clustertemplate:detail')
# NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "clustertemplates":
raise exception.HTTPNotFound
resource_url = '/'.join(['clustertemplates', 'detail'])
return self._get_cluster_templates_collection(marker, limit,
sort_key, sort_dir,
resource_url)
@expose.expose(ClusterTemplate, types.uuid_or_name)
def get_one(self, cluster_template_ident):
"""Retrieve information about the given clustertemplate.
:param cluster_template_ident: UUID or logical name of a
clustertemplate.
"""
context = pecan.request.context
cluster_template = api_utils.get_resource('BayModel',
cluster_template_ident)
if not cluster_template.public:
policy.enforce(context, 'clustertemplate:get', cluster_template,
action='clustertemplate:get')
return ClusterTemplate.convert_with_links(cluster_template)
@expose.expose(ClusterTemplate, body=ClusterTemplate, status_code=201)
@validation.enforce_network_driver_types_create()
@validation.enforce_volume_driver_types_create()
@validation.enforce_volume_storage_size_create()
def post(self, cluster_template):
"""Create a new cluster_template.
:param cluster_template: a cluster_template within the request body.
"""
context = pecan.request.context
policy.enforce(context, 'clustertemplate:create',
action='clustertemplate:create')
cluster_template_dict = cluster_template.as_dict()
cli = clients.OpenStackClients(context)
attr_validator.validate_os_resources(context, cluster_template_dict)
image_data = attr_validator.validate_image(cli,
cluster_template_dict[
'image_id'])
cluster_template_dict['cluster_distro'] = image_data['os_distro']
cluster_template_dict['project_id'] = context.project_id
cluster_template_dict['user_id'] = context.user_id
# check permissions for making cluster_template public
if cluster_template_dict['public']:
if not policy.enforce(context, "clustertemplate:publish", None,
do_raise=False):
raise exception.ClusterTemplatePublishDenied()
# NOTE(yuywz): We will generate a random human-readable name for
# cluster_template if the name is not specified by user.
arg_name = cluster_template_dict.get('name')
name = arg_name or self._generate_name_for_cluster_template(context)
cluster_template_dict['name'] = name
new_cluster_template = objects.BayModel(context,
**cluster_template_dict)
new_cluster_template.create()
# Set the HTTP Location Header
pecan.response.location = link.build_url('clustertemplates',
new_cluster_template.uuid)
return ClusterTemplate.convert_with_links(new_cluster_template)
@wsme.validate(types.uuid_or_name, [ClusterTemplatePatchType])
@expose.expose(ClusterTemplate, types.uuid_or_name,
body=[ClusterTemplatePatchType])
@validation.enforce_network_driver_types_update()
@validation.enforce_volume_driver_types_update()
def patch(self, cluster_template_ident, patch):
"""Update an existing cluster_template.
:param cluster_template_ident: UUID or logic name of a
cluster_template.
:param patch: a json PATCH document to apply to this
cluster_template.
"""
context = pecan.request.context
cluster_template = api_utils.get_resource('BayModel',
cluster_template_ident)
policy.enforce(context, 'clustertemplate:update', cluster_template,
action='clustertemplate:update')
try:
cluster_template_dict = cluster_template.as_dict()
new_cluster_template = ClusterTemplate(**api_utils.apply_jsonpatch(
cluster_template_dict,
patch))
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
new_cluster_template_dict = new_cluster_template.as_dict()
attr_validator.validate_os_resources(context,
new_cluster_template_dict)
# check permissions when updating baymodel public flag
if cluster_template.public != new_cluster_template.public:
if not policy.enforce(context, "clustertemplate:publish", None,
do_raise=False):
raise exception.ClusterTemplatePublishDenied()
# Update only the fields that have changed
for field in objects.BayModel.fields:
try:
patch_val = getattr(new_cluster_template, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if cluster_template[field] != patch_val:
cluster_template[field] = patch_val
cluster_template.save()
return ClusterTemplate.convert_with_links(cluster_template)
@expose.expose(None, types.uuid_or_name, status_code=204)
def delete(self, cluster_template_ident):
"""Delete a cluster_template.
:param cluster_template_ident: UUID or logical name of a
cluster_template.
"""
context = pecan.request.context
cluster_template = api_utils.get_resource('BayModel',
cluster_template_ident)
policy.enforce(context, 'clustertemplate:delete', cluster_template,
action='clustertemplate:delete')
cluster_template.destroy()
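
For comparison, a minimal clustertemplate create body, mirroring the mandatory attributes and the sample() payload above; values are placeholders:

# Hypothetical body for POST /v1/clustertemplates; values are placeholders.
cluster_template_body = {
    'name': 'example-template',   # optional, a random name is generated if omitted
    'image_id': 'Fedora-k8s',     # mandatory
    'keypair_id': 'keypair1',     # mandatory
    'coe': 'kubernetes',          # mandatory
    'external_network_id': 'ffc44e4a-2319-4062-bce0-9ae1c38b05ba',
    'dns_nameserver': '8.8.1.1',
}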

View File

@@ -229,28 +229,29 @@ class FileSystemNotSupported(MagnumException):
"File system %(fs)s is not supported.")
-class BayModelNotFound(ResourceNotFound):
-message = _("Baymodel %(baymodel)s could not be found.")
+class ClusterTemplateNotFound(ResourceNotFound):
+message = _("ClusterTemplate %(clustertemplate)s could not be found.")
-class BayModelAlreadyExists(Conflict):
-message = _("A baymodel with UUID %(uuid)s already exists.")
+class ClusterTemplateAlreadyExists(Conflict):
+message = _("A ClusterTemplate with UUID %(uuid)s already exists.")
-class BayModelReferenced(Invalid):
-message = _("Baymodel %(baymodel)s is referenced by one or multiple bays.")
+class ClusterTemplateReferenced(Invalid):
+message = _("ClusterTemplate %(clustertemplate)s is referenced by one or"
+" multiple clusters.")
-class BaymodelPublishDenied(NotAuthorized):
-message = _("Not authorized to set public flag for baymodel.")
+class ClusterTemplatePublishDenied(NotAuthorized):
+message = _("Not authorized to set public flag for cluster template.")
-class BayNotFound(ResourceNotFound):
-message = _("Bay %(bay)s could not be found.")
+class ClusterNotFound(ResourceNotFound):
+message = _("Cluster %(cluster)s could not be found.")
-class BayAlreadyExists(Conflict):
-message = _("A bay with UUID %(uuid)s already exists.")
+class ClusterAlreadyExists(Conflict):
+message = _("A cluster with UUID %(uuid)s already exists.")
class ContainerNotFound(ResourceNotFound):

View File

@@ -234,7 +234,7 @@ class Handler(object):
trust_manager.delete_trustee_and_trust(osc, context, bay)
cert_manager.delete_certificates_from_bay(bay, context=context)
bay.destroy()
-except exception.BayNotFound:
+except exception.ClusterNotFound:
LOG.info(_LI('The bay %s has been deleted by others.'), uuid)
conductor_utils.notify_about_bay_operation(
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
@@ -348,7 +348,7 @@ class HeatPoller(object):
cert_manager.delete_certificates_from_bay(self.bay,
context=self.context)
self.bay.destroy()
-except exception.BayNotFound:
+except exception.ClusterNotFound:
LOG.info(_LI('The bay %s has been deleted by others.')
% self.bay.uuid)

View File

@@ -118,7 +118,7 @@ class Connection(object):
:param bay_id: The id or uuid of a bay.
:returns: A bay.
-:raises: BayNotFound
+:raises: ClusterNotFound
"""
@abc.abstractmethod
@@ -201,7 +201,7 @@ class Connection(object):
:param baymodel_id: The id or uuid of a baymodel.
:returns: A baymodel.
-:raises: BayModelNotFound
+:raises: ClusterTemplateNotFound
"""
@abc.abstractmethod

View File

@@ -157,7 +157,7 @@ class Connection(api.Connection):
try:
bay.save()
except db_exc.DBDuplicateEntry:
-raise exception.BayAlreadyExists(uuid=values['uuid'])
+raise exception.ClusterAlreadyExists(uuid=values['uuid'])
return bay
def get_bay_by_id(self, context, bay_id):
@@ -167,7 +167,7 @@
try:
return query.one()
except NoResultFound:
-raise exception.BayNotFound(bay=bay_id)
+raise exception.ClusterNotFound(cluster=bay_id)
def get_bay_by_name(self, context, bay_name):
query = model_query(models.Bay)
@@ -179,7 +179,7 @@
raise exception.Conflict('Multiple bays exist with same name.'
' Please use the bay uuid instead.')
except NoResultFound:
-raise exception.BayNotFound(bay=bay_name)
+raise exception.ClusterNotFound(cluster=bay_name)
def get_bay_by_uuid(self, context, bay_uuid):
query = model_query(models.Bay)
@@ -188,7 +188,7 @@
try:
return query.one()
except NoResultFound:
-raise exception.BayNotFound(bay=bay_uuid)
+raise exception.ClusterNotFound(cluster=bay_uuid)
def destroy_bay(self, bay_id):
session = get_session()
@@ -199,7 +199,7 @@
try:
query.one()
except NoResultFound:
-raise exception.BayNotFound(bay=bay_id)
+raise exception.ClusterNotFound(cluster=bay_id)
query.delete()
@@ -219,7 +219,7 @@
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
-raise exception.BayNotFound(bay=bay_id)
+raise exception.ClusterNotFound(cluster=bay_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
@@ -264,7 +264,7 @@
try:
baymodel.save()
except db_exc.DBDuplicateEntry:
-raise exception.BayModelAlreadyExists(uuid=values['uuid'])
+raise exception.ClusterTemplateAlreadyExists(uuid=values['uuid'])
return baymodel
def get_baymodel_by_id(self, context, baymodel_id):
@@ -276,7 +276,8 @@
try:
return query.one()
except NoResultFound:
-raise exception.BayModelNotFound(baymodel=baymodel_id)
+raise exception.ClusterTemplateNotFound(
+clustertemplate=baymodel_id)
def get_baymodel_by_uuid(self, context, baymodel_uuid):
query = model_query(models.BayModel)
@@ -287,7 +288,8 @@
try:
return query.one()
except NoResultFound:
-raise exception.BayModelNotFound(baymodel=baymodel_uuid)
+raise exception.ClusterTemplateNotFound(
+clustertemplate=baymodel_uuid)
def get_baymodel_by_name(self, context, baymodel_name):
query = model_query(models.BayModel)
@@ -301,7 +303,8 @@
raise exception.Conflict('Multiple baymodels exist with same name.'
' Please use the baymodel uuid instead.')
except NoResultFound:
-raise exception.BayModelNotFound(baymodel=baymodel_name)
+raise exception.ClusterTemplateNotFound(
+clustertemplate=baymodel_name)
def _is_baymodel_referenced(self, session, baymodel_uuid):
"""Checks whether the baymodel is referenced by bay(s)."""
@@ -324,10 +327,12 @@
try:
baymodel_ref = query.one()
except NoResultFound:
-raise exception.BayModelNotFound(baymodel=baymodel_id)
+raise exception.ClusterTemplateNotFound(
+clustertemplate=baymodel_id)
if self._is_baymodel_referenced(session, baymodel_ref['uuid']):
-raise exception.BayModelReferenced(baymodel=baymodel_id)
+raise exception.ClusterTemplateReferenced(
+clustertemplate=baymodel_id)
query.delete()
@@ -347,12 +352,14 @@
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
-raise exception.BayModelNotFound(baymodel=baymodel_id)
+raise exception.ClusterTemplateNotFound(
+clustertemplate=baymodel_id)
if self._is_baymodel_referenced(session, ref['uuid']):
# we only allow to update baymodel to be public
if not self._is_publishing_baymodel(values):
-raise exception.BayModelReferenced(baymodel=baymodel_id)
+raise exception.ClusterTemplateReferenced(
+clustertemplate=baymodel_id)
ref.update(values)
return ref

View File

@@ -165,7 +165,7 @@ class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
def _sync_deleted_stack(self, bay):
try:
bay.destroy()
-except exception.BayNotFound:
+except exception.ClusterNotFound:
LOG.info(_LI('The bay %s has been deleted by others.'), bay.uuid)
else:
LOG.info(_LI("Bay with id %(id)s not found in heat "

View File

@@ -0,0 +1,176 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.lib import exceptions
from magnum.i18n import _LE
from magnum.i18n import _LI
from magnum.i18n import _LW
from magnum.tests.functional.api.v1.models import cluster_id_model
from magnum.tests.functional.api.v1.models import cluster_model
from magnum.tests.functional.common import client
from magnum.tests.functional.common import utils
class ClusterClient(client.MagnumClient):
"""Encapsulates REST calls and maps JSON to/from models"""
LOG = logging.getLogger(__name__)
@classmethod
def clusters_uri(cls, filters=None):
"""Construct clusters uri with optional filters
:param filters: Optional k:v dict that's converted to url query
:returns: url string
"""
url = "/clusters"
if filters:
url = cls.add_filters(url, filters)
return url
@classmethod
def cluster_uri(cls, cluster_id):
"""Construct cluster uri
:param cluster_id: cluster uuid or name
:returns: url string
"""
return "{0}/{1}".format(cls.clusters_uri(), cluster_id)
def list_clusters(self, filters=None, **kwargs):
"""Makes GET /clusters request and returns ClusterCollection
Abstracts REST call to return all clusters
:param filters: Optional k:v dict that's converted to url query
:returns: response object and ClusterCollection object
"""
resp, body = self.get(self.clusters_uri(filters), **kwargs)
return self.deserialize(resp, body, cluster_model.ClusterCollection)
def get_cluster(self, cluster_id, **kwargs):
"""Makes GET /cluster request and returns ClusterEntity
Abstracts REST call to return a single cluster based on uuid or name
:param cluster_id: cluster uuid or name
:returns: response object and ClusterCollection object
"""
resp, body = self.get(self.cluster_uri(cluster_id))
return self.deserialize(resp, body, cluster_model.ClusterEntity)
def post_cluster(self, model, **kwargs):
"""Makes POST /cluster request and returns ClusterIdEntity
Abstracts REST call to create new cluster
:param model: ClusterEntity
:returns: response object and ClusterIdEntity object
"""
resp, body = self.post(
self.clusters_uri(),
body=model.to_json(), **kwargs)
return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity)
def patch_cluster(self, cluster_id, clusterpatch_listmodel, **kwargs):
"""Makes PATCH /cluster request and returns ClusterIdEntity
Abstracts REST call to update cluster attributes
:param cluster_id: UUID of cluster
:param clusterpatch_listmodel: ClusterPatchCollection
:returns: response object and ClusterIdEntity object
"""
resp, body = self.patch(
self.cluster_uri(cluster_id),
body=clusterpatch_listmodel.to_json(), **kwargs)
return self.deserialize(resp, body, cluster_id_model.ClusterIdEntity)
def delete_cluster(self, cluster_id, **kwargs):
"""Makes DELETE /cluster request and returns response object
Abstracts REST call to delete cluster based on uuid or name
:param cluster_id: UUID or name of cluster
:returns: response object
"""
return self.delete(self.cluster_uri(cluster_id), **kwargs)
def wait_for_cluster_to_delete(self, cluster_id):
utils.wait_for_condition(
lambda: self.does_cluster_not_exist(cluster_id), 10, 600)
def wait_for_created_cluster(self, cluster_id, delete_on_error=True):
try:
utils.wait_for_condition(
lambda: self.does_cluster_exist(cluster_id), 10, 1800)
except Exception:
# In error state. Clean up the cluster id if desired
self.LOG.error(_LE('Cluster %s entered an exception state.') %
cluster_id)
if delete_on_error:
self.LOG.error(_LE('We will attempt to delete clusters now.'))
self.delete_cluster(cluster_id)
self.wait_for_cluster_to_delete(cluster_id)
raise
def wait_for_final_state(self, cluster_id):
utils.wait_for_condition(
lambda: self.is_cluster_in_final_state(cluster_id), 10, 1800)
def is_cluster_in_final_state(self, cluster_id):
try:
resp, model = self.get_cluster(cluster_id)
if model.status in ['CREATED', 'CREATE_COMPLETE',
'ERROR', 'CREATE_FAILED']:
self.LOG.info(_LI('Cluster %s succeeded.') % cluster_id)
return True
else:
return False
except exceptions.NotFound:
self.LOG.warning(_LW('Cluster %s is not found.') % cluster_id)
return False
def does_cluster_exist(self, cluster_id):
try:
resp, model = self.get_cluster(cluster_id)
if model.status in ['CREATED', 'CREATE_COMPLETE']:
self.LOG.info(_LI('Cluster %s is created.') % cluster_id)
return True
elif model.status in ['ERROR', 'CREATE_FAILED']:
self.LOG.error(_LE('Cluster %s is in fail state.') %
cluster_id)
raise exceptions.ServerFault(
"Got into an error condition: %s for %s" %
(model.status, cluster_id))
else:
return False
except exceptions.NotFound:
self.LOG.warning(_LW('Cluster %s is not found.') % cluster_id)
return False
def does_cluster_not_exist(self, cluster_id):
try:
self.get_cluster(cluster_id)
except exceptions.NotFound:
self.LOG.warning(_LW('Cluster %s is not found.') % cluster_id)
return True
return False
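
A rough usage sketch for the client above. It assumes an already configured ClusterClient instance, a cluster request model built elsewhere in the test suite, and that the returned ClusterIdEntity exposes a uuid attribute; all three are assumptions, not part of this patch:

def create_and_delete_cluster(cluster_client, cluster_model):
    # Create the cluster and wait until it reaches a created state.
    resp, cluster_id = cluster_client.post_cluster(cluster_model)
    cluster_client.wait_for_created_cluster(cluster_id.uuid, delete_on_error=True)

    # Read it back, then delete it and wait for the delete to complete.
    resp, cluster = cluster_client.get_cluster(cluster_id.uuid)
    cluster_client.delete_cluster(cluster_id.uuid)
    cluster_client.wait_for_cluster_to_delete(cluster_id.uuid)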

View File

@@ -0,0 +1,113 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnum.tests.functional.api.v1.models import cluster_template_model
from magnum.tests.functional.common import client
class ClusterTemplateClient(client.MagnumClient):
"""Encapsulates REST calls and maps JSON to/from models"""
@classmethod
def cluster_templates_uri(cls, filters=None):
"""Construct clustertemplates uri with optional filters
:param filters: Optional k:v dict that's converted to url query
:returns: url string
"""
url = "/clustertemplates"
if filters:
url = cls.add_filters(url, filters)
return url
@classmethod
def cluster_template_uri(cls, cluster_template_id):
"""Construct cluster_template uri
:param cluster_template_id: cluster_template uuid or name
:returns: url string
"""
return "{0}/{1}".format(cls.cluster_templates_uri(),
cluster_template_id)
def list_cluster_templates(self, filters=None, **kwargs):
"""Makes GET /clustertemplates request
Abstracts REST call to return all clustertemplates
:param filters: Optional k:v dict that's converted to url query
:returns: response object and ClusterTemplateCollection object
"""
resp, body = self.get(self.cluster_templates_uri(filters), **kwargs)
collection = cluster_template_model.ClusterTemplateCollection
return self.deserialize(resp, body, collection)
def get_cluster_template(self, cluster_template_id, **kwargs):
"""Makes GET /clustertemplate request and returns ClusterTemplateEntity
Abstracts REST call to return a single clustertemplate based on uuid
or name
:param cluster_template_id: clustertemplate uuid or name
:returns: response object and ClusterTemplateCollection object
"""
resp, body = self.get(self.cluster_template_uri(cluster_template_id))
return self.deserialize(resp, body,
cluster_template_model.ClusterTemplateEntity)
def post_cluster_template(self, model, **kwargs):
"""Makes POST /clustertemplate request
Abstracts REST call to create new clustertemplate
:param model: ClusterTemplateEntity
:returns: response object and ClusterTemplateEntity object
"""
resp, body = self.post(
self.cluster_templates_uri(),
body=model.to_json(), **kwargs)
entity = cluster_template_model.ClusterTemplateEntity
return self.deserialize(resp, body, entity)
def patch_cluster_template(self, cluster_template_id,
cluster_templatepatch_listmodel, **kwargs):
"""Makes PATCH /clustertemplate and returns ClusterTemplateEntity
Abstracts REST call to update clustertemplate attributes
:param cluster_template_id: UUID of clustertemplate
:param cluster_templatepatch_listmodel: ClusterTemplatePatchCollection
:returns: response object and ClusterTemplateEntity object
"""
resp, body = self.patch(
self.cluster_template_uri(cluster_template_id),
body=cluster_templatepatch_listmodel.to_json(), **kwargs)
return self.deserialize(resp, body,
cluster_template_model.ClusterTemplateEntity)
def delete_cluster_template(self, cluster_template_id, **kwargs):
"""Makes DELETE /clustertemplate request and returns response object
Abstracts REST call to delete clustertemplate based on uuid or name
:param cluster_template_id: UUID or name of clustertemplate
:returns: response object
"""
return self.delete(self.cluster_template_uri(cluster_template_id),
**kwargs)

View File

@@ -0,0 +1,24 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnum.tests.functional.common import models
class ClusterIdData(models.BaseModel):
"""Data that encapsulates ClusterId attributes"""
pass
class ClusterIdEntity(models.EntityModel):
"""Entity Model that represents a single instance of CertData"""
ENTITY_NAME = 'clusterid'
MODEL_TYPE = ClusterIdData

View File

@@ -0,0 +1,30 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnum.tests.functional.common import models
class ClusterData(models.BaseModel):
"""Data that encapsulates cluster attributes"""
pass
class ClusterEntity(models.EntityModel):
"""Entity Model that represents a single instance of ClusterData"""
ENTITY_NAME = 'cluster'
MODEL_TYPE = ClusterData
class ClusterCollection(models.CollectionModel):
"""Collection Model that represents a list of ClusterData objects"""
COLLECTION_NAME = 'clusterlists'
MODEL_TYPE = ClusterData

View File

@@ -0,0 +1,30 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnum.tests.functional.common import models
class ClusterTemplateData(models.BaseModel):
"""Data that encapsulates clustertemplate attributes"""
pass
class ClusterTemplateEntity(models.EntityModel):
"""Entity Model that represents a single instance of ClusterTemplateData"""
ENTITY_NAME = 'clustertemplate'
MODEL_TYPE = ClusterTemplateData
class ClusterTemplateCollection(models.CollectionModel):
"""Collection that represents a list of ClusterTemplateData objects"""
COLLECTION_NAME = 'clustertemplatelists'
MODEL_TYPE = ClusterTemplateData

View File

@@ -0,0 +1,77 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from magnum.tests.functional.common import models
class ClusterTemplatePatchData(models.BaseModel):
"""Data that encapsulates clustertemplatepatch attributes"""
pass
class ClusterTemplatePatchEntity(models.EntityModel):
"""Model that represents a single instance of ClusterTemplatePatchData"""
ENTITY_NAME = 'clustertemplatepatch'
MODEL_TYPE = ClusterTemplatePatchData
class ClusterTemplatePatchCollection(models.CollectionModel):
"""Model that represents a list of ClusterTemplatePatchData objects"""
MODEL_TYPE = ClusterTemplatePatchData
COLLECTION_NAME = 'clustertemplatepatchlist'
def to_json(self):
"""Converts ClusterTemplatePatchCollection to json
Retrieves list from COLLECTION_NAME attribute and converts each object
to dict, appending it to a list. Then converts the entire list to
json
This is required due to COLLECTION_NAME holding a list of objects that
need to be converted to dict individually
:returns: json object
"""
data = getattr(self, ClusterTemplatePatchCollection.COLLECTION_NAME)
collection = []
for d in data:
collection.append(d.to_dict())
return json.dumps(collection)
@classmethod
def from_dict(cls, data):
"""Converts dict to ClusterTemplatePatchData
Converts data dict to list of ClusterTemplatePatchData objects and
stores it in COLLECTION_NAME
Example of dict data:
[{
"path": "/name",
"value": "myname",
"op": "replace"
}]
:param data: dict of patch data
:returns: ClusterTemplatePatchCollection
"""
model = cls()
collection = []
for d in data:
collection.append(cls.MODEL_TYPE.from_dict(d))
setattr(model, cls.COLLECTION_NAME, collection)
return model
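A short round-trip sketch for the collection defined above: from_dict turns a plain list of JSON-Patch operations into ClusterTemplatePatchData objects, and to_json serializes them back into the body the client sends (only the methods defined above are used):

    patch = ClusterTemplatePatchCollection.from_dict([
        {"path": "/name", "value": "myname", "op": "replace"},
    ])
    # JSON array suitable for a PATCH to /v1/clustertemplates/<uuid or name>
    body = patch.to_json()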


@ -0,0 +1,76 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from magnum.tests.functional.common import models
class ClusterPatchData(models.BaseModel):
"""Data that encapsulates clusterpatch attributes"""
pass
class ClusterPatchEntity(models.EntityModel):
"""Entity Model that represents a single instance of ClusterPatchData"""
ENTITY_NAME = 'clusterpatch'
MODEL_TYPE = ClusterPatchData
class ClusterPatchCollection(models.CollectionModel):
"""Collection Model that represents a list of ClusterPatchData objects"""
MODEL_TYPE = ClusterPatchData
COLLECTION_NAME = 'clusterpatchlist'
def to_json(self):
"""Converts ClusterPatchCollection to json
Retrieves list from COLLECTION_NAME attribute and converts each object
to dict, appending it to a list. Then converts the entire list to json
This is required due to COLLECTION_NAME holding a list of objects that
need to be converted to dict individually
:returns: json object
"""
data = getattr(self, ClusterPatchCollection.COLLECTION_NAME)
collection = []
for d in data:
collection.append(d.to_dict())
return json.dumps(collection)
@classmethod
def from_dict(cls, data):
"""Converts dict to ClusterPatchData
Converts data dict to list of ClusterPatchData objects and stores it
in COLLECTION_NAME
Example of dict data:
[{
"path": "/name",
"value": "myname",
"op": "replace"
}]
:param data: dict of patch data
:returns: ClusterPatchCollection
"""
model = cls()
collection = []
for d in data:
collection.append(cls.MODEL_TYPE.from_dict(d))
setattr(model, cls.COLLECTION_NAME, collection)
return model


@ -18,7 +18,6 @@ from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
import testtools
from magnum.objects.fields import BayStatus
from magnum.tests.functional.api import base
from magnum.tests.functional.common import config
from magnum.tests.functional.common import datagen
@ -131,56 +130,6 @@ class BayTest(base.BaseTempestTest):
resp, model = self.bay_client.get_bay(bay_id)
return resp, model
# (dimtruck) Combining all these tests in one because
# they time out on the gate (2 hours not enough)
@testtools.testcase.attr('positive')
def test_create_list_and_delete_bays(self):
gen_model = datagen.valid_bay_data(
baymodel_id=self.baymodel.uuid, node_count=1)
# test bay create
_, temp_model = self._create_bay(gen_model)
self.assertEqual(BayStatus.CREATE_IN_PROGRESS, temp_model.status)
self.assertIsNone(temp_model.status_reason)
# test bay list
resp, model = self.bay_client.list_bays()
self.assertEqual(200, resp.status)
self.assertGreater(len(model.bays), 0)
self.assertIn(
temp_model.uuid, list([x['uuid'] for x in model.bays]))
# test invalid bay update
patch_model = datagen.bay_name_patch_data()
self.assertRaises(
exceptions.BadRequest,
self.bay_client.patch_bay,
temp_model.uuid, patch_model)
# test bay delete
self._delete_bay(temp_model.uuid)
self.bays.remove(temp_model.uuid)
@testtools.testcase.attr('positive')
def test_create_delete_bays_async(self):
gen_model = datagen.valid_bay_data(
baymodel_id=self.baymodel.uuid, node_count=1)
# test bay create
_, temp_model = self._create_bay(gen_model, is_async=True)
self.assertNotIn('status', temp_model)
# test bay list
resp, model = self.bay_client.list_bays()
self.assertEqual(200, resp.status)
self.assertGreater(len(model.bays), 0)
self.assertIn(
temp_model.uuid, list([x['uuid'] for x in model.bays]))
# test bay delete
self._delete_bay(temp_model.uuid)
self.bays.remove(temp_model.uuid)
@testtools.testcase.attr('negative')
def test_create_bay_for_nonexisting_baymodel(self):
gen_model = datagen.valid_bay_data(baymodel_id='this-does-not-exist')
@ -265,37 +214,3 @@ class BayTest(base.BaseTempestTest):
self.assertRaises(
exceptions.NotFound,
self.bay_client.delete_bay, data_utils.rand_uuid())
@testtools.testcase.attr('positive')
def test_certificate_sign_and_show(self):
first_model = datagen.valid_bay_data(baymodel_id=self.baymodel.uuid,
name='test')
_, bay_model = self._create_bay(first_model)
# test ca show
resp, model = self.cert_client.get_cert(
bay_model.uuid)
self.LOG.debug("cert resp: %s" % resp)
self.assertEqual(200, resp.status)
self.assertEqual(model.bay_uuid, bay_model.uuid)
self.assertIsNotNone(model.pem)
self.assertIn('-----BEGIN CERTIFICATE-----', model.pem)
self.assertIn('-----END CERTIFICATE-----', model.pem)
# test ca sign
model = datagen.cert_data(bay_uuid=bay_model.uuid)
resp, model = self.cert_client.post_cert(model)
self.LOG.debug("cert resp: %s" % resp)
self.assertEqual(201, resp.status)
self.assertEqual(model.bay_uuid, bay_model.uuid)
self.assertIsNotNone(model.pem)
self.assertIn('-----BEGIN CERTIFICATE-----', model.pem)
self.assertIn('-----END CERTIFICATE-----', model.pem)
# test ca sign invalid
model = datagen.cert_data(bay_uuid=bay_model.uuid,
csr_data="invalid_csr")
self.assertRaises(
exceptions.BadRequest,
self.cert_client.post_cert,
model)


@ -0,0 +1,240 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_log import log as logging
from oslo_utils import uuidutils
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
import testtools
from magnum.tests.functional.api import base
from magnum.tests.functional.common import config
from magnum.tests.functional.common import datagen
class ClusterTest(base.BaseTempestTest):
"""Tests for cluster CRUD."""
LOG = logging.getLogger(__name__)
def __init__(self, *args, **kwargs):
super(ClusterTest, self).__init__(*args, **kwargs)
self.clusters = []
self.creds = None
self.keypair = None
self.cluster_template = None
self.cluster_template_client = None
self.keypairs_client = None
self.cluster_client = None
self.cert_client = None
def setUp(self):
try:
super(ClusterTest, self).setUp()
(self.creds, self.keypair) = self.get_credentials_with_keypair(
type_of_creds='default')
(self.cluster_template_client,
self.keypairs_client) = self.get_clients_with_existing_creds(
creds=self.creds,
type_of_creds='default',
request_type='cluster_template')
(self.cluster_client, _) = self.get_clients_with_existing_creds(
creds=self.creds,
type_of_creds='default',
request_type='cluster')
(self.cert_client, _) = self.get_clients_with_existing_creds(
creds=self.creds,
type_of_creds='default',
request_type='cert')
model = datagen.valid_swarm_cluster_template()
_, self.cluster_template = self._create_cluster_template(model)
# NOTE (dimtruck) by default tempest sets timeout to 20 mins.
# We need more time.
test_timeout = 1800
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
except Exception:
self.tearDown()
raise
def tearDown(self):
try:
cluster_list = self.clusters[:]
for cluster_id in cluster_list:
self._delete_cluster(cluster_id)
self.clusters.remove(cluster_id)
if self.cluster_template:
self._delete_cluster_template(self.cluster_template.uuid)
finally:
super(ClusterTest, self).tearDown()
def _create_cluster_template(self, cm_model):
self.LOG.debug('We will create a clustertemplate for %s' % cm_model)
resp, model = self.cluster_template_client.post_cluster_template(
cm_model)
return resp, model
def _delete_cluster_template(self, cm_id):
self.LOG.debug('We will delete a clustertemplate for %s' % cm_id)
resp, model = self.cluster_template_client.delete_cluster_template(
cm_id)
return resp, model
def _create_cluster(self, cluster_model):
self.LOG.debug('We will create cluster for %s' % cluster_model)
resp, model = self.cluster_client.post_cluster(cluster_model)
self.LOG.debug('Response: %s' % resp)
self.assertEqual(202, resp.status)
self.assertIsNotNone(model.uuid)
self.assertTrue(uuidutils.is_uuid_like(model.uuid))
self.clusters.append(model.uuid)
self.cluster_uuid = model.uuid
if config.Config.copy_logs:
self.addOnException(self.copy_logs_handler(
lambda: list(
[self._get_cluster_by_id(model.uuid)[1].master_addresses,
self._get_cluster_by_id(model.uuid)[1].node_addresses]),
self.cluster_template.coe,
self.keypair))
self.cluster_client.wait_for_created_cluster(model.uuid,
delete_on_error=False)
return resp, model
def _delete_cluster(self, cluster_id):
self.LOG.debug('We will delete a cluster for %s' % cluster_id)
resp, model = self.cluster_client.delete_cluster(cluster_id)
self.assertEqual(204, resp.status)
self.cluster_client.wait_for_cluster_to_delete(cluster_id)
self.assertRaises(exceptions.NotFound, self.cert_client.get_cert,
cluster_id)
return resp, model
def _get_cluster_by_id(self, cluster_id):
resp, model = self.cluster_client.get_cluster(cluster_id)
return resp, model
# (dimtruck) Combining all these tests in one because
# they time out on the gate (2 hours not enough)
@testtools.testcase.attr('positive')
def test_create_list_sign_delete_clusters(self):
gen_model = datagen.valid_cluster_data(
cluster_template_id=self.cluster_template.uuid, node_count=1)
# test cluster create
_, cluster_model = self._create_cluster(gen_model)
self.assertNotIn('status', cluster_model)
# test cluster list
resp, cluster_list_model = self.cluster_client.list_clusters()
self.assertEqual(200, resp.status)
self.assertGreater(len(cluster_list_model.clusters), 0)
self.assertIn(
cluster_model.uuid, list([x['uuid']
for x in cluster_list_model.clusters]))
# test invalid cluster update
patch_model = datagen.cluster_name_patch_data()
self.assertRaises(
exceptions.BadRequest,
self.cluster_client.patch_cluster,
cluster_model.uuid, patch_model)
# test ca show
resp, cert_model = self.cert_client.get_cert(
cluster_model.uuid)
self.LOG.debug("cert resp: %s" % resp)
self.assertEqual(200, resp.status)
self.assertEqual(cert_model.bay_uuid, cluster_model.uuid)
self.assertIsNotNone(cert_model.pem)
self.assertIn('-----BEGIN CERTIFICATE-----', cert_model.pem)
self.assertIn('-----END CERTIFICATE-----', cert_model.pem)
# test ca sign
cert_data_model = datagen.cert_data(cluster_model.uuid)
resp, cert_model = self.cert_client.post_cert(cert_data_model)
self.LOG.debug("cert resp: %s" % resp)
self.assertEqual(201, resp.status)
self.assertEqual(cert_model.bay_uuid, cluster_model.uuid)
self.assertIsNotNone(cert_model.pem)
self.assertIn('-----BEGIN CERTIFICATE-----', cert_model.pem)
self.assertIn('-----END CERTIFICATE-----', cert_model.pem)
# test ca sign invalid
cert_data_model = datagen.cert_data(cluster_model.uuid,
csr_data="invalid_csr")
self.assertRaises(
exceptions.BadRequest,
self.cert_client.post_cert,
cert_data_model)
# test cluster delete
self._delete_cluster(cluster_model.uuid)
self.clusters.remove(cluster_model.uuid)
@testtools.testcase.attr('negative')
def test_create_cluster_for_nonexisting_cluster_template(self):
cm_id = 'this-does-not-exist'
gen_model = datagen.valid_cluster_data(cluster_template_id=cm_id)
self.assertRaises(
exceptions.BadRequest,
self.cluster_client.post_cluster, gen_model)
@testtools.testcase.attr('negative')
def test_create_cluster_with_node_count_0(self):
gen_model = datagen.valid_cluster_data(
cluster_template_id=self.cluster_template.uuid, node_count=0)
self.assertRaises(
exceptions.BadRequest,
self.cluster_client.post_cluster, gen_model)
@testtools.testcase.attr('negative')
def test_create_cluster_with_zero_masters(self):
uuid = self.cluster_template.uuid
gen_model = datagen.valid_cluster_data(cluster_template_id=uuid,
master_count=0)
self.assertRaises(
exceptions.BadRequest,
self.cluster_client.post_cluster, gen_model)
@testtools.testcase.attr('negative')
def test_create_cluster_with_nonexisting_flavor(self):
gen_model = \
datagen.cluster_template_data_with_valid_keypair_image_flavor()
resp, cluster_template = self._create_cluster_template(gen_model)
self.assertEqual(201, resp.status)
self.assertIsNotNone(cluster_template.uuid)
uuid = cluster_template.uuid
gen_model = datagen.valid_cluster_data(cluster_template_id=uuid)
gen_model.flavor_id = 'aaa'
self.assertRaises(exceptions.BadRequest,
self.cluster_client.post_cluster, gen_model)
resp, _ = self._delete_cluster_template(cluster_template.uuid)
self.assertEqual(204, resp.status)
@testtools.testcase.attr('negative')
def test_update_cluster_for_nonexisting_cluster(self):
patch_model = datagen.cluster_name_patch_data()
self.assertRaises(
exceptions.NotFound,
self.cluster_client.patch_cluster, 'fooo', patch_model)
@testtools.testcase.attr('negative')
def test_delete_cluster_for_nonexisting_cluster(self):
self.assertRaises(
exceptions.NotFound,
self.cluster_client.delete_cluster, data_utils.rand_uuid())
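For quick reference, the happy path exercised by the combined test above reduces to a handful of client calls; a condensed sketch using the same clients and datagen helpers created in setUp():

    gen_model = datagen.valid_cluster_data(
        cluster_template_id=self.cluster_template.uuid, node_count=1)
    resp, cluster = self.cluster_client.post_cluster(gen_model)       # 202
    self.cluster_client.wait_for_created_cluster(cluster.uuid,
                                                 delete_on_error=False)
    resp, _ = self.cluster_client.delete_cluster(cluster.uuid)        # 204
    self.cluster_client.wait_for_cluster_to_delete(cluster.uuid)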


@ -0,0 +1,239 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions
import testtools
from magnum.tests.functional.api import base
from magnum.tests.functional.common import datagen
class ClusterTemplateTest(base.BaseTempestTest):
"""Tests for clustertemplate CRUD."""
def __init__(self, *args, **kwargs):
super(ClusterTemplateTest, self).__init__(*args, **kwargs)
self.cluster_templates = []
self.cluster_template_client = None
self.keypairs_client = None
def setUp(self):
try:
super(ClusterTemplateTest, self).setUp()
(self.cluster_template_client,
self.keypairs_client) = self.get_clients_with_new_creds(
type_of_creds='default',
request_type='cluster_template')
except Exception:
self.tearDown()
raise
def tearDown(self):
for cluster_template_id in self.cluster_templates:
self._delete_cluster_template(cluster_template_id)
self.cluster_templates.remove(cluster_template_id)
super(ClusterTemplateTest, self).tearDown()
def _create_cluster_template(self, cmodel_model):
resp, model = \
self.cluster_template_client.post_cluster_template(cmodel_model)
self.assertEqual(201, resp.status)
self.cluster_templates.append(model.uuid)
return resp, model
def _delete_cluster_template(self, model_id):
resp, model = \
self.cluster_template_client.delete_cluster_template(model_id)
self.assertEqual(204, resp.status)
return resp, model
@testtools.testcase.attr('positive')
def test_list_cluster_templates(self):
gen_model = \
datagen.cluster_template_data_with_valid_keypair_image_flavor()
_, temp_model = self._create_cluster_template(gen_model)
resp, model = self.cluster_template_client.list_cluster_templates()
self.assertEqual(200, resp.status)
self.assertGreater(len(model.clustertemplates), 0)
self.assertIn(
temp_model.uuid,
list([x['uuid'] for x in model.clustertemplates]))
@testtools.testcase.attr('positive')
def test_create_cluster_template(self):
gen_model = \
datagen.cluster_template_data_with_valid_keypair_image_flavor()
resp, model = self._create_cluster_template(gen_model)
@testtools.testcase.attr('positive')
def test_create_get_public_cluster_template(self):
gen_model = datagen.valid_swarm_cluster_template(is_public=True)
resp, model = self._create_cluster_template(gen_model)
resp, model = \
self.cluster_template_client.get_cluster_template(model.uuid)
self.assertEqual(200, resp.status)
self.assertTrue(model.public)
@testtools.testcase.attr('positive')
def test_update_cluster_template_public_by_uuid(self):
path = "/public"
gen_model = \
datagen.cluster_template_data_with_valid_keypair_image_flavor()
resp, old_model = self._create_cluster_template(gen_model)
patch_model = datagen.cluster_template_replace_patch_data(path,
value=True)
resp, new_model = self.cluster_template_client.patch_cluster_template(
old_model.uuid, patch_model)
self.assertEqual(200, resp.status)
resp, model = self.cluster_template_client.get_cluster_template(
new_model.uuid)
self.assertEqual(200, resp.status)
self.assertTrue(model.public)
@testtools.testcase.attr('positive')
def test_update_cluster_template_by_uuid(self):
gen_model = \
datagen.cluster_template_data_with_valid_keypair_image_flavor()
resp, old_model = self._create_cluster_template(gen_model)
patch_model = datagen.cluster_template_name_patch_data()
resp, new_model = self.cluster_template_client.patch_cluster_template(
old_model.uuid, patch_model)
self.assertEqual(200, resp.status)
resp, model = \
self.cluster_template_client.get_cluster_template(new_model.uuid)
self.assertEqual(200, resp.status)
self.assertEqual(old_model.uuid, new_model.uuid)
self.assertEqual(model.name, new_model.name)
@testtools.testcase.attr('positive')
def test_delete_cluster_template_by_uuid(self):
gen_model = \
datagen.cluster_template_data_with_valid_keypair_image_flavor()
resp, model = self._create_cluster_template(gen_model)
resp, _ = self.cluster_template_client.delete_cluster_template(
model.uuid)
self.assertEqual(204, resp.status)
self.cluster_templates.remove(model.uuid)
@testtools.testcase.attr('positive')
def test_delete_cluster_template_by_name(self):
gen_model = \
datagen.cluster_template_data_with_valid_keypair_image_flavor()
resp, model = self._create_cluster_template(gen_model)
resp, _ = self.cluster_template_client.delete_cluster_template(
model.name)
self.assertEqual(204, resp.status)
self.cluster_templates.remove(model.uuid)
@testtools.testcase.attr('negative')
def test_get_cluster_template_by_uuid_404(self):
self.assertRaises(
exceptions.NotFound,
self.cluster_template_client.get_cluster_template,
data_utils.rand_uuid())
@testtools.testcase.attr('negative')
def test_update_cluster_template_404(self):
patch_model = datagen.cluster_template_name_patch_data()
self.assertRaises(
exceptions.NotFound,
self.cluster_template_client.patch_cluster_template,
data_utils.rand_uuid(), patch_model)
@testtools.testcase.attr('negative')
def test_delete_cluster_template_404(self):
self.assertRaises(
exceptions.NotFound,
self.cluster_template_client.delete_cluster_template,
data_utils.rand_uuid())
@testtools.testcase.attr('negative')
def test_get_cluster_template_by_name_404(self):
self.assertRaises(
exceptions.NotFound,
self.cluster_template_client.get_cluster_template, 'fooo')
@testtools.testcase.attr('negative')
def test_update_cluster_template_name_not_found(self):
patch_model = datagen.cluster_template_name_patch_data()
self.assertRaises(
exceptions.NotFound,
self.cluster_template_client.patch_cluster_template,
'fooo', patch_model)
@testtools.testcase.attr('negative')
def test_delete_cluster_template_by_name_404(self):
self.assertRaises(
exceptions.NotFound,
self.cluster_template_client.get_cluster_template, 'fooo')
@testtools.testcase.attr('negative')
def test_create_cluster_template_missing_image(self):
gen_model = datagen.cluster_template_data_with_missing_image()
self.assertRaises(
exceptions.BadRequest,
self.cluster_template_client.post_cluster_template, gen_model)
@testtools.testcase.attr('negative')
def test_create_cluster_template_missing_flavor(self):
gen_model = datagen.cluster_template_data_with_missing_flavor()
self.assertRaises(
exceptions.BadRequest,
self.cluster_template_client.post_cluster_template, gen_model)
@testtools.testcase.attr('negative')
def test_create_cluster_template_missing_keypair(self):
gen_model = \
datagen.cluster_template_data_with_missing_keypair()
self.assertRaises(
exceptions.NotFound,
self.cluster_template_client.post_cluster_template, gen_model)
@testtools.testcase.attr('negative')
def test_update_cluster_template_invalid_patch(self):
# get json object
gen_model = \
datagen.cluster_template_data_with_valid_keypair_image_flavor()
resp, old_model = self._create_cluster_template(gen_model)
self.assertRaises(
exceptions.BadRequest,
self.cluster_template_client.patch_cluster_template,
data_utils.rand_uuid(), gen_model)
@testtools.testcase.attr('negative')
def test_create_cluster_template_invalid_network_driver(self):
gen_model = \
datagen.cluster_template_data_with_valid_keypair_image_flavor()
gen_model.network_driver = 'invalid_network_driver'
self.assertRaises(
exceptions.BadRequest,
self.cluster_template_client.post_cluster_template, gen_model)
@testtools.testcase.attr('negative')
def test_create_cluster_template_invalid_volume_driver(self):
gen_model = \
datagen.cluster_template_data_with_valid_keypair_image_flavor()
gen_model.volume_driver = 'invalid_volume_driver'
self.assertRaises(
exceptions.BadRequest,
self.cluster_template_client.post_cluster_template, gen_model)


@ -79,9 +79,9 @@ class BaseMagnumTest(base.BaseTestCase):
])
except Exception:
cls.LOG.error(msg)
msg = (_LE("failed to copy from %{node_address}s "
"to %{base_path}s%{log_name}s-"
"%{node_address}s") %
msg = (_LE("failed to copy from %(node_address)s "
"to %(base_path)s%(log_name)s-"
"%(node_address)s") %
{'node_address': node_address,
'base_path': "/opt/stack/logs/bay-nodes/",
'log_name': log_name})


@ -23,6 +23,10 @@ from magnum.tests.functional.api.v1.models import baymodel_model
from magnum.tests.functional.api.v1.models import baymodelpatch_model
from magnum.tests.functional.api.v1.models import baypatch_model
from magnum.tests.functional.api.v1.models import cert_model
from magnum.tests.functional.api.v1.models import cluster_model
from magnum.tests.functional.api.v1.models import cluster_template_model
from magnum.tests.functional.api.v1.models import cluster_templatepatch_model
from magnum.tests.functional.api.v1.models import clusterpatch_model
from magnum.tests.functional.common import config
@ -334,3 +338,277 @@ def cert_data(bay_uuid, csr_data=None):
model = cert_model.CertEntity.from_dict(data)
return model
def cluster_template_data(**kwargs):
"""Generates random cluster_template data
Keypair and image id cannot be random for the cluster_template to be valid
due to validations for the presence of keypair and image id prior to
cluster_template creation.
:param keypair_id: keypair name
:param image_id: image id or name
:returns: ClusterTemplateEntity with generated data
"""
data = {
"name": data_utils.rand_name('cluster'),
"coe": "swarm",
"tls_disabled": False,
"network_driver": None,
"volume_driver": None,
"docker_volume_size": 3,
"labels": {},
"public": False,
"fixed_network": "192.168.0.0/24",
"dns_nameserver": "8.8.8.8",
"flavor_id": data_utils.rand_name('cluster'),
"master_flavor_id": data_utils.rand_name('cluster'),
"external_network_id": config.Config.nic_id,
"keypair_id": data_utils.rand_name('cluster'),
"image_id": data_utils.rand_name('cluster')
}
data.update(kwargs)
model = cluster_template_model.ClusterTemplateEntity.from_dict(data)
return model
def cluster_template_replace_patch_data(path,
value=data_utils.rand_name('cluster')):
"""Generates random ClusterTemplate patch data
:param path: path to replace
:param value: value to replace in patch
:returns: ClusterTemplatePatchCollection with generated data
"""
data = [{
"path": path,
"value": value,
"op": "replace"
}]
collection = cluster_templatepatch_model.ClusterTemplatePatchCollection
return collection.from_dict(data)
def cluster_template_remove_patch_data(path):
"""Generates ClusterTempalte patch data by removing value
:param path: path to remove
:returns: BayModelPatchCollection with generated data
"""
data = [{
"path": path,
"op": "remove"
}]
collection = cluster_templatepatch_model.ClusterTemplatePatchCollection
return collection.from_dict(data)
def cluster_template_name_patch_data(name=data_utils.rand_name('cluster')):
"""Generates random cluster_template patch data
:param name: name to replace in patch
:returns: ClusterTemplatePatchCollection with generated data
"""
data = [{
"path": "/name",
"value": name,
"op": "replace"
}]
collection = cluster_templatepatch_model.ClusterTemplatePatchCollection
return collection.from_dict(data)
def cluster_template_flavor_patch_data(flavor=data_utils.rand_name('cluster')):
"""Generates random cluster_template patch data
:param flavor: flavor to replace in patch
:returns: ClusterTemplatePatchCollection with generated data
"""
data = [{
"path": "/flavor_id",
"value": flavor,
"op": "replace"
}]
collection = cluster_templatepatch_model.ClusterTemplatePatchCollection
return collection.from_dict(data)
def cluster_template_data_with_valid_keypair_image_flavor():
"""Generates random clustertemplate data with valid data
:returns: ClusterTemplateEntity with generated data
"""
master_flavor = config.Config.master_flavor_id
return cluster_template_data(keypair_id=config.Config.keypair_id,
image_id=config.Config.image_id,
flavor_id=config.Config.flavor_id,
master_flavor_id=master_flavor)
def cluster_template_data_with_missing_image():
"""Generates random cluster_template data with missing image
:returns: ClusterTemplateEntity with generated data
"""
return cluster_template_data(
keypair_id=config.Config.keypair_id,
flavor_id=config.Config.flavor_id,
master_flavor_id=config.Config.master_flavor_id)
def cluster_template_data_with_missing_flavor():
"""Generates random cluster_template data with missing flavor
:returns: ClusterTemplateEntity with generated data
"""
return cluster_template_data(keypair_id=config.Config.keypair_id,
image_id=config.Config.image_id)
def cluster_template_data_with_missing_keypair():
"""Generates random cluster_template data with missing keypair
:returns: ClusterTemplateEntity with generated data
"""
return cluster_template_data(
image_id=config.Config.image_id,
flavor_id=config.Config.flavor_id,
master_flavor_id=config.Config.master_flavor_id)
def cluster_template_valid_data_with_specific_coe(coe):
"""Generates random cluster_template data with valid keypair and image
:param coe: coe
:returns: ClusterTemplateEntity with generated data
"""
return cluster_template_data(keypair_id=config.Config.keypair_id,
image_id=config.Config.image_id, coe=coe)
def valid_swarm_cluster_template(is_public=False):
"""Generates a valid swarm cluster_template with valid data
:returns: ClusterTemplateEntity with generated data
"""
master_flavor_id = config.Config.master_flavor_id
return cluster_template_data(image_id=config.Config.image_id,
fixed_network="192.168.0.0/24",
flavor_id=config.Config.flavor_id,
public=is_public,
dns_nameserver=config.Config.dns_nameserver,
master_flavor_id=master_flavor_id,
keypair_id=config.Config.keypair_id,
coe="swarm", docker_volume_size=3,
cluster_distro=None,
external_network_id=config.Config.nic_id,
http_proxy=None, https_proxy=None,
no_proxy=None, network_driver=None,
volume_driver=None, labels={},
tls_disabled=False)
def cluster_data(name=data_utils.rand_name('cluster'),
cluster_template_id=data_utils.rand_uuid(),
node_count=random_int(1, 5), discovery_url=gen_random_ip(),
create_timeout=random_int(1, 30),
master_count=random_int(1, 5)):
"""Generates random cluster data
cluster_template_id cannot be random for the cluster to be valid due to
validations for the presence of the clustertemplate prior to cluster
creation.
:param name: cluster name (must be unique)
:param cluster_template_id: clustertemplate unique id (must already exist)
:param node_count: number of agents for cluster
:param discovery_url: url provided for node discovery
:param create_timeout: timeout in minutes for cluster create
:param master_count: number of master nodes for the cluster
:returns: ClusterEntity with generated data
"""
data = {
"name": name,
"cluster_template_id": cluster_template_id,
"node_count": node_count,
"discovery_url": None,
"create_timeout": create_timeout,
"master_count": master_count
}
model = cluster_model.ClusterEntity.from_dict(data)
return model
def valid_cluster_data(cluster_template_id,
name=data_utils.rand_name('cluster'),
node_count=1, master_count=1, create_timeout=None):
"""Generates random cluster data with valid
:param cluster_template_id: clustertemplate unique id that already exists
:param name: cluster name (must be unique)
:param node_count: number of agents for cluster
:returns: ClusterEntity with generated data
"""
return cluster_data(cluster_template_id=cluster_template_id, name=name,
master_count=master_count, node_count=node_count,
create_timeout=create_timeout)
def cluster_name_patch_data(name=data_utils.rand_name('cluster')):
"""Generates random clustertemplate patch data
:param name: name to replace in patch
:returns: ClusterPatchCollection with generated data
"""
data = [{
"path": "/name",
"value": name,
"op": "replace"
}]
return clusterpatch_model.ClusterPatchCollection.from_dict(data)
def cluster_api_addy_patch_data(address='0.0.0.0'):
"""Generates random cluster patch data
:param address: api address to replace in patch
:returns: ClusterPatchCollection with generated data
"""
data = [{
"path": "/api_address",
"value": address,
"op": "replace"
}]
return clusterpatch_model.ClusterPatchCollection.from_dict(data)
def cluster_node_count_patch_data(node_count=2):
"""Generates random cluster patch data
:param node_count: node count to replace in patch
:returns: ClusterPatchCollection with generated data
"""
data = [{
"path": "/node_count",
"value": node_count,
"op": "replace"
}]
return clusterpatch_model.ClusterPatchCollection.from_dict(data)
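The generators above are meant to be chained: build a valid template, create a cluster that references it, then build patch documents for updates. A sketch of that composition, using the client names from the ClusterTest file earlier in this patch:

    template = datagen.valid_swarm_cluster_template()
    _, template_model = cluster_template_client.post_cluster_template(template)

    cluster = datagen.valid_cluster_data(
        cluster_template_id=template_model.uuid, node_count=1)
    _, created_cluster = cluster_client.post_cluster(cluster)

    patch = datagen.cluster_name_patch_data(name='renamed-cluster')
    cluster_client.patch_cluster(created_cluster.uuid, patch)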


@ -16,6 +16,8 @@ from tempest.common import credentials_factory as common_creds
from magnum.tests.functional.api.v1.clients import bay_client
from magnum.tests.functional.api.v1.clients import baymodel_client
from magnum.tests.functional.api.v1.clients import cert_client
from magnum.tests.functional.api.v1.clients import cluster_client
from magnum.tests.functional.api.v1.clients import cluster_template_client
from magnum.tests.functional.api.v1.clients import magnum_service_client
from magnum.tests.functional.common import client
from magnum.tests.functional.common import config
@ -29,17 +31,21 @@ class Manager(clients.Manager):
super(Manager, self).__init__(credentials, 'container-infra')
self.auth_provider.orig_base_url = self.auth_provider.base_url
self.auth_provider.base_url = self.bypassed_base_url
auth = self.auth_provider
if request_type == 'baymodel':
self.client = baymodel_client.BayModelClient(self.auth_provider)
self.client = baymodel_client.BayModelClient(auth)
elif request_type == 'bay':
self.client = bay_client.BayClient(self.auth_provider)
self.client = bay_client.BayClient(auth)
elif request_type == 'cert':
self.client = cert_client.CertClient(self.auth_provider)
self.client = cert_client.CertClient(auth)
elif request_type == 'cluster_template':
self.client = cluster_template_client.ClusterTemplateClient(auth)
elif request_type == 'cluster':
self.client = cluster_client.ClusterClient(auth)
elif request_type == 'service':
self.client = magnum_service_client.MagnumServiceClient(
self.auth_provider)
self.client = magnum_service_client.MagnumServiceClient(auth)
else:
self.client = client.MagnumClient(self.auth_provider)
self.client = client.MagnumClient(auth)
def bypassed_base_url(self, filters, auth_data=None):
if (config.Config.magnum_url and
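The request_type branches above are what let a test ask for a typed client; the cluster functional tests earlier in this patch obtain theirs like this (sketch, mirroring ClusterTest.setUp):

    (cluster_template_client,
     keypairs_client) = self.get_clients_with_new_creds(
         type_of_creds='default', request_type='cluster_template')
    (cluster_client, _) = self.get_clients_with_new_creds(
        type_of_creds='default', request_type='cluster')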


@ -33,20 +33,20 @@ class TestRootController(api_base.FunctionalTest):
super(TestRootController, self).setUp()
self.root_expected = {
u'description': u'Magnum is an OpenStack project which '
'aims to provide container management.',
'aims to provide container cluster management.',
u'name': u'OpenStack Magnum API',
u'versions': [{u'id': u'v1',
u'links':
[{u'href': u'http://localhost/v1/',
u'rel': u'self'}],
u'status': u'CURRENT',
u'max_version': u'1.3',
u'min_version': u'1.1'}]}
self.v1_expected = {
u'media_types':
[{u'base': u'application/json',
u'type': u'application/vnd.openstack.magnum.v1+json'}],
u'links': [{u'href': u'http://localhost/v1/',
u'rel': u'self'},
{u'href':
@ -61,6 +61,15 @@ class TestRootController(api_base.FunctionalTest):
u'rel': u'self'},
{u'href': u'http://localhost/baymodels/',
u'rel': u'bookmark'}],
u'clusters': [{u'href': u'http://localhost/v1/clusters/',
u'rel': u'self'},
{u'href': u'http://localhost/clusters/',
u'rel': u'bookmark'}],
u'clustertemplates':
[{u'href': u'http://localhost/v1/clustertemplates/',
u'rel': u'self'},
{u'href': u'http://localhost/clustertemplates/',
u'rel': u'bookmark'}],
u'id': u'v1',
u'certificates': [{u'href': u'http://localhost/v1/certificates/',
u'rel': u'self'},
@ -199,7 +208,6 @@ class TestHeathcheck(api_base.FunctionalTest):
class TestV1Routing(api_base.FunctionalTest):
def test_route_checks_version(self):
self.get_json('/')
self._check_version.assert_called_once_with(mock.ANY,
@ -207,7 +215,6 @@ class TestV1Routing(api_base.FunctionalTest):
class TestCheckVersions(test_base.TestCase):
def setUp(self):
super(TestCheckVersions, self).setUp()


@ -0,0 +1,895 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
from magnum.api import attr_validator
from magnum.api.controllers.v1 import cluster as api_cluster
from magnum.common import exception
from magnum.conductor import api as rpcapi
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils
class TestClusterObject(base.TestCase):
def test_cluster_init(self):
cluster_dict = apiutils.cluster_post_data(cluster_template_id=None)
del cluster_dict['node_count']
del cluster_dict['master_count']
del cluster_dict['create_timeout']
cluster = api_cluster.Cluster(**cluster_dict)
self.assertEqual(1, cluster.node_count)
self.assertEqual(1, cluster.master_count)
self.assertEqual(60, cluster.create_timeout)
class TestListCluster(api_base.FunctionalTest):
_cluster_attrs = ("name", "cluster_template_id", "node_count", "status",
"master_count", "stack_id", "create_timeout")
_expand_cluster_attrs = ("name", "cluster_template_id", "node_count",
"status", "api_address", "discovery_url",
"node_addresses", "master_count",
"master_addresses", "stack_id",
"create_timeout", "status_reason")
def setUp(self):
super(TestListCluster, self).setUp()
obj_utils.create_test_cluster_template(self.context)
def test_empty(self):
response = self.get_json('/clusters')
self.assertEqual([], response['clusters'])
def test_one(self):
cluster = obj_utils.create_test_cluster(self.context)
response = self.get_json('/clusters')
self.assertEqual(cluster.uuid, response['clusters'][0]["uuid"])
self._verify_attrs(self._cluster_attrs, response['clusters'][0])
# Verify attrs do not appear in cluster's get_all response
none_attrs = \
set(self._expand_cluster_attrs) - set(self._cluster_attrs)
self._verify_attrs(none_attrs, response['clusters'][0],
positive=False)
def test_get_one(self):
cluster = obj_utils.create_test_cluster(self.context)
response = self.get_json('/clusters/%s' % cluster['uuid'])
self.assertEqual(cluster.uuid, response['uuid'])
self._verify_attrs(self._expand_cluster_attrs, response)
@mock.patch('magnum.common.clients.OpenStackClients.heat')
def test_get_one_failed_cluster(self, mock_heat):
fake_resources = mock.MagicMock()
fake_resources.resource_name = 'fake_name'
fake_resources.resource_status_reason = 'fake_reason'
ht = mock.MagicMock()
ht.resources.list.return_value = [fake_resources]
mock_heat.return_value = ht
cluster = obj_utils.create_test_cluster(self.context,
status='CREATE_FAILED')
response = self.get_json('/clusters/%s' % cluster['uuid'])
self.assertEqual(cluster.uuid, response['uuid'])
self.assertEqual({'fake_name': 'fake_reason'}, response['faults'])
@mock.patch('magnum.common.clients.OpenStackClients.heat')
def test_get_one_failed_cluster_heatclient_exception(self, mock_heat):
mock_heat.resources.list.side_effect = Exception('fake')
cluster = obj_utils.create_test_cluster(self.context,
status='CREATE_FAILED')
response = self.get_json('/clusters/%s' % cluster['uuid'])
self.assertEqual(cluster.uuid, response['uuid'])
self.assertEqual({}, response['faults'])
def test_get_one_by_name(self):
cluster = obj_utils.create_test_cluster(self.context)
response = self.get_json('/clusters/%s' % cluster['name'])
self.assertEqual(cluster.uuid, response['uuid'])
self._verify_attrs(self._expand_cluster_attrs, response)
def test_get_one_by_name_not_found(self):
response = self.get_json(
'/clusters/not_found',
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_get_one_by_name_multiple_cluster(self):
obj_utils.create_test_cluster(self.context, name='test_cluster',
uuid=uuidutils.generate_uuid())
obj_utils.create_test_cluster(self.context, name='test_cluster',
uuid=uuidutils.generate_uuid())
response = self.get_json('/clusters/test_cluster',
expect_errors=True)
self.assertEqual(409, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_get_all_with_pagination_marker(self):
cluster_list = []
for id_ in range(4):
temp_uuid = uuidutils.generate_uuid()
cluster = obj_utils.create_test_cluster(self.context, id=id_,
uuid=temp_uuid)
cluster_list.append(cluster)
response = self.get_json('/clusters?limit=3&marker=%s'
% cluster_list[2].uuid)
self.assertEqual(1, len(response['clusters']))
self.assertEqual(cluster_list[-1].uuid,
response['clusters'][0]['uuid'])
def test_detail(self):
cluster = obj_utils.create_test_cluster(self.context)
response = self.get_json('/clusters/detail')
self.assertEqual(cluster.uuid, response['clusters'][0]["uuid"])
self._verify_attrs(self._expand_cluster_attrs,
response['clusters'][0])
def test_detail_with_pagination_marker(self):
cluster_list = []
for id_ in range(4):
temp_uuid = uuidutils.generate_uuid()
cluster = obj_utils.create_test_cluster(self.context, id=id_,
uuid=temp_uuid)
cluster_list.append(cluster)
response = self.get_json('/clusters/detail?limit=3&marker=%s'
% cluster_list[2].uuid)
self.assertEqual(1, len(response['clusters']))
self.assertEqual(cluster_list[-1].uuid,
response['clusters'][0]['uuid'])
self._verify_attrs(self._expand_cluster_attrs,
response['clusters'][0])
def test_detail_against_single(self):
cluster = obj_utils.create_test_cluster(self.context)
response = self.get_json('/clusters/%s/detail' % cluster['uuid'],
expect_errors=True)
self.assertEqual(404, response.status_int)
def test_many(self):
bm_list = []
for id_ in range(5):
temp_uuid = uuidutils.generate_uuid()
cluster = obj_utils.create_test_cluster(self.context, id=id_,
uuid=temp_uuid)
bm_list.append(cluster.uuid)
response = self.get_json('/clusters')
self.assertEqual(len(bm_list), len(response['clusters']))
uuids = [b['uuid'] for b in response['clusters']]
self.assertEqual(sorted(bm_list), sorted(uuids))
def test_links(self):
uuid = uuidutils.generate_uuid()
obj_utils.create_test_cluster(self.context, id=1, uuid=uuid)
response = self.get_json('/clusters/%s' % uuid)
self.assertIn('links', response.keys())
self.assertEqual(2, len(response['links']))
self.assertIn(uuid, response['links'][0]['href'])
for l in response['links']:
bookmark = l['rel'] == 'bookmark'
self.assertTrue(self.validate_link(l['href'],
bookmark=bookmark))
def test_collection_links(self):
for id_ in range(5):
obj_utils.create_test_cluster(self.context, id=id_,
uuid=uuidutils.generate_uuid())
response = self.get_json('/clusters/?limit=3')
self.assertEqual(3, len(response['clusters']))
next_marker = response['clusters'][-1]['uuid']
self.assertIn(next_marker, response['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
for id_ in range(5):
obj_utils.create_test_cluster(self.context, id=id_,
uuid=uuidutils.generate_uuid())
response = self.get_json('/clusters')
self.assertEqual(3, len(response['clusters']))
next_marker = response['clusters'][-1]['uuid']
self.assertIn(next_marker, response['next'])
class TestPatch(api_base.FunctionalTest):
def setUp(self):
super(TestPatch, self).setUp()
self.cluster_template_obj = obj_utils.create_test_cluster_template(
self.context)
self.cluster_obj = obj_utils.create_test_cluster(
self.context, name='cluster_example_A', node_count=3)
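# NOTE: the cluster endpoints are exercised through the existing bay
# conductor RPC and DB calls in this patch, which is why the tests here
# mock bay_update_async (and TestPost below mocks bay_create_async).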
p = mock.patch.object(rpcapi.API, 'bay_update_async')
self.mock_bay_update = p.start()
self.mock_bay_update.side_effect = self._simulate_rpc_bay_update
self.addCleanup(p.stop)
def _simulate_rpc_bay_update(self, bay):
bay.save()
return bay
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok(self, mock_utcnow):
new_node_count = 4
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
[{'path': '/node_count',
'value': new_node_count,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_code)
response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
self.assertEqual(new_node_count, response['node_count'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
# Assert nothing else was changed
self.assertEqual(self.cluster_obj.uuid, response['uuid'])
self.assertEqual(self.cluster_obj.baymodel_id,
response['cluster_template_id'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok_by_name(self, mock_utcnow):
new_node_count = 4
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json('/clusters/%s' % self.cluster_obj.name,
[{'path': '/node_count',
'value': new_node_count,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_code)
response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
self.assertEqual(new_node_count, response['node_count'])
return_updated_at = timeutils.parse_isotime(
response['updated_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_updated_at)
# Assert nothing else was changed
self.assertEqual(self.cluster_obj.uuid, response['uuid'])
self.assertEqual(self.cluster_obj.baymodel_id,
response['cluster_template_id'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok_by_name_not_found(self, mock_utcnow):
name = 'not_found'
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.patch_json('/clusters/%s' % name,
[{'path': '/name', 'value': name,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(404, response.status_code)
def test_replace_cluster_template_id_failed(self):
cluster_template = obj_utils.create_test_cluster_template(
self.context,
uuid=uuidutils.generate_uuid())
response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
[{'path': '/cluster_template_id',
'value': cluster_template.uuid,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_ok_by_name_multiple_cluster(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
obj_utils.create_test_cluster(self.context, name='test_cluster',
uuid=uuidutils.generate_uuid())
obj_utils.create_test_cluster(self.context, name='test_cluster',
uuid=uuidutils.generate_uuid())
response = self.patch_json('/clusters/test_cluster',
[{'path': '/name',
'value': 'test_cluster',
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(409, response.status_code)
def test_replace_non_existent_cluster_template_id(self):
response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
[{'path': '/cluster_template_id',
'value': uuidutils.generate_uuid(),
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
def test_replace_invalid_node_count(self):
response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
[{'path': '/node_count', 'value': -1,
'op': 'replace'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
def test_replace_non_existent_cluster(self):
response = self.patch_json('/clusters/%s' %
uuidutils.generate_uuid(),
[{'path': '/name',
'value': 'cluster_example_B',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_replace_cluster_name_failed(self):
response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
[{'path': '/name',
'value': 'cluster_example_B',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_add_non_existent_property(self):
response = self.patch_json(
'/clusters/%s' % self.cluster_obj.uuid,
[{'path': '/foo', 'value': 'bar', 'op': 'add'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_remove_ok(self):
response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
self.assertIsNotNone(response['name'])
response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
[{'path': '/node_count',
'op': 'remove'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_code)
response = self.get_json('/clusters/%s' % self.cluster_obj.uuid)
# only node_count may be removed for a cluster; its default value is 1
self.assertEqual(1, response['node_count'])
# Assert nothing else was changed
self.assertEqual(self.cluster_obj.uuid, response['uuid'])
self.assertEqual(self.cluster_obj.baymodel_id,
response['cluster_template_id'])
self.assertEqual(self.cluster_obj.name, response['name'])
self.assertEqual(self.cluster_obj.master_count,
response['master_count'])
def test_remove_uuid(self):
response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
[{'path': '/uuid', 'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_remove_cluster_template_id(self):
response = self.patch_json('/clusters/%s' % self.cluster_obj.uuid,
[{'path': '/cluster_template_id',
'op': 'remove'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_remove_non_existent_property(self):
response = self.patch_json(
'/clusters/%s' % self.cluster_obj.uuid,
[{'path': '/non-existent', 'op': 'remove'}],
expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_code)
self.assertTrue(response.json['errors'])
class TestPost(api_base.FunctionalTest):
def setUp(self):
super(TestPost, self).setUp()
self.cluster_template = obj_utils.create_test_cluster_template(
self.context)
p = mock.patch.object(rpcapi.API, 'bay_create_async')
self.mock_bay_create = p.start()
self.mock_bay_create.side_effect = self._simulate_rpc_bay_create
self.addCleanup(p.stop)
p = mock.patch.object(attr_validator, 'validate_os_resources')
self.mock_valid_os_res = p.start()
self.addCleanup(p.stop)
def _simulate_rpc_bay_create(self, bay, bay_create_timeout):
bay.create()
return bay
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_cluster(self, mock_utcnow):
bdict = apiutils.cluster_post_data()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json('/clusters', bdict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))
def test_create_cluster_set_project_id_and_user_id(self):
bdict = apiutils.cluster_post_data()
def _simulate_rpc_bay_create(bay, bay_create_timeout):
self.assertEqual(self.context.project_id, bay.project_id)
self.assertEqual(self.context.user_id, bay.user_id)
bay.create()
return bay
self.mock_bay_create.side_effect = _simulate_rpc_bay_create
self.post_json('/clusters', bdict)
def test_create_cluster_doesnt_contain_id(self):
with mock.patch.object(self.dbapi, 'create_bay',
wraps=self.dbapi.create_bay) as cc_mock:
bdict = apiutils.cluster_post_data(name='cluster_example_A')
response = self.post_json('/clusters', bdict)
cc_mock.assert_called_once_with(mock.ANY)
# Check that 'id' is not in first arg of positional args
self.assertNotIn('id', cc_mock.call_args[0][0])
self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))
def test_create_cluster_generate_uuid(self):
bdict = apiutils.cluster_post_data()
del bdict['uuid']
response = self.post_json('/clusters', bdict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))
def test_create_cluster_no_cluster_template_id(self):
bdict = apiutils.cluster_post_data()
del bdict['cluster_template_id']
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
def test_create_cluster_with_non_existent_cluster_template_id(self):
temp_uuid = uuidutils.generate_uuid()
bdict = apiutils.cluster_post_data(cluster_template_id=temp_uuid)
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_with_cluster_template_name(self):
modelname = self.cluster_template.name
bdict = apiutils.cluster_post_data(cluster_template_id=modelname)
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_node_count_zero(self):
bdict = apiutils.cluster_post_data()
bdict['node_count'] = 0
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_with_node_count_negative(self):
bdict = apiutils.cluster_post_data()
bdict['node_count'] = -1
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_with_no_node_count(self):
bdict = apiutils.cluster_post_data()
del bdict['node_count']
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_master_count_zero(self):
bdict = apiutils.cluster_post_data()
bdict['master_count'] = 0
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_with_no_master_count(self):
bdict = apiutils.cluster_post_data()
del bdict['master_count']
response = self.post_json('/clusters', bdict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_invalid_long_name(self):
bdict = apiutils.cluster_post_data(name='x' * 243)
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_with_invalid_integer_name(self):
bdict = apiutils.cluster_post_data(name='123456')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_with_invalid_integer_str_name(self):
bdict = apiutils.cluster_post_data(name='123456test_cluster')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_with_hyphen_invalid_at_start_name(self):
bdict = apiutils.cluster_post_data(name='-test_cluster')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_with_period_invalid_at_start_name(self):
bdict = apiutils.cluster_post_data(name='.test_cluster')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_with_underscore_invalid_at_start_name(self):
bdict = apiutils.cluster_post_data(name='_test_cluster')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_with_valid_str_int_name(self):
bdict = apiutils.cluster_post_data(name='test_cluster123456')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_hyphen_valid_name(self):
bdict = apiutils.cluster_post_data(name='test-cluster')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_period_valid_name(self):
bdict = apiutils.cluster_post_data(name='test.cluster')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_period_at_end_valid_name(self):
bdict = apiutils.cluster_post_data(name='testcluster.')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_hyphen_at_end_valid_name(self):
bdict = apiutils.cluster_post_data(name='testcluster-')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_underscore_at_end_valid_name(self):
bdict = apiutils.cluster_post_data(name='testcluster_')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_mix_special_char_valid_name(self):
bdict = apiutils.cluster_post_data(name='test.-_cluster')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_capital_letter_start_valid_name(self):
bdict = apiutils.cluster_post_data(name='Testcluster')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_invalid_empty_name(self):
bdict = apiutils.cluster_post_data(name='')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_without_name(self):
bdict = apiutils.cluster_post_data()
del bdict['name']
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_timeout_none(self):
bdict = apiutils.cluster_post_data()
bdict['create_timeout'] = None
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_no_timeout(self):
def _simulate_rpc_bay_create(bay, bay_create_timeout):
self.assertEqual(60, bay_create_timeout)
bay.create()
return bay
self.mock_bay_create.side_effect = _simulate_rpc_bay_create
bdict = apiutils.cluster_post_data()
del bdict['create_timeout']
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_timeout_negative(self):
bdict = apiutils.cluster_post_data()
bdict['create_timeout'] = -1
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['errors'])
def test_create_cluster_with_timeout_zero(self):
bdict = apiutils.cluster_post_data()
bdict['create_timeout'] = 0
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_invalid_flavor(self):
bdict = apiutils.cluster_post_data()
self.mock_valid_os_res.side_effect = exception.FlavorNotFound(
'test-flavor')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertTrue(self.mock_valid_os_res.called)
self.assertEqual(400, response.status_int)
def test_create_cluster_with_invalid_ext_network(self):
bdict = apiutils.cluster_post_data()
self.mock_valid_os_res.side_effect = \
exception.ExternalNetworkNotFound('test-net')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertTrue(self.mock_valid_os_res.called)
self.assertEqual(400, response.status_int)
def test_create_cluster_with_invalid_keypair(self):
bdict = apiutils.cluster_post_data()
self.mock_valid_os_res.side_effect = exception.KeyPairNotFound(
'test-key')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertTrue(self.mock_valid_os_res.called)
self.assertEqual(404, response.status_int)
def test_create_cluster_with_nonexist_image(self):
bdict = apiutils.cluster_post_data()
self.mock_valid_os_res.side_effect = exception.ImageNotFound(
'test-img')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertTrue(self.mock_valid_os_res.called)
self.assertEqual(400, response.status_int)
def test_create_cluster_with_multi_images_same_name(self):
bdict = apiutils.cluster_post_data()
self.mock_valid_os_res.side_effect = exception.Conflict('test-img')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertTrue(self.mock_valid_os_res.called)
self.assertEqual(409, response.status_int)
def test_create_cluster_with_on_os_distro_image(self):
bdict = apiutils.cluster_post_data()
self.mock_valid_os_res.side_effect = \
exception.OSDistroFieldNotFound('img')
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertTrue(self.mock_valid_os_res.called)
self.assertEqual(400, response.status_int)
def test_create_cluster_with_no_lb_one_node(self):
cluster_template = obj_utils.create_test_cluster_template(
self.context, name='foo', uuid='foo', master_lb_enabled=False)
bdict = apiutils.cluster_post_data(
cluster_template_id=cluster_template.name, master_count=1)
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(202, response.status_int)
def test_create_cluster_with_no_lb_multi_node(self):
cluster_template = obj_utils.create_test_cluster_template(
self.context, name='foo', uuid='foo', master_lb_enabled=False)
bdict = apiutils.cluster_post_data(
cluster_template_id=cluster_template.name, master_count=3)
response = self.post_json('/clusters', bdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
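Taken together, the name cases above pin down the accepted cluster-name format: the name must start with a letter, may then contain letters, digits, dots, hyphens and underscores, and a 243-character name is rejected while shorter ones pass. A minimal check that satisfies exactly these cases (the regex, length cap and helper name are inferred from the tests, not taken from Magnum's actual validator):

import re

# Inferred from the tests above: leading letter, then letters, digits,
# '.', '-' or '_'; 'x' * 243 is rejected, so cap the length at 242.
_CLUSTER_NAME_RE = re.compile(r'^[A-Za-z][A-Za-z0-9_.-]*$')


def is_valid_cluster_name(name):
    """Illustrative check mirroring the accepted/rejected names above."""
    return (bool(name) and len(name) <= 242
            and _CLUSTER_NAME_RE.match(name) is not None)


assert is_valid_cluster_name('test_cluster123456')
assert is_valid_cluster_name('test.-_cluster')
assert not is_valid_cluster_name('123456')
assert not is_valid_cluster_name('-test_cluster')
assert not is_valid_cluster_name('x' * 243)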
class TestDelete(api_base.FunctionalTest):
def setUp(self):
super(TestDelete, self).setUp()
self.cluster_template = obj_utils.create_test_cluster_template(
self.context)
self.cluster = obj_utils.create_test_cluster(self.context)
p = mock.patch.object(rpcapi.API, 'bay_delete_async')
self.mock_bay_delete = p.start()
self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete
self.addCleanup(p.stop)
def _simulate_rpc_bay_delete(self, bay_uuid):
bay = objects.Bay.get_by_uuid(self.context, bay_uuid)
bay.destroy()
def test_delete_cluster(self):
self.delete('/clusters/%s' % self.cluster.uuid)
response = self.get_json('/clusters/%s' % self.cluster.uuid,
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_delete_cluster_not_found(self):
uuid = uuidutils.generate_uuid()
response = self.delete('/clusters/%s' % uuid, expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_delete_cluster_with_name_not_found(self):
response = self.delete('/clusters/not_found', expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_delete_cluster_with_name(self):
response = self.delete('/clusters/%s' % self.cluster.name,
expect_errors=True)
self.assertEqual(204, response.status_int)
def test_delete_multiple_cluster_by_name(self):
obj_utils.create_test_cluster(self.context, name='test_cluster',
uuid=uuidutils.generate_uuid())
obj_utils.create_test_cluster(self.context, name='test_cluster',
uuid=uuidutils.generate_uuid())
response = self.delete('/clusters/test_cluster', expect_errors=True)
self.assertEqual(409, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
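The delete tests above, like the create tests before them, never reach a real conductor: the RPC client method is patched in setUp and its side_effect does the database work locally. A standalone sketch of that mock pattern (FakeConductorAPI and the test class are illustrative placeholders, not Magnum code):

import unittest
from unittest import mock


class FakeConductorAPI(object):
    """Stand-in for the RPC client whose method the tests patch."""

    def bay_delete(self, uuid):
        raise RuntimeError('must not be called over RPC in unit tests')


class ExampleDeleteTest(unittest.TestCase):
    def setUp(self):
        super(ExampleDeleteTest, self).setUp()
        self.deleted = []
        # Same shape as patching rpcapi.API.bay_delete_async above:
        # route the RPC call to a local side effect, undo it on cleanup.
        p = mock.patch.object(FakeConductorAPI, 'bay_delete')
        self.mock_delete = p.start()
        self.mock_delete.side_effect = self.deleted.append
        self.addCleanup(p.stop)

    def test_delete_records_uuid(self):
        FakeConductorAPI().bay_delete('some-uuid')
        self.assertEqual(['some-uuid'], self.deleted)


if __name__ == '__main__':
    unittest.main()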
class TestClusterPolicyEnforcement(api_base.FunctionalTest):
def setUp(self):
super(TestClusterPolicyEnforcement, self).setUp()
obj_utils.create_test_cluster_template(self.context)
def _common_policy_check(self, rule, func, *arg, **kwarg):
self.policy.set_rules({rule: "project:non_fake"})
response = func(*arg, **kwarg)
self.assertEqual(403, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(
"Policy doesn't allow %s to be performed." % rule,
response.json['errors'][0]['detail'])
def test_policy_disallow_get_all(self):
self._common_policy_check(
"cluster:get_all", self.get_json, '/clusters', expect_errors=True)
def test_policy_disallow_get_one(self):
self.cluster = obj_utils.create_test_cluster(self.context)
self._common_policy_check(
"cluster:get", self.get_json, '/clusters/%s' % self.cluster.uuid,
expect_errors=True)
def test_policy_disallow_detail(self):
self._common_policy_check(
"cluster:detail", self.get_json,
'/clusters/%s/detail' % uuidutils.generate_uuid(),
expect_errors=True)
def test_policy_disallow_update(self):
self.cluster = obj_utils.create_test_cluster(self.context,
name='cluster_example_A',
node_count=3)
self._common_policy_check(
"cluster:update", self.patch_json, '/clusters/%s' %
self.cluster.name,
[{'path': '/name', 'value': "new_name", 'op': 'replace'}],
expect_errors=True)
def test_policy_disallow_create(self):
bdict = apiutils.cluster_post_data(name='cluster_example_A')
self._common_policy_check(
"cluster:create", self.post_json, '/clusters', bdict,
expect_errors=True)
def _simulate_rpc_bay_delete(self, bay_uuid):
bay = objects.Bay.get_by_uuid(self.context, bay_uuid)
bay.destroy()
def test_policy_disallow_delete(self):
p = mock.patch.object(rpcapi.API, 'bay_delete')
self.mock_bay_delete = p.start()
self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete
self.addCleanup(p.stop)
self.cluster = obj_utils.create_test_cluster(self.context)
self._common_policy_check(
"cluster:delete", self.delete, '/clusters/%s' %
self.cluster.uuid,
expect_errors=True)
def _owner_check(self, rule, func, *args, **kwargs):
self.policy.set_rules({rule: "user_id:%(user_id)s"})
response = func(*args, **kwargs)
self.assertEqual(403, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn(
"Policy doesn't allow %s to be performed." % rule,
response.json['errors'][0]['detail'])
def test_policy_only_owner_get_one(self):
cluster = obj_utils.create_test_cluster(self.context,
user_id='another')
self._owner_check("cluster:get", self.get_json,
'/clusters/%s' % cluster.uuid,
expect_errors=True)
def test_policy_only_owner_update(self):
cluster = obj_utils.create_test_cluster(self.context,
user_id='another')
self._owner_check(
"cluster:update", self.patch_json,
'/clusters/%s' % cluster.uuid,
[{'path': '/name', 'value': "new_name", 'op': 'replace'}],
expect_errors=True)
def test_policy_only_owner_delete(self):
cluster = obj_utils.create_test_cluster(self.context,
user_id='another')
self._owner_check("cluster:delete", self.delete,
'/clusters/%s' % cluster.uuid,
expect_errors=True)
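Both policy helpers in this class rely on the same matching semantics: a rule such as "user_id:%(user_id)s" passes only when the credential named on the left equals the value interpolated from the target on the right, and "project:non_fake" can never match the test context, so every call has to come back as 403. A toy illustration of that matching (plain Python, not the oslo.policy engine; check_rule is an invented helper):

def check_rule(rule, target, creds):
    """Toy evaluator for the 'kind:value' rules used in the tests above."""
    kind, _, match = rule.partition(':')
    # Interpolate target fields, e.g. 'user_id:%(user_id)s' -> target value.
    expected = match % target
    return str(creds.get(kind)) == expected


# "user_id:%(user_id)s": the owner passes, any other user is rejected (403).
target = {'user_id': 'another'}
assert check_rule('user_id:%(user_id)s', target, {'user_id': 'another'})
assert not check_rule('user_id:%(user_id)s', target, {'user_id': 'fake_user'})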

File diff suppressed because it is too large

View File

@ -18,6 +18,8 @@ import pytz
from magnum.api.controllers.v1 import bay as bay_controller
from magnum.api.controllers.v1 import baymodel as baymodel_controller
from magnum.api.controllers.v1 import cluster as cluster_controller
from magnum.api.controllers.v1 import cluster_template as cluster_tmp_ctrl
from magnum.tests.unit.db import utils
@ -33,6 +35,12 @@ def baymodel_post_data(**kw):
    return remove_internal(baymodel, internal)
def cluster_template_post_data(**kw):
cluster_template = utils.get_test_baymodel(**kw)
internal = cluster_tmp_ctrl.ClusterTemplatePatchType.internal_attrs()
return remove_internal(cluster_template, internal)
def bay_post_data(**kw):
    bay = utils.get_test_bay(**kw)
    bay['bay_create_timeout'] = kw.get('bay_create_timeout', 15)
@ -40,6 +48,17 @@ def bay_post_data(**kw):
    return remove_internal(bay, internal)
def cluster_post_data(**kw):
cluster = utils.get_test_bay(**kw)
cluster['create_timeout'] = kw.get('create_timeout', 15)
cluster['cluster_template_id'] = kw.get('cluster_template_id',
cluster['baymodel_id'])
del cluster['bay_create_timeout']
del cluster['baymodel_id']
internal = cluster_controller.ClusterPatchType.internal_attrs()
return remove_internal(cluster, internal)
def cert_post_data(**kw):
    return {
        'bay_uuid': kw.get('bay_uuid', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
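The new cluster_post_data helper reuses the bay fixture and only renames the two request fields whose names differ between the APIs. The transformation amounts to this (sketched with an invented example dict rather than the real get_test_bay output):

# Hypothetical bay fixture, reduced to the fields the rename touches.
bay = {'name': 'c1', 'baymodel_id': 'template-uuid', 'bay_create_timeout': 15}

cluster = dict(bay)
cluster['create_timeout'] = cluster.pop('bay_create_timeout')
cluster['cluster_template_id'] = cluster.pop('baymodel_id')

assert cluster == {'name': 'c1',
                   'cluster_template_id': 'template-uuid',
                   'create_timeout': 15}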

View File

@ -456,7 +456,7 @@ class TestHandler(db_base.DbTestCase):
        self.assertEqual(1,
                         cert_manager.delete_certificates_from_bay.call_count)
        # The bay has been destroyed
-       self.assertRaises(exception.BayNotFound,
+       self.assertRaises(exception.ClusterNotFound,
                          objects.Bay.get, self.context, self.bay.uuid)

    @patch('magnum.conductor.handlers.bay_conductor.cert_manager')

View File

@ -34,7 +34,7 @@ class DbBayTestCase(base.DbTestCase):
    def test_create_bay_already_exists(self):
        utils.create_test_bay()
-       self.assertRaises(exception.BayAlreadyExists,
+       self.assertRaises(exception.ClusterAlreadyExists,
                          utils.create_test_bay)

    def test_get_bay_by_id(self):
@ -56,10 +56,10 @@ class DbBayTestCase(base.DbTestCase):
        self.assertEqual(bay.uuid, res.uuid)

    def test_get_bay_that_does_not_exist(self):
-       self.assertRaises(exception.BayNotFound,
+       self.assertRaises(exception.ClusterNotFound,
                          self.dbapi.get_bay_by_id,
                          self.context, 999)
-       self.assertRaises(exception.BayNotFound,
+       self.assertRaises(exception.ClusterNotFound,
                          self.dbapi.get_bay_by_uuid,
                          self.context,
                          '12345678-9999-0000-aaaa-123456789012')
@ -174,7 +174,7 @@ class DbBayTestCase(base.DbTestCase):
        self.assertIsNotNone(self.dbapi.get_bay_by_id(self.context,
                                                      bay.id))
        self.dbapi.destroy_bay(bay.id)
-       self.assertRaises(exception.BayNotFound,
+       self.assertRaises(exception.ClusterNotFound,
                          self.dbapi.get_bay_by_id,
                          self.context, bay.id)
@ -183,12 +183,12 @@ class DbBayTestCase(base.DbTestCase):
        self.assertIsNotNone(self.dbapi.get_bay_by_uuid(self.context,
                                                        bay.uuid))
        self.dbapi.destroy_bay(bay.uuid)
-       self.assertRaises(exception.BayNotFound,
+       self.assertRaises(exception.ClusterNotFound,
                          self.dbapi.get_bay_by_uuid, self.context,
                          bay.uuid)

    def test_destroy_bay_that_does_not_exist(self):
-       self.assertRaises(exception.BayNotFound,
+       self.assertRaises(exception.ClusterNotFound,
                          self.dbapi.destroy_bay,
                          '12345678-9999-0000-aaaa-123456789012')
@ -202,7 +202,7 @@ class DbBayTestCase(base.DbTestCase):
    def test_update_bay_not_found(self):
        bay_uuid = uuidutils.generate_uuid()
-       self.assertRaises(exception.BayNotFound, self.dbapi.update_bay,
+       self.assertRaises(exception.ClusterNotFound, self.dbapi.update_bay,
                          bay_uuid, {'node_count': 5})

    def test_update_bay_uuid(self):

View File

@ -100,7 +100,7 @@ class DbBaymodelTestCase(base.DbTestCase):
        self.assertEqual(bm['id'], baymodel.id)

    def test_get_baymodel_that_does_not_exist(self):
-       self.assertRaises(exception.BayModelNotFound,
+       self.assertRaises(exception.ClusterTemplateNotFound,
                          self.dbapi.get_baymodel_by_id, self.context, 666)

    def test_get_baymodel_by_name(self):
@ -128,7 +128,7 @@ class DbBaymodelTestCase(base.DbTestCase):
                          self.context, 'bm')

    def test_get_baymodel_by_name_not_found(self):
-       self.assertRaises(exception.BayModelNotFound,
+       self.assertRaises(exception.ClusterTemplateNotFound,
                          self.dbapi.get_baymodel_by_name,
                          self.context, 'not_found')
@ -138,7 +138,7 @@ class DbBaymodelTestCase(base.DbTestCase):
        self.assertEqual('updated-model', res.name)

    def test_update_baymodel_that_does_not_exist(self):
-       self.assertRaises(exception.BayModelNotFound,
+       self.assertRaises(exception.ClusterTemplateNotFound,
                          self.dbapi.update_baymodel, 666, {'name': ''})

    def test_update_baymodel_uuid(self):
@ -150,7 +150,7 @@ class DbBaymodelTestCase(base.DbTestCase):
    def test_destroy_baymodel(self):
        bm = utils.create_test_baymodel()
        self.dbapi.destroy_baymodel(bm['id'])
-       self.assertRaises(exception.BayModelNotFound,
+       self.assertRaises(exception.ClusterTemplateNotFound,
                          self.dbapi.get_baymodel_by_id,
                          self.context, bm['id'])
@ -160,23 +160,23 @@ class DbBaymodelTestCase(base.DbTestCase):
        self.assertIsNotNone(self.dbapi.get_baymodel_by_uuid(self.context,
                                                             uuid))
        self.dbapi.destroy_baymodel(uuid)
-       self.assertRaises(exception.BayModelNotFound,
+       self.assertRaises(exception.ClusterTemplateNotFound,
                          self.dbapi.get_baymodel_by_uuid, self.context, uuid)

    def test_destroy_baymodel_that_does_not_exist(self):
-       self.assertRaises(exception.BayModelNotFound,
+       self.assertRaises(exception.ClusterTemplateNotFound,
                          self.dbapi.destroy_baymodel, 666)

    def test_destroy_baymodel_that_referenced_by_bays(self):
        bm = utils.create_test_baymodel()
        bay = utils.create_test_bay(baymodel_id=bm['uuid'])
        self.assertEqual(bm['uuid'], bay.baymodel_id)
-       self.assertRaises(exception.BayModelReferenced,
+       self.assertRaises(exception.ClusterTemplateReferenced,
                          self.dbapi.destroy_baymodel, bm['id'])

    def test_create_baymodel_already_exists(self):
        uuid = uuidutils.generate_uuid()
        utils.create_test_baymodel(id=1, uuid=uuid)
-       self.assertRaises(exception.BayModelAlreadyExists,
+       self.assertRaises(exception.ClusterTemplateAlreadyExists,
                          utils.create_test_baymodel,
                          id=2, uuid=uuid)
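The hunks in this file and the preceding test modules are pure exception renames. Collected in one place, the old-to-new mapping they exercise is the following (summarized from the diff itself; the dict is only a convenient way to list it):

# Exception classes renamed by this patch, as exercised by the tests above.
RENAMED_EXCEPTIONS = {
    'BayNotFound': 'ClusterNotFound',
    'BayAlreadyExists': 'ClusterAlreadyExists',
    'BayModelNotFound': 'ClusterTemplateNotFound',
    'BayModelAlreadyExists': 'ClusterTemplateAlreadyExists',
    'BayModelReferenced': 'ClusterTemplateReferenced',
}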

View File

@ -53,7 +53,7 @@ def create_test_baymodel(context, **kw):
    baymodel = get_test_baymodel(context, **kw)
    try:
        baymodel.create()
-   except exception.BayModelAlreadyExists:
+   except exception.ClusterTemplateAlreadyExists:
        baymodel = objects.BayModel.get(context, baymodel.uuid)
    return baymodel
@ -87,6 +87,42 @@ def create_test_bay(context, **kw):
    return bay
def get_test_cluster_template(context, **kw):
"""Return a ClusterTemplate object with appropriate attributes.
NOTE: Object model is the same for ClusterTemplate and
BayModel
"""
return get_test_baymodel(context, **kw)
def create_test_cluster_template(context, **kw):
"""Create and return a test ClusterTemplate object.
NOTE: Object model is the same for ClusterTemplate and
BayModel
"""
return create_test_baymodel(context, **kw)
def get_test_cluster(context, **kw):
"""Return a Cluster object with appropriate attributes.
NOTE: Object model is the same for Cluster and
Bay
"""
return get_test_bay(context, **kw)
def create_test_cluster(context, **kw):
"""Create and return a test cluster object.
NOTE: Object model is the same for Cluster and
Bay
"""
return create_test_bay(context, **kw)
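Because the cluster helpers just added are thin aliases over the bay helpers, tests can set up fixtures in cluster terminology while the stored objects are still Bay/BayModel. A self-contained sketch of the aliasing idea (get_test_bay here is a stand-in for the real fixture, not Magnum's implementation):

def get_test_bay(context, **kw):
    """Stand-in fixture returning a plain dict of bay attributes."""
    return dict({'name': 'bay1', 'node_count': 3}, **kw)


def get_test_cluster(context, **kw):
    # Alias only: same data, cluster-flavoured name.
    return get_test_bay(context, **kw)


assert get_test_cluster(None, name='c1') == get_test_bay(None, name='c1')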
def get_test_x509keypair(context, **kw):
    """Return a X509KeyPair object with appropriate attributes.