Rename Bay DB, Object, and internal usage to Cluster
This is patch 3 of 3 changing the internal usage of the terms Bay and BayModel. It updates Bay to Cluster in the DB and Object layers, as well as every usage of them. No functionality should change with this patch, only naming and DB updates.

Change-Id: Ife04b0f944ded03ca932d70e09e6766d09cf5d9f
Implements: blueprint rename-bay-to-cluster
commit 729c2d0ab4
parent 68463dd005
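The compatibility idea that runs through the API hunks below is that the legacy Bay object keeps its old attribute names (baymodel_id, bay_create_timeout, bay_faults) as aliases for the renamed Cluster fields (cluster_template_id, create_timeout, faults). The following standalone sketch only illustrates that mapping; it is not the magnum code itself (the real Bay class inherits from base.APIBase and uses wtypes.Unset rather than plain dicts), and bay_view/NEW_TO_OLD are hypothetical names used here for illustration:

# Illustrative sketch of the bay -> cluster alias mapping used in this patch.
# Hypothetical helper, not part of magnum.
NEW_TO_OLD = {
    'cluster_template_id': 'baymodel_id',
    'create_timeout': 'bay_create_timeout',
    'faults': 'bay_faults',
}

def bay_view(cluster_fields):
    """Render new-style Cluster fields under the legacy Bay attribute names."""
    view = dict(cluster_fields)
    for new_name, old_name in NEW_TO_OLD.items():
        if new_name in view:
            view[old_name] = view.pop(new_name)
    return view

# A v1 /bays client still sees baymodel_id and bay_create_timeout:
print(bay_view({'uuid': 'example-uuid',
                'cluster_template_id': 'template-1',
                'create_timeout': 15}))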
@@ -143,12 +143,12 @@ class Controller(rest.RestController):
Example:
@base.Controller.api_version("1.1", "1.2")
@expose.expose(Cluster, types.uuid_or_name)
-def get_one(self, bay_ident):
+def get_one(self, cluster_ident):
{...code for versions 1.1 to 1.2...}

@base.Controller.api_version("1.3")
@expose.expose(Cluster, types.uuid_or_name)
-def get_one(self, bay_ident):
+def get_one(self, cluster_ident):
{...code for versions 1.3 to latest}

@min_ver: string representing minimum version
@@ -28,7 +28,7 @@ from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api import expose
from magnum.api import utils as api_utils
-from magnum.api.validation import validate_bay_properties
+from magnum.api.validation import validate_cluster_properties
from magnum.common import clients
from magnum.common import exception
from magnum.common import name_generator
@@ -66,7 +66,7 @@ class Bay(base.APIBase):
self._baymodel_id = baymodel.uuid
except exception.ClusterTemplateNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
-# response for a POST request to create a Bay
+# response for a POST request to create a Cluster
e.code = 400 # BadRequest
raise
elif value == wtypes.Unset:
@@ -99,7 +99,7 @@ class Bay(base.APIBase):
stack_id = wsme.wsattr(wtypes.text, readonly=True)
"""Stack id of the heat stack"""

-status = wtypes.Enum(str, *fields.BayStatus.ALL)
+status = wtypes.Enum(str, *fields.ClusterStatus.ALL)
"""Status of the bay from the heat stack"""

status_reason = wtypes.text
@@ -131,13 +131,43 @@ class Bay(base.APIBase):
super(Bay, self).__init__()

self.fields = []
-for field in objects.Bay.fields:
+for field in objects.Cluster.fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))

+# Set the renamed attributes for bay backwards compatibility
+self.fields.append('baymodel_id')
+if 'baymodel_id' in kwargs.keys():
+setattr(self, 'cluster_template_id',
+kwargs.get('baymodel_id', None))
+setattr(self, 'baymodel_id',
+kwargs.get('baymodel_id', None))
+else:
+setattr(self, 'baymodel_id', kwargs.get('cluster_template_id',
+None))
+
+self.fields.append('bay_create_timeout')
+if 'bay_create_timeout' in kwargs.keys():
+setattr(self, 'create_timeout',
+kwargs.get('bay_create_timeout', wtypes.Unset))
+setattr(self, 'bay_create_timeout',
+kwargs.get('bay_create_timeout', wtypes.Unset))
+else:
+setattr(self, 'bay_create_timeout', kwargs.get('create_timeout',
+wtypes.Unset))
+
+self.fields.append('bay_faults')
+if 'bay_faults' in kwargs.keys():
+setattr(self, 'faults',
+kwargs.get('bay_faults', wtypes.Unset))
+setattr(self, 'bay_faults',
+kwargs.get('bay_faults', wtypes.Unset))
+else:
+setattr(self, 'bay_faults', kwargs.get('faults', wtypes.Unset))
+
@staticmethod
def _convert_with_links(bay, url, expand=True):
if not expand:
@@ -167,7 +197,7 @@ class Bay(base.APIBase):
master_count=1,
bay_create_timeout=15,
stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63',
-status=fields.BayStatus.CREATE_COMPLETE,
+status=fields.ClusterStatus.CREATE_COMPLETE,
status_reason="CREATE completed successfully",
api_address='172.24.4.3',
node_addresses=['172.24.4.4', '172.24.4.5'],
@@ -177,6 +207,24 @@ class Bay(base.APIBase):
container_version=None)
return cls._convert_with_links(sample, 'http://localhost:9511', expand)

+def as_dict(self):
+"""Render this object as a dict of its fields."""
+
+# Override this for old bay values
+d = super(Bay, self).as_dict()
+
+d['cluster_template_id'] = d['baymodel_id']
+del d['baymodel_id']
+
+d['create_timeout'] = d['bay_create_timeout']
+del d['bay_create_timeout']
+
+if 'bay_faults' in d.keys():
+d['faults'] = d['bay_faults']
+del d['bay_faults']
+
+return d
+

class BayPatchType(types.JsonPatchType):
_api_base = Bay
@@ -239,10 +287,10 @@ class BaysController(base.Controller):

marker_obj = None
if marker:
-marker_obj = objects.Bay.get_by_uuid(pecan.request.context,
+marker_obj = objects.Cluster.get_by_uuid(pecan.request.context,
marker)

-bays = objects.Bay.list(pecan.request.context, limit,
+bays = objects.Cluster.list(pecan.request.context, limit,
marker_obj, sort_key=sort_key,
sort_dir=sort_dir)

@@ -323,13 +371,13 @@ class BaysController(base.Controller):
:param bay_ident: UUID of a bay or logical name of the bay.
"""
context = pecan.request.context
-bay = api_utils.get_resource('Bay', bay_ident)
+bay = api_utils.get_resource('Cluster', bay_ident)
policy.enforce(context, 'bay:get', bay,
action='bay:get')

bay = Bay.convert_with_links(bay)

-if bay.status in fields.BayStatus.STATUS_FAILED:
+if bay.status in fields.ClusterStatus.STATUS_FAILED:
bay.bay_faults = self._collect_fault_info(context, bay)

return bay
@@ -342,7 +390,7 @@ class BaysController(base.Controller):
:param bay: a bay within the request body.
"""
new_bay = self._post(bay)
-res_bay = pecan.request.rpcapi.bay_create(new_bay,
+res_bay = pecan.request.rpcapi.cluster_create(new_bay,
bay.bay_create_timeout)

# Set the HTTP Location Header
@@ -357,7 +405,8 @@ class BaysController(base.Controller):
:param bay: a bay within the request body.
"""
new_bay = self._post(bay)
-pecan.request.rpcapi.bay_create_async(new_bay, bay.bay_create_timeout)
+pecan.request.rpcapi.cluster_create_async(new_bay,
+bay.bay_create_timeout)
return BayID(new_bay.uuid)

def _post(self, bay):
@@ -378,7 +427,7 @@ class BaysController(base.Controller):
bay_dict['coe_version'] = None
bay_dict['container_version'] = None

-new_bay = objects.Bay(context, **bay_dict)
+new_bay = objects.Cluster(context, **bay_dict)
new_bay.uuid = uuid.uuid4()
return new_bay

@@ -392,7 +441,7 @@ class BaysController(base.Controller):
:param patch: a json PATCH document to apply to this bay.
"""
bay = self._patch(bay_ident, patch)
-res_bay = pecan.request.rpcapi.bay_update(bay)
+res_bay = pecan.request.rpcapi.cluster_update(bay)
return Bay.convert_with_links(res_bay)

@base.Controller.api_version("1.2", "1.2") # noqa
@@ -406,7 +455,7 @@ class BaysController(base.Controller):
:param patch: a json PATCH document to apply to this bay.
"""
bay = self._patch(bay_ident, patch)
-pecan.request.rpcapi.bay_update_async(bay)
+pecan.request.rpcapi.cluster_update_async(bay)
return BayID(bay.uuid)

@base.Controller.api_version("1.3") # noqa
@@ -421,12 +470,12 @@ class BaysController(base.Controller):
:param patch: a json PATCH document to apply to this bay.
"""
bay = self._patch(bay_ident, patch)
-pecan.request.rpcapi.bay_update_async(bay, rollback=rollback)
+pecan.request.rpcapi.cluster_update_async(bay, rollback=rollback)
return BayID(bay.uuid)

def _patch(self, bay_ident, patch):
context = pecan.request.context
-bay = api_utils.get_resource('Bay', bay_ident)
+bay = api_utils.get_resource('Cluster', bay_ident)
policy.enforce(context, 'bay:update', bay,
action='bay:update')
try:
@@ -436,7 +485,7 @@ class BaysController(base.Controller):
raise exception.PatchError(patch=patch, reason=e)

# Update only the fields that have changed
-for field in objects.Bay.fields:
+for field in objects.Cluster.fields:
try:
patch_val = getattr(new_bay, field)
except AttributeError:
@@ -449,7 +498,7 @@ class BaysController(base.Controller):

delta = bay.obj_what_changed()

-validate_bay_properties(delta)
+validate_cluster_properties(delta)
return bay

@base.Controller.api_version("1.1", "1.1")
@@ -461,7 +510,7 @@ class BaysController(base.Controller):
"""
bay = self._delete(bay_ident)

-pecan.request.rpcapi.bay_delete(bay.uuid)
+pecan.request.rpcapi.cluster_delete(bay.uuid)

@base.Controller.api_version("1.2") # noqa
@expose.expose(None, types.uuid_or_name, status_code=204)
@@ -472,11 +521,11 @@ class BaysController(base.Controller):
"""
bay = self._delete(bay_ident)

-pecan.request.rpcapi.bay_delete_async(bay.uuid)
+pecan.request.rpcapi.cluster_delete_async(bay.uuid)

def _delete(self, bay_ident):
context = pecan.request.context
-bay = api_utils.get_resource('Bay', bay_ident)
+bay = api_utils.get_resource('Cluster', bay_ident)
policy.enforce(context, 'bay:delete', bay,
action='bay:delete')
return bay
@@ -46,7 +46,7 @@ class BayModel(base.APIBase):
name = wtypes.StringType(min_length=1, max_length=255)
"""The name of the Baymodel"""

-coe = wtypes.Enum(str, *fields.BayType.ALL, mandatory=True)
+coe = wtypes.Enum(str, *fields.ClusterType.ALL, mandatory=True)
"""The Container Orchestration Engine for this bay model"""

image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
@@ -181,7 +181,7 @@ class BayModel(base.APIBase):
docker_volume_size=25,
docker_storage_driver='devicemapper',
cluster_distro='fedora-atomic',
-coe=fields.BayType.KUBERNETES,
+coe=fields.ClusterType.KUBERNETES,
http_proxy='http://proxy.com:123',
https_proxy='https://proxy.com:123',
no_proxy='192.168.0.1,192.168.0.2,192.168.0.3',
@@ -46,11 +46,11 @@ class Certificate(base.APIBase):
def _set_cluster_uuid(self, value):
if value and self._cluster_uuid != value:
try:
-self._cluster = api_utils.get_resource('Bay', value)
+self._cluster = api_utils.get_resource('Cluster', value)
self._cluster_uuid = self._cluster.uuid
except exception.ClusterNotFound as e:
# Change error code because 404 (NotFound) is inappropriate
-# response for a POST request to create a Bay
+# response for a POST request to create a Cluster
e.code = 400 # BadRequest
raise
elif value == wtypes.Unset:
@@ -90,7 +90,8 @@ class Certificate(base.APIBase):

def get_cluster(self):
if not self._cluster:
-self._cluster = api_utils.get_resource('Bay', self.cluster_uuid)
+self._cluster = api_utils.get_resource('Cluster',
+self.cluster_uuid)
return self._cluster

@staticmethod
@@ -141,7 +142,7 @@ class CertificateController(base.Controller):
logical name of the cluster.
"""
context = pecan.request.context
-cluster = api_utils.get_resource('Bay', cluster_ident)
+cluster = api_utils.get_resource('Cluster', cluster_ident)
policy.enforce(context, 'certificate:get', cluster,
action='certificate:get')
certificate = pecan.request.rpcapi.get_ca_certificate(cluster)
@@ -28,7 +28,7 @@ from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api import expose
from magnum.api import utils as api_utils
-from magnum.api.validation import validate_bay_properties
+from magnum.api.validation import validate_cluster_properties
from magnum.common import clients
from magnum.common import exception
from magnum.common import name_generator
@@ -110,7 +110,7 @@ class Cluster(base.APIBase):
stack_id = wsme.wsattr(wtypes.text, readonly=True)
"""Stack id of the heat stack"""

-status = wtypes.Enum(str, *fields.BayStatus.ALL)
+status = wtypes.Enum(str, *fields.ClusterStatus.ALL)
"""Status of the cluster from the heat stack"""

status_reason = wtypes.text
@@ -141,36 +141,13 @@ class Cluster(base.APIBase):
def __init__(self, **kwargs):
super(Cluster, self).__init__()
self.fields = []
-for field in objects.Bay.fields:
+for field in objects.Cluster.fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
self.fields.append(field)
setattr(self, field, kwargs.get(field, wtypes.Unset))

-# Set the renamed attributes for clusters
-self.fields.append('cluster_template_id')
-if 'cluster_template_id' in kwargs.keys():
-setattr(self, 'cluster_template_id',
-kwargs.get('cluster_template_id', wtypes.Unset))
-else:
-setattr(self, 'cluster_template_id', kwargs.get('baymodel_id',
-wtypes.Unset))
-
-self.fields.append('create_timeout')
-if 'create_timeout' in kwargs.keys():
-setattr(self, 'create_timeout', kwargs.get('create_timeout',
-wtypes.Unset))
-else:
-setattr(self, 'create_timeout', kwargs.get('bay_create_timeout',
-wtypes.Unset))
-
-self.fields.append('faults')
-if 'faults' in kwargs.keys():
-setattr(self, 'faults', kwargs.get('faults', wtypes.Unset))
-else:
-setattr(self, 'faults', kwargs.get('bay_faults', wtypes.Unset))
-
@staticmethod
def _convert_with_links(cluster, url, expand=True):
if not expand:
@@ -180,9 +157,9 @@ class Cluster(base.APIBase):
'stack_id'])

cluster.links = [link.Link.make_link('self', url,
-'bays', cluster.uuid),
+'clusters', cluster.uuid),
link.Link.make_link('bookmark', url,
-'bays', cluster.uuid,
+'clusters', cluster.uuid,
bookmark=True)]
return cluster

@@ -201,7 +178,7 @@ class Cluster(base.APIBase):
master_count=1,
create_timeout=15,
stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63',
-status=fields.BayStatus.CREATE_COMPLETE,
+status=fields.ClusterStatus.CREATE_COMPLETE,
status_reason="CREATE completed successfully",
api_address='172.24.4.3',
node_addresses=['172.24.4.4', '172.24.4.5'],
@@ -211,26 +188,6 @@ class Cluster(base.APIBase):
container_version=None)
return cls._convert_with_links(sample, 'http://localhost:9511', expand)

-def as_dict(self):
-"""Render this object as a dict of its fields."""
-
-# Override this for updated cluster values
-d = super(Cluster, self).as_dict()
-
-if 'cluster_template_id' in d.keys():
-d['baymodel_id'] = d['cluster_template_id']
-del d['cluster_template_id']
-
-if 'create_timeout' in d.keys():
-d['bay_create_timeout'] = d['create_timeout']
-del d['create_timeout']
-
-if 'faults' in d.keys():
-d['bay_faults'] = d['faults']
-del d['faults']
-
-return d
-

class ClusterPatchType(types.JsonPatchType):
_api_base = Cluster
@@ -295,10 +252,10 @@ class ClustersController(base.Controller):

marker_obj = None
if marker:
-marker_obj = objects.Bay.get_by_uuid(pecan.request.context,
+marker_obj = objects.Cluster.get_by_uuid(pecan.request.context,
marker)

-clusters = objects.Bay.list(pecan.request.context, limit,
+clusters = objects.Cluster.list(pecan.request.context, limit,
marker_obj, sort_key=sort_key,
sort_dir=sort_dir)

@@ -380,13 +337,13 @@ class ClustersController(base.Controller):
:param cluster_ident: UUID or logical name of the Cluster.
"""
context = pecan.request.context
-cluster = api_utils.get_resource('Bay', cluster_ident)
+cluster = api_utils.get_resource('Cluster', cluster_ident)
policy.enforce(context, 'cluster:get', cluster,
action='cluster:get')

cluster = Cluster.convert_with_links(cluster)

-if cluster.status in fields.BayStatus.STATUS_FAILED:
+if cluster.status in fields.ClusterStatus.STATUS_FAILED:
cluster.faults = self._collect_fault_info(context, cluster)

return cluster
@@ -420,9 +377,9 @@ class ClustersController(base.Controller):
cluster_dict['coe_version'] = None
cluster_dict['container_version'] = None

-new_cluster = objects.Bay(context, **cluster_dict)
+new_cluster = objects.Cluster(context, **cluster_dict)
new_cluster.uuid = uuid.uuid4()
-pecan.request.rpcapi.bay_create_async(new_cluster,
+pecan.request.rpcapi.cluster_create_async(new_cluster,
cluster.create_timeout)

return ClusterID(new_cluster.uuid)
@@ -438,7 +395,7 @@ class ClustersController(base.Controller):
:param patch: a json PATCH document to apply to this cluster.
"""
cluster = self._patch(cluster_ident, patch)
-pecan.request.rpcapi.bay_update_async(cluster)
+pecan.request.rpcapi.cluster_update_async(cluster)
return ClusterID(cluster.uuid)

@base.Controller.api_version("1.3") # noqa
@@ -453,12 +410,12 @@ class ClustersController(base.Controller):
:param patch: a json PATCH document to apply to this cluster.
"""
cluster = self._patch(cluster_ident, patch)
-pecan.request.rpcapi.bay_update_async(cluster, rollback)
+pecan.request.rpcapi.cluster_update_async(cluster, rollback)
return ClusterID(cluster.uuid)

def _patch(self, cluster_ident, patch):
context = pecan.request.context
-cluster = api_utils.get_resource('Bay', cluster_ident)
+cluster = api_utils.get_resource('Cluster', cluster_ident)
policy.enforce(context, 'cluster:update', cluster,
action='cluster:update')
try:
@@ -469,7 +426,7 @@ class ClustersController(base.Controller):
raise exception.PatchError(patch=patch, reason=e)

# Update only the fields that have changed
-for field in objects.Bay.fields:
+for field in objects.Cluster.fields:
try:
patch_val = getattr(new_cluster, field)
except AttributeError:
@@ -482,7 +439,7 @@ class ClustersController(base.Controller):

delta = cluster.obj_what_changed()

-validate_bay_properties(delta)
+validate_cluster_properties(delta)
return cluster

@expose.expose(None, types.uuid_or_name, status_code=204)
@@ -492,8 +449,8 @@ class ClustersController(base.Controller):
:param cluster_ident: UUID of cluster or logical name of the cluster.
"""
context = pecan.request.context
-cluster = api_utils.get_resource('Bay', cluster_ident)
+cluster = api_utils.get_resource('Cluster', cluster_ident)
policy.enforce(context, 'cluster:delete', cluster,
action='cluster:delete')

-pecan.request.rpcapi.bay_delete_async(cluster.uuid)
+pecan.request.rpcapi.cluster_delete_async(cluster.uuid)
@@ -47,7 +47,7 @@ class ClusterTemplate(base.APIBase):
name = wtypes.StringType(min_length=1, max_length=255)
"""The name of the ClusterTemplate"""

-coe = wtypes.Enum(str, *fields.BayType.ALL, mandatory=True)
+coe = wtypes.Enum(str, *fields.ClusterType.ALL, mandatory=True)
"""The Container Orchestration Engine for this clustertemplate"""

image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
@@ -185,7 +185,7 @@ class ClusterTemplate(base.APIBase):
docker_volume_size=25,
docker_storage_driver='devicemapper',
cluster_distro='fedora-atomic',
-coe=fields.BayType.KUBERNETES,
+coe=fields.ClusterType.KUBERNETES,
http_proxy='http://proxy.com:123',
https_proxy='https://proxy.com:123',
no_proxy='192.168.0.1,192.168.0.2,192.168.0.3',
@@ -65,32 +65,35 @@ cluster_template_opts = [
cfg.CONF.register_opts(cluster_template_opts, group='cluster_template')


-bay_update_allowed_properties = set(['node_count'])
+cluster_update_allowed_properties = set(['node_count'])


-def enforce_bay_types(*bay_types):
+def enforce_cluster_types(*cluster_types):
-"""Enforce that bay_type is in supported list."""
+"""Enforce that cluster_type is in supported list."""
@decorator.decorator
def wrapper(func, *args, **kwargs):
# Note(eliqiao): This decorator has some assumptions
# args[1] should be an APIBase instance or
-# args[2] should be a bay_ident
+# args[2] should be a cluster_ident
obj = args[1]
-if hasattr(obj, 'bay_uuid'):
+if hasattr(obj, 'cluster_uuid'):
-bay = objects.Bay.get_by_uuid(pecan.request.context, obj.bay_uuid)
+cluster = objects.Cluster.get_by_uuid(pecan.request.context,
+obj.cluster_uuid)
else:
-bay_ident = args[2]
+cluster_ident = args[2]
-if uuidutils.is_uuid_like(bay_ident):
+if uuidutils.is_uuid_like(cluster_ident):
-bay = objects.Bay.get_by_uuid(pecan.request.context, bay_ident)
+cluster = objects.Cluster.get_by_uuid(pecan.request.context,
+cluster_ident)
else:
-bay = objects.Bay.get_by_name(pecan.request.context, bay_ident)
+cluster = objects.Cluster.get_by_name(pecan.request.context,
+cluster_ident)

-if bay.cluster_template.coe not in bay_types:
+if cluster.cluster_template.coe not in cluster_types:
raise exception.InvalidParameterValue(_(
-'Cannot fulfill request with a %(bay_type)s bay, '
+'Cannot fulfill request with a %(cluster_type)s cluster, '
-'expecting a %(supported_bay_types)s bay.') %
+'expecting a %(supported_cluster_types)s cluster.') %
-{'bay_type': bay.cluster_template.coe,
+{'cluster_type': cluster.cluster_template.coe,
-'supported_bay_types': '/'.join(bay_types)})
+'supported_cluster_types': '/'.join(cluster_types)})

return func(*args, **kwargs)

@@ -192,11 +195,11 @@ def _enforce_volume_storage_size(cluster_template):
'driver.') % (volume_size, storage_driver)


-def validate_bay_properties(delta):
+def validate_cluster_properties(delta):

-update_disallowed_properties = delta - bay_update_allowed_properties
+update_disallowed_properties = delta - cluster_update_allowed_properties
if update_disallowed_properties:
-err = (_("cannot change bay property(ies) %s.") %
+err = (_("cannot change cluster property(ies) %s.") %
", ".join(update_disallowed_properties))
raise exception.InvalidParameterValue(err=err)

@@ -25,8 +25,8 @@ from oslo_service import service
from magnum.common import rpc_service
from magnum.common import service as magnum_service
from magnum.common import short_id
-from magnum.conductor.handlers import bay_conductor
from magnum.conductor.handlers import ca_conductor
+from magnum.conductor.handlers import cluster_conductor
from magnum.conductor.handlers import conductor_listener
from magnum.conductor.handlers import indirection_api
from magnum.i18n import _LI
@@ -49,7 +49,7 @@ def main():
conductor_id = short_id.generate_id()
endpoints = [
indirection_api.Handler(),
-bay_conductor.Handler(),
+cluster_conductor.Handler(),
conductor_listener.Handler(),
ca_conductor.Handler(),
]
@@ -50,7 +50,7 @@ class TemplateList(lister.Lister):
parser.add_argument('-d', '--details',
action='store_true',
dest='details',
-help=('display the bay types provided by '
+help=('display the cluster types provided by '
'each template'))
parser.add_argument('-p', '--paths',
action='store_true',
@@ -77,10 +77,10 @@ class TemplateList(lister.Lister):
path=definition.template_path)

if parsed_args.details:
-for bay_type in definition.provides:
+for cluster_type in definition.provides:
row = dict()
row.update(template)
-row.update(bay_type)
+row.update(cluster_type)
rows.append(row)
else:
rows.append(template)
@@ -105,15 +105,15 @@ def make_admin_context(show_deleted=False, all_tenants=False):
return context


-def make_bay_context(bay, show_deleted=False):
+def make_cluster_context(cluster, show_deleted=False):
-"""Create a user context based on a bay's stored Keystone trust.
+"""Create a user context based on a cluster's stored Keystone trust.

-:param bay: the bay supplying the Keystone trust to use
+:param cluster: the cluster supplying the Keystone trust to use
:param show_deleted: if True, will show deleted items when query db
"""
-context = RequestContext(user_name=bay.trustee_username,
+context = RequestContext(user_name=cluster.trustee_username,
-password=bay.trustee_password,
+password=cluster.trustee_password,
-trust_id=bay.trust_id,
+trust_id=cluster.trust_id,
show_deleted=show_deleted,
user_domain_id=CONF.trust.trustee_domain_id,
user_domain_name=CONF.trust.trustee_domain_name)
@@ -75,20 +75,21 @@ def is_docker_api_version_atleast(docker, version):


@contextlib.contextmanager
-def docker_for_bay(context, bay):
+def docker_for_cluster(context, cluster):
-cluster_template = conductor_utils.retrieve_cluster_template(context, bay)
+cluster_template = conductor_utils.retrieve_cluster_template(
+context, cluster)

ca_cert, magnum_key, magnum_cert = None, None, None
client_kwargs = dict()
if not cluster_template.tls_disabled:
(ca_cert, magnum_key,
-magnum_cert) = cert_manager.create_client_files(bay)
+magnum_cert) = cert_manager.create_client_files(cluster)
client_kwargs['ca_cert'] = ca_cert.name
client_kwargs['client_key'] = magnum_key.name
client_kwargs['client_cert'] = magnum_cert.name

yield DockerHTTPClient(
-bay.api_address,
+cluster.api_address,
CONF.docker.docker_remote_api_version,
CONF.docker.default_timeout,
**client_kwargs
@@ -154,7 +154,7 @@ class GetDiscoveryUrlFailed(MagnumException):
message = _("Failed to get discovery url from '%(discovery_endpoint)s'.")


-class InvalidBayDiscoveryURL(Invalid):
+class InvalidClusterDiscoveryURL(Invalid):
message = _("Invalid discovery URL '%(discovery_url)s'.")


@@ -271,11 +271,11 @@ class PodAlreadyExists(Conflict):


class PodListNotFound(ResourceNotFound):
-message = _("Pod list could not be found for Bay %(bay_uuid)s.")
+message = _("Pod list could not be found for Cluster %(cluster_uuid)s.")


class PodCreationFailed(Invalid):
-message = _("Pod creation failed in Bay %(bay_uuid)s.")
+message = _("Pod creation failed in Cluster %(cluster_uuid)s.")


class ServiceNotFound(ResourceNotFound):
@@ -287,11 +287,12 @@ class ServiceAlreadyExists(Conflict):


class ServiceListNotFound(ResourceNotFound):
-message = _("Service list could not be found for Bay %(bay_uuid)s.")
+message = _("Service list could not be found for Cluster "
+"%(cluster_uuid)s.")


class ServiceCreationFailed(Invalid):
-message = _("Service creation failed for Bay %(bay_uuid)s.")
+message = _("Service creation failed for Cluster %(cluster_uuid)s.")


class ContainerException(Exception):
@@ -303,13 +304,13 @@ class NotSupported(MagnumException):
code = 400


-class BayTypeNotSupported(MagnumException):
+class ClusterTypeNotSupported(MagnumException):
-message = _("Bay type (%(server_type)s, %(os)s, %(coe)s)"
+message = _("Cluster type (%(server_type)s, %(os)s, %(coe)s)"
" not supported.")


-class BayTypeNotEnabled(MagnumException):
+class ClusterTypeNotEnabled(MagnumException):
-message = _("Bay type (%(server_type)s, %(os)s, %(coe)s)"
+message = _("Cluster type (%(server_type)s, %(os)s, %(coe)s)"
" not enabled.")


@@ -322,7 +323,8 @@ class Urllib2InvalidScheme(MagnumException):


class OperationInProgress(Invalid):
-message = _("Bay %(bay_name)s already has an operation in progress.")
+message = _("Cluster %(cluster_name)s already has an operation in "
+"progress.")


class ImageNotFound(ResourceNotFound):
@@ -383,11 +385,11 @@ class MagnumServiceAlreadyExists(Conflict):


class UnsupportedK8sQuantityFormat(MagnumException):
-message = _("Unsupported quantity format for k8s bay.")
+message = _("Unsupported quantity format for k8s cluster.")


class UnsupportedDockerQuantityFormat(MagnumException):
-message = _("Unsupported quantity format for Swarm bay.")
+message = _("Unsupported quantity format for Swarm cluster.")


class FlavorNotFound(ResourceNotFound):
@@ -429,8 +431,9 @@ class RegionsListFailed(MagnumException):
message = _("Failed to list regions.")


-class TrusteeOrTrustToBayFailed(MagnumException):
+class TrusteeOrTrustToClusterFailed(MagnumException):
-message = _("Failed to create trustee or trust for Bay: %(bay_uuid)s")
+message = _("Failed to create trustee or trust for Cluster: "
+"%(cluster_uuid)s")


class CertificatesToClusterFailed(MagnumException):
@@ -32,9 +32,9 @@ LOG = logging.getLogger(__name__)

trust_opts = [
cfg.StrOpt('trustee_domain_id',
-help=_('Id of the domain to create trustee for bays')),
+help=_('Id of the domain to create trustee for clusters')),
cfg.StrOpt('trustee_domain_name',
-help=_('Name of the domain to create trustee for bays')),
+help=_('Name of the domain to create trustee for s')),
cfg.StrOpt('trustee_domain_admin_id',
help=_('Id of the admin with roles sufficient to manage users'
' in the trustee_domain')),
@@ -256,21 +256,21 @@ class KeystoneClientV3(object):
trustee_user_id=trustee_user)
return trust

-def delete_trust(self, context, bay):
+def delete_trust(self, context, cluster):
-if bay.trust_id is None:
+if cluster.trust_id is None:
return

# Trust can only be deleted by the user who creates it. So when
-# other users in the same project want to delete the bay, we need
+# other users in the same project want to delete the cluster, we need
# use the trustee which can impersonate the trustor to delete the
# trust.
-if context.user_id == bay.user_id:
+if context.user_id == cluster.user_id:
client = self.client
else:
auth = ka_v3.Password(auth_url=self.auth_url,
-user_id=bay.trustee_user_id,
+user_id=cluster.trustee_user_id,
-password=bay.trustee_password,
+password=cluster.trustee_password,
-trust_id=bay.trust_id)
+trust_id=cluster.trust_id)

sess = ka_loading.session.Session().load_from_options(
auth=auth,
@@ -280,12 +280,12 @@ class KeystoneClientV3(object):
cert=CONF[CFG_LEGACY_GROUP].certfile)
client = kc_v3.Client(session=sess)
try:
-client.trusts.delete(bay.trust_id)
+client.trusts.delete(cluster.trust_id)
except kc_exception.NotFound:
pass
except Exception:
LOG.exception(_LE('Failed to delete trust'))
-raise exception.TrustDeleteFailed(trust_id=bay.trust_id)
+raise exception.TrustDeleteFailed(trust_id=cluster.trust_id)

def create_trustee(self, username, password):
domain_id = self.trustee_domain_id
@@ -104,11 +104,11 @@ def enforce_wsgi(api_name, act=None):

example:
from magnum.common import policy
-class BaysController(rest.RestController):
+class ClustersController(rest.RestController):
....
-@policy.enforce_wsgi("bay", "delete")
+@policy.enforce_wsgi("cluster", "delete")
@wsme_pecan.wsexpose(None, types.uuid_or_name, status_code=204)
-def delete(self, bay_ident):
+def delete(self, cluster_ident):
...
"""
@decorator.decorator
@@ -44,8 +44,8 @@ TRANSPORT_ALIASES = {
periodic_opts = [
cfg.BoolOpt('periodic_global_stack_list',
default=False,
-help="List Heat stacks globally when syncing bays. "
+help="List Heat stacks globally when syncing clusters. "
-"Default is to do retrieve each bay's stack "
+"Default is to do retrieve each cluster's stack "
"individually. Reduces number of requests against "
"Heat API if enabled but requires changes to Heat's "
"policy.json."),
@@ -28,27 +28,27 @@ class API(rpc_service.API):
super(API, self).__init__(transport, context,
topic=cfg.CONF.conductor.topic)

-# Bay Operations
+# Cluster Operations

-def bay_create(self, bay, bay_create_timeout):
+def cluster_create(self, cluster, create_timeout):
-return self._call('bay_create', bay=bay,
+return self._call('cluster_create', cluster=cluster,
-bay_create_timeout=bay_create_timeout)
+create_timeout=create_timeout)

-def bay_create_async(self, bay, bay_create_timeout):
+def cluster_create_async(self, cluster, create_timeout):
-self._cast('bay_create', bay=bay,
+self._cast('cluster_create', cluster=cluster,
-bay_create_timeout=bay_create_timeout)
+create_timeout=create_timeout)

-def bay_delete(self, uuid):
+def cluster_delete(self, uuid):
-return self._call('bay_delete', uuid=uuid)
+return self._call('cluster_delete', uuid=uuid)

-def bay_delete_async(self, uuid):
+def cluster_delete_async(self, uuid):
-self._cast('bay_delete', uuid=uuid)
+self._cast('cluster_delete', uuid=uuid)

-def bay_update(self, bay):
+def cluster_update(self, cluster):
-return self._call('bay_update', bay=bay)
+return self._call('cluster_update', cluster=cluster)

-def bay_update_async(self, bay, rollback=False):
+def cluster_update_async(self, cluster, rollback=False):
-self._cast('bay_update', bay=bay, rollback=rollback)
+self._cast('cluster_update', cluster=cluster, rollback=rollback)

# CA operations

@@ -68,8 +68,9 @@ CONF.register_opts(cluster_heat_opts, group='cluster_heat')
LOG = logging.getLogger(__name__)


-def _extract_template_definition(context, bay, scale_manager=None):
+def _extract_template_definition(context, cluster, scale_manager=None):
-cluster_template = conductor_utils.retrieve_cluster_template(context, bay)
+cluster_template = conductor_utils.retrieve_cluster_template(context,
+cluster)
cluster_distro = cluster_template.cluster_distro
cluster_coe = cluster_template.coe
cluster_server_type = cluster_template.server_type
@@ -77,7 +78,7 @@ def _extract_template_definition(context, bay, scale_manager=None):
cluster_server_type,
cluster_distro,
cluster_coe)
-return definition.extract_definition(context, cluster_template, bay,
+return definition.extract_definition(context, cluster_template, cluster,
scale_manager=scale_manager)


@@ -91,9 +92,9 @@ def _get_env_files(template_path, env_rel_paths):
return environment_files, env_map


-def _create_stack(context, osc, bay, bay_create_timeout):
+def _create_stack(context, osc, cluster, create_timeout):
template_path, heat_params, env_files = (
-_extract_template_definition(context, bay))
+_extract_template_definition(context, cluster))

tpl_files, template = template_utils.get_template_contents(template_path)

@@ -101,11 +102,11 @@ def _create_stack(context, osc, bay, bay_create_timeout):
tpl_files.update(env_map)

# Make sure no duplicate stack name
-stack_name = '%s-%s' % (bay.name, short_id.generate_id())
+stack_name = '%s-%s' % (cluster.name, short_id.generate_id())
-if bay_create_timeout:
+if create_timeout:
-heat_timeout = bay_create_timeout
+heat_timeout = create_timeout
else:
-# no bay_create_timeout value was passed in to the request
+# no create_timeout value was passed in to the request
# so falling back on configuration file value
heat_timeout = cfg.CONF.cluster_heat.create_timeout
fields = {
@@ -121,9 +122,9 @@ def _create_stack(context, osc, bay, bay_create_timeout):
return created_stack


-def _update_stack(context, osc, bay, scale_manager=None, rollback=False):
+def _update_stack(context, osc, cluster, scale_manager=None, rollback=False):
template_path, heat_params, env_files = _extract_template_definition(
-context, bay, scale_manager=scale_manager)
+context, cluster, scale_manager=scale_manager)

tpl_files, template = template_utils.get_template_contents(template_path)
environment_files, env_map = _get_env_files(template_path, env_files)
@@ -137,7 +138,7 @@ def _update_stack(context, osc, bay, scale_manager=None, rollback=False):
'disable_rollback': not rollback
}

-return osc.heat().stacks.update(bay.stack_id, **fields)
+return osc.heat().stacks.update(cluster.stack_id, **fields)


class Handler(object):
@@ -145,26 +146,28 @@ class Handler(object):
def __init__(self):
super(Handler, self).__init__()

-# Bay Operations
+# Cluster Operations

-def bay_create(self, context, bay, bay_create_timeout):
+def cluster_create(self, context, cluster, create_timeout):
-LOG.debug('bay_heat bay_create')
+LOG.debug('cluster_heat cluster_create')

osc = clients.OpenStackClients(context)

try:
-# Create trustee/trust and set them to bay
+# Create trustee/trust and set them to cluster
-trust_manager.create_trustee_and_trust(osc, bay)
+trust_manager.create_trustee_and_trust(osc, cluster)
-# Generate certificate and set the cert reference to bay
+# Generate certificate and set the cert reference to cluster
-cert_manager.generate_certificates_to_cluster(bay, context=context)
+cert_manager.generate_certificates_to_cluster(cluster,
+context=context)
-conductor_utils.notify_about_bay_operation(
+conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_PENDING)
-created_stack = _create_stack(context, osc, bay,
+created_stack = _create_stack(context, osc, cluster,
-bay_create_timeout)
+create_timeout)
except Exception as e:
-cert_manager.delete_certificates_from_cluster(bay, context=context)
+cert_manager.delete_certificates_from_cluster(cluster,
+context=context)
-trust_manager.delete_trustee_and_trust(osc, context, bay)
+trust_manager.delete_trustee_and_trust(osc, context, cluster)
-conductor_utils.notify_about_bay_operation(
+conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_FAILURE)

if isinstance(e, exc.HTTPBadRequest):
@@ -173,111 +176,112 @@ class Handler(object):
raise e
raise

-bay.stack_id = created_stack['stack']['id']
+cluster.stack_id = created_stack['stack']['id']
-bay.status = fields.BayStatus.CREATE_IN_PROGRESS
+cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS
-bay.create()
+cluster.create()

-self._poll_and_check(osc, bay)
+self._poll_and_check(osc, cluster)

-return bay
+return cluster

-def bay_update(self, context, bay, rollback=False):
+def cluster_update(self, context, cluster, rollback=False):
-LOG.debug('bay_heat bay_update')
+LOG.debug('cluster_heat cluster_update')

osc = clients.OpenStackClients(context)
-stack = osc.heat().stacks.get(bay.stack_id)
+stack = osc.heat().stacks.get(cluster.stack_id)
allow_update_status = (
-fields.BayStatus.CREATE_COMPLETE,
+fields.ClusterStatus.CREATE_COMPLETE,
-fields.BayStatus.UPDATE_COMPLETE,
+fields.ClusterStatus.UPDATE_COMPLETE,
-fields.BayStatus.RESUME_COMPLETE,
+fields.ClusterStatus.RESUME_COMPLETE,
-fields.BayStatus.RESTORE_COMPLETE,
+fields.ClusterStatus.RESTORE_COMPLETE,
-fields.BayStatus.ROLLBACK_COMPLETE,
+fields.ClusterStatus.ROLLBACK_COMPLETE,
-fields.BayStatus.SNAPSHOT_COMPLETE,
+fields.ClusterStatus.SNAPSHOT_COMPLETE,
-fields.BayStatus.CHECK_COMPLETE,
+fields.ClusterStatus.CHECK_COMPLETE,
-fields.BayStatus.ADOPT_COMPLETE
+fields.ClusterStatus.ADOPT_COMPLETE
)
if stack.stack_status not in allow_update_status:
-conductor_utils.notify_about_bay_operation(
+conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
-operation = _('Updating a bay when stack status is '
+operation = _('Updating a cluster when stack status is '
'"%s"') % stack.stack_status
raise exception.NotSupported(operation=operation)

-delta = bay.obj_what_changed()
+delta = cluster.obj_what_changed()
if not delta:
-return bay
+return cluster

-manager = scale_manager.ScaleManager(context, osc, bay)
+manager = scale_manager.ScaleManager(context, osc, cluster)

-conductor_utils.notify_about_bay_operation(
+conductor_utils.notify_about_cluster_operation(
context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING)

-_update_stack(context, osc, bay, manager, rollback)
+_update_stack(context, osc, cluster, manager, rollback)
-self._poll_and_check(osc, bay)
+self._poll_and_check(osc, cluster)

-return bay
+return cluster

-def bay_delete(self, context, uuid):
+def cluster_delete(self, context, uuid):
-LOG.debug('bay_heat bay_delete')
+LOG.debug('cluster_heat cluster_delete')
osc = clients.OpenStackClients(context)
-bay = objects.Bay.get_by_uuid(context, uuid)
+cluster = objects.Cluster.get_by_uuid(context, uuid)

-stack_id = bay.stack_id
+stack_id = cluster.stack_id
# NOTE(sdake): This will execute a stack_delete operation. This will
# Ignore HTTPNotFound exceptions (stack wasn't present). In the case
-# that Heat couldn't find the stack representing the bay, likely a user
+# that Heat couldn't find the stack representing the cluster, likely a
-# has deleted the stack outside the context of Magnum. Therefore the
+# user has deleted the stack outside the context of Magnum. Therefore
-# contents of the bay are forever lost.
+# the contents of the cluster are forever lost.
#
# If the exception is unhandled, the original exception will be raised.
try:
conductor_utils.notify_about_bay_operation(
|
conductor_utils.notify_about_cluster_operation(
|
||||||
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
|
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
|
||||||
osc.heat().stacks.delete(stack_id)
|
osc.heat().stacks.delete(stack_id)
|
||||||
except exc.HTTPNotFound:
|
except exc.HTTPNotFound:
|
||||||
LOG.info(_LI('The stack %s was not found during bay'
|
LOG.info(_LI('The stack %s was not found during cluster'
|
||||||
' deletion.'), stack_id)
|
' deletion.'), stack_id)
|
||||||
try:
|
try:
|
||||||
trust_manager.delete_trustee_and_trust(osc, context, bay)
|
trust_manager.delete_trustee_and_trust(osc, context, cluster)
|
||||||
cert_manager.delete_certificates_from_cluster(bay,
|
cert_manager.delete_certificates_from_cluster(cluster,
|
||||||
context=context)
|
context=context)
|
||||||
bay.destroy()
|
cluster.destroy()
|
||||||
except exception.ClusterNotFound:
|
except exception.ClusterNotFound:
|
||||||
LOG.info(_LI('The bay %s has been deleted by others.'), uuid)
|
LOG.info(_LI('The cluster %s has been deleted by others.'),
|
||||||
conductor_utils.notify_about_bay_operation(
|
uuid)
|
||||||
|
conductor_utils.notify_about_cluster_operation(
|
||||||
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
|
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
|
||||||
return None
|
return None
|
||||||
except exc.HTTPConflict:
|
except exc.HTTPConflict:
|
||||||
conductor_utils.notify_about_bay_operation(
|
conductor_utils.notify_about_cluster_operation(
|
||||||
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
|
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
|
||||||
raise exception.OperationInProgress(bay_name=bay.name)
|
raise exception.OperationInProgress(cluster_name=cluster.name)
|
||||||
except Exception:
|
except Exception:
|
||||||
conductor_utils.notify_about_bay_operation(
|
conductor_utils.notify_about_cluster_operation(
|
||||||
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
|
context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
|
||||||
raise
|
raise
|
||||||
|
|
||||||
bay.status = fields.BayStatus.DELETE_IN_PROGRESS
|
cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
|
||||||
bay.save()
|
cluster.save()
|
||||||
|
|
||||||
self._poll_and_check(osc, bay)
|
self._poll_and_check(osc, cluster)
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def _poll_and_check(self, osc, bay):
|
def _poll_and_check(self, osc, cluster):
|
||||||
poller = HeatPoller(osc, bay)
|
poller = HeatPoller(osc, cluster)
|
||||||
lc = loopingcall.FixedIntervalLoopingCall(f=poller.poll_and_check)
|
lc = loopingcall.FixedIntervalLoopingCall(f=poller.poll_and_check)
|
||||||
lc.start(cfg.CONF.cluster_heat.wait_interval, True)
|
lc.start(cfg.CONF.cluster_heat.wait_interval, True)
|
||||||
|
|
||||||
|
|
||||||
class HeatPoller(object):
|
class HeatPoller(object):
|
||||||
|
|
||||||
def __init__(self, openstack_client, bay):
|
def __init__(self, openstack_client, cluster):
|
||||||
self.openstack_client = openstack_client
|
self.openstack_client = openstack_client
|
||||||
self.context = self.openstack_client.context
|
self.context = self.openstack_client.context
|
||||||
self.bay = bay
|
self.cluster = cluster
|
||||||
self.attempts = 0
|
self.attempts = 0
|
||||||
self.cluster_template = conductor_utils.retrieve_cluster_template(
|
self.cluster_template = conductor_utils.retrieve_cluster_template(
|
||||||
self.context, bay)
|
self.context, cluster)
|
||||||
self.template_def = \
|
self.template_def = \
|
||||||
template_def.TemplateDefinition.get_template_definition(
|
template_def.TemplateDefinition.get_template_definition(
|
||||||
self.cluster_template.server_type,
|
self.cluster_template.server_type,
|
||||||
@ -286,97 +290,97 @@ class HeatPoller(object):
|
|||||||
|
|
||||||
def poll_and_check(self):
|
def poll_and_check(self):
|
||||||
# TODO(yuanying): temporary implementation to update api_address,
|
# TODO(yuanying): temporary implementation to update api_address,
|
||||||
# node_addresses and bay status
|
# node_addresses and cluster status
|
||||||
stack = self.openstack_client.heat().stacks.get(self.bay.stack_id)
|
stack = self.openstack_client.heat().stacks.get(self.cluster.stack_id)
|
||||||
self.attempts += 1
|
self.attempts += 1
|
||||||
status_to_event = {
|
status_to_event = {
|
||||||
fields.BayStatus.DELETE_COMPLETE: taxonomy.ACTION_DELETE,
|
fields.ClusterStatus.DELETE_COMPLETE: taxonomy.ACTION_DELETE,
|
||||||
fields.BayStatus.CREATE_COMPLETE: taxonomy.ACTION_CREATE,
|
fields.ClusterStatus.CREATE_COMPLETE: taxonomy.ACTION_CREATE,
|
||||||
fields.BayStatus.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE,
|
fields.ClusterStatus.UPDATE_COMPLETE: taxonomy.ACTION_UPDATE,
|
||||||
fields.BayStatus.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE,
|
fields.ClusterStatus.ROLLBACK_COMPLETE: taxonomy.ACTION_UPDATE,
|
||||||
fields.BayStatus.CREATE_FAILED: taxonomy.ACTION_CREATE,
|
fields.ClusterStatus.CREATE_FAILED: taxonomy.ACTION_CREATE,
|
||||||
fields.BayStatus.DELETE_FAILED: taxonomy.ACTION_DELETE,
|
fields.ClusterStatus.DELETE_FAILED: taxonomy.ACTION_DELETE,
|
||||||
fields.BayStatus.UPDATE_FAILED: taxonomy.ACTION_UPDATE,
|
fields.ClusterStatus.UPDATE_FAILED: taxonomy.ACTION_UPDATE,
|
||||||
fields.BayStatus.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE
|
fields.ClusterStatus.ROLLBACK_FAILED: taxonomy.ACTION_UPDATE
|
||||||
}
|
}
|
||||||
# poll_and_check is detached and polling long time to check status,
|
# poll_and_check is detached and polling long time to check status,
|
||||||
# so another user/client can call delete bay/stack.
|
# so another user/client can call delete cluster/stack.
|
||||||
if stack.stack_status == fields.BayStatus.DELETE_COMPLETE:
|
if stack.stack_status == fields.ClusterStatus.DELETE_COMPLETE:
|
||||||
self._delete_complete()
|
self._delete_complete()
|
||||||
conductor_utils.notify_about_bay_operation(
|
conductor_utils.notify_about_cluster_operation(
|
||||||
self.context, status_to_event[stack.stack_status],
|
self.context, status_to_event[stack.stack_status],
|
||||||
taxonomy.OUTCOME_SUCCESS)
|
taxonomy.OUTCOME_SUCCESS)
|
||||||
raise loopingcall.LoopingCallDone()
|
raise loopingcall.LoopingCallDone()
|
||||||
|
|
||||||
if stack.stack_status in (fields.BayStatus.CREATE_COMPLETE,
|
if stack.stack_status in (fields.ClusterStatus.CREATE_COMPLETE,
|
||||||
fields.BayStatus.UPDATE_COMPLETE):
|
fields.ClusterStatus.UPDATE_COMPLETE):
|
||||||
self._sync_bay_and_template_status(stack)
|
self._sync_cluster_and_template_status(stack)
|
||||||
conductor_utils.notify_about_bay_operation(
|
conductor_utils.notify_about_cluster_operation(
|
||||||
self.context, status_to_event[stack.stack_status],
|
self.context, status_to_event[stack.stack_status],
|
||||||
taxonomy.OUTCOME_SUCCESS)
|
taxonomy.OUTCOME_SUCCESS)
|
||||||
raise loopingcall.LoopingCallDone()
|
raise loopingcall.LoopingCallDone()
|
||||||
elif stack.stack_status != self.bay.status:
|
elif stack.stack_status != self.cluster.status:
|
||||||
self._sync_bay_status(stack)
|
self._sync_cluster_status(stack)
|
||||||
|
|
||||||
if stack.stack_status in (fields.BayStatus.CREATE_FAILED,
|
if stack.stack_status in (fields.ClusterStatus.CREATE_FAILED,
|
||||||
fields.BayStatus.DELETE_FAILED,
|
fields.ClusterStatus.DELETE_FAILED,
|
||||||
fields.BayStatus.UPDATE_FAILED,
|
fields.ClusterStatus.UPDATE_FAILED,
|
||||||
fields.BayStatus.ROLLBACK_COMPLETE,
|
fields.ClusterStatus.ROLLBACK_COMPLETE,
|
||||||
fields.BayStatus.ROLLBACK_FAILED):
|
fields.ClusterStatus.ROLLBACK_FAILED):
|
||||||
self._sync_bay_and_template_status(stack)
|
self._sync_cluster_and_template_status(stack)
|
||||||
self._bay_failed(stack)
|
self._cluster_failed(stack)
|
||||||
conductor_utils.notify_about_bay_operation(
|
conductor_utils.notify_about_cluster_operation(
|
||||||
self.context, status_to_event[stack.stack_status],
|
self.context, status_to_event[stack.stack_status],
|
||||||
taxonomy.OUTCOME_FAILURE)
|
taxonomy.OUTCOME_FAILURE)
|
||||||
raise loopingcall.LoopingCallDone()
|
raise loopingcall.LoopingCallDone()
|
||||||
# only check max attempts when the stack is being created when
|
# only check max attempts when the stack is being created when
|
||||||
# the timeout hasn't been set. If the timeout has been set then
|
# the timeout hasn't been set. If the timeout has been set then
|
||||||
# the loop will end when the stack completes or the timeout occurs
|
# the loop will end when the stack completes or the timeout occurs
|
||||||
if stack.stack_status == fields.BayStatus.CREATE_IN_PROGRESS:
|
if stack.stack_status == fields.ClusterStatus.CREATE_IN_PROGRESS:
|
||||||
if (stack.timeout_mins is None and
|
if (stack.timeout_mins is None and
|
||||||
self.attempts > cfg.CONF.cluster_heat.max_attempts):
|
self.attempts > cfg.CONF.cluster_heat.max_attempts):
|
||||||
LOG.error(_LE('Bay check exit after %(attempts)s attempts,'
|
LOG.error(_LE('Cluster check exit after %(attempts)s attempts,'
|
||||||
'stack_id: %(id)s, stack_status: %(status)s') %
|
'stack_id: %(id)s, stack_status: %(status)s') %
|
||||||
{'attempts': cfg.CONF.cluster_heat.max_attempts,
|
{'attempts': cfg.CONF.cluster_heat.max_attempts,
|
||||||
'id': self.bay.stack_id,
|
'id': self.cluster.stack_id,
|
||||||
'status': stack.stack_status})
|
'status': stack.stack_status})
|
||||||
raise loopingcall.LoopingCallDone()
|
raise loopingcall.LoopingCallDone()
|
||||||
else:
|
else:
|
||||||
if self.attempts > cfg.CONF.cluster_heat.max_attempts:
|
if self.attempts > cfg.CONF.cluster_heat.max_attempts:
|
||||||
LOG.error(_LE('Bay check exit after %(attempts)s attempts,'
|
LOG.error(_LE('Cluster check exit after %(attempts)s attempts,'
|
||||||
'stack_id: %(id)s, stack_status: %(status)s') %
|
'stack_id: %(id)s, stack_status: %(status)s') %
|
||||||
{'attempts': cfg.CONF.cluster_heat.max_attempts,
|
{'attempts': cfg.CONF.cluster_heat.max_attempts,
|
||||||
'id': self.bay.stack_id,
|
'id': self.cluster.stack_id,
|
||||||
'status': stack.stack_status})
|
'status': stack.stack_status})
|
||||||
raise loopingcall.LoopingCallDone()
|
raise loopingcall.LoopingCallDone()
|
||||||
|
|
||||||
def _delete_complete(self):
|
def _delete_complete(self):
|
||||||
LOG.info(_LI('Bay has been deleted, stack_id: %s')
|
LOG.info(_LI('Cluster has been deleted, stack_id: %s')
|
||||||
% self.bay.stack_id)
|
% self.cluster.stack_id)
|
||||||
try:
|
try:
|
||||||
trust_manager.delete_trustee_and_trust(self.openstack_client,
|
trust_manager.delete_trustee_and_trust(self.openstack_client,
|
||||||
self.context,
|
self.context,
|
||||||
self.bay)
|
self.cluster)
|
||||||
cert_manager.delete_certificates_from_cluster(self.bay,
|
cert_manager.delete_certificates_from_cluster(self.cluster,
|
||||||
context=self.context)
|
context=self.context)
|
||||||
self.bay.destroy()
|
self.cluster.destroy()
|
||||||
except exception.ClusterNotFound:
|
except exception.ClusterNotFound:
|
||||||
LOG.info(_LI('The bay %s has been deleted by others.')
|
LOG.info(_LI('The cluster %s has been deleted by others.')
|
||||||
% self.bay.uuid)
|
% self.cluster.uuid)
|
||||||
|
|
||||||
def _sync_bay_status(self, stack):
|
def _sync_cluster_status(self, stack):
|
||||||
self.bay.status = stack.stack_status
|
self.cluster.status = stack.stack_status
|
||||||
self.bay.status_reason = stack.stack_status_reason
|
self.cluster.status_reason = stack.stack_status_reason
|
||||||
stack_nc_param = self.template_def.get_heat_param(
|
stack_nc_param = self.template_def.get_heat_param(
|
||||||
bay_attr='node_count')
|
cluster_attr='node_count')
|
||||||
self.bay.node_count = stack.parameters[stack_nc_param]
|
self.cluster.node_count = stack.parameters[stack_nc_param]
|
||||||
self.bay.save()
|
self.cluster.save()
|
||||||
|
|
||||||
def get_version_info(self, stack):
|
def get_version_info(self, stack):
|
||||||
stack_param = self.template_def.get_heat_param(
|
stack_param = self.template_def.get_heat_param(
|
||||||
bay_attr='coe_version')
|
cluster_attr='coe_version')
|
||||||
if stack_param:
|
if stack_param:
|
||||||
self.bay.coe_version = stack.parameters[stack_param]
|
self.cluster.coe_version = stack.parameters[stack_param]
|
||||||
|
|
||||||
tdef = template_def.TemplateDefinition.get_template_definition(
|
tdef = template_def.TemplateDefinition.get_template_definition(
|
||||||
self.cluster_template.server_type,
|
self.cluster_template.server_type,
|
||||||
@ -388,18 +392,18 @@ class HeatPoller(object):
|
|||||||
container_version = ver.container_version
|
container_version = ver.container_version
|
||||||
except Exception:
|
except Exception:
|
||||||
container_version = None
|
container_version = None
|
||||||
self.bay.container_version = container_version
|
self.cluster.container_version = container_version
|
||||||
|
|
||||||
def _sync_bay_and_template_status(self, stack):
|
def _sync_cluster_and_template_status(self, stack):
|
||||||
self.template_def.update_outputs(stack, self.cluster_template,
|
self.template_def.update_outputs(stack, self.cluster_template,
|
||||||
self.bay)
|
self.cluster)
|
||||||
self.get_version_info(stack)
|
self.get_version_info(stack)
|
||||||
self._sync_bay_status(stack)
|
self._sync_cluster_status(stack)
|
||||||
|
|
||||||
def _bay_failed(self, stack):
|
def _cluster_failed(self, stack):
|
||||||
LOG.error(_LE('Bay error, stack status: %(bay_status)s, '
|
LOG.error(_LE('Cluster error, stack status: %(cluster_status)s, '
|
||||||
'stack_id: %(stack_id)s, '
|
'stack_id: %(stack_id)s, '
|
||||||
'reason: %(reason)s') %
|
'reason: %(reason)s') %
|
||||||
{'bay_status': stack.stack_status,
|
{'cluster_status': stack.stack_status,
|
||||||
'stack_id': self.bay.stack_id,
|
'stack_id': self.cluster.stack_id,
|
||||||
'reason': self.bay.status_reason})
|
'reason': self.cluster.status_reason})
|
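For orientation, the renamed conductor entry points above line up as follows; a minimal sketch using direct calls (in Magnum these handlers are reached over RPC, and the context/cluster variables here are assumed to exist):

    handler = Handler()
    cluster = handler.cluster_create(context, cluster, create_timeout)
    handler.cluster_update(context, cluster, rollback=False)
    handler.cluster_delete(context, cluster.uuid)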
@ -19,36 +19,39 @@ from magnum.i18n import _LE
|
|||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
def create_trustee_and_trust(osc, bay):
|
def create_trustee_and_trust(osc, cluster):
|
||||||
try:
|
try:
|
||||||
password = utils.generate_password(length=18)
|
password = utils.generate_password(length=18)
|
||||||
trustee = osc.keystone().create_trustee(
|
trustee = osc.keystone().create_trustee(
|
||||||
bay.uuid,
|
cluster.uuid,
|
||||||
password,
|
password,
|
||||||
)
|
)
|
||||||
bay.trustee_username = trustee.name
|
cluster.trustee_username = trustee.name
|
||||||
bay.trustee_user_id = trustee.id
|
cluster.trustee_user_id = trustee.id
|
||||||
bay.trustee_password = password
|
cluster.trustee_password = password
|
||||||
trust = osc.keystone().create_trust(trustee.id)
|
trust = osc.keystone().create_trust(trustee.id)
|
||||||
bay.trust_id = trust.id
|
cluster.trust_id = trust.id
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('Failed to create trustee and trust for Bay: %s'),
|
LOG.exception(
|
||||||
bay.uuid)
|
_LE('Failed to create trustee and trust for Cluster: %s'),
|
||||||
raise exception.TrusteeOrTrustToBayFailed(bay_uuid=bay.uuid)
|
cluster.uuid)
|
||||||
|
raise exception.TrusteeOrTrustToClusterFailed(
|
||||||
|
cluster_uuid=cluster.uuid)
|
||||||
|
|
||||||
|
|
||||||
def delete_trustee_and_trust(osc, context, bay):
|
def delete_trustee_and_trust(osc, context, cluster):
|
||||||
try:
|
try:
|
||||||
# The bay which is upgraded from Liberty doesn't have trust_id
|
# The cluster which is upgraded from Liberty doesn't have trust_id
|
||||||
if bay.trust_id:
|
if cluster.trust_id:
|
||||||
osc.keystone().delete_trust(context, bay)
|
osc.keystone().delete_trust(context, cluster)
|
||||||
except Exception:
|
except Exception:
|
||||||
# Exceptions are already logged by keystone().delete_trust
|
# Exceptions are already logged by keystone().delete_trust
|
||||||
pass
|
pass
|
||||||
try:
|
try:
|
||||||
# The bay which is upgraded from Liberty doesn't have trustee_user_id
|
# The cluster which is upgraded from Liberty doesn't have
|
||||||
if bay.trustee_user_id:
|
# trustee_user_id
|
||||||
osc.keystone().delete_trustee(bay.trustee_user_id)
|
if cluster.trustee_user_id:
|
||||||
|
osc.keystone().delete_trustee(cluster.trustee_user_id)
|
||||||
except Exception:
|
except Exception:
|
||||||
# Exceptions are already logged by keystone().delete_trustee
|
# Exceptions are already logged by keystone().delete_trustee
|
||||||
pass
|
pass
|
||||||
|
@ -41,17 +41,17 @@ class K8sAPI(apiv_api.ApivApi):
|
|||||||
raise
|
raise
|
||||||
return tmp
|
return tmp
|
||||||
|
|
||||||
def __init__(self, context, bay):
|
def __init__(self, context, cluster):
|
||||||
self.ca_file = None
|
self.ca_file = None
|
||||||
self.cert_file = None
|
self.cert_file = None
|
||||||
self.key_file = None
|
self.key_file = None
|
||||||
|
|
||||||
if bay.magnum_cert_ref:
|
if cluster.magnum_cert_ref:
|
||||||
(self.ca_file, self.key_file,
|
(self.ca_file, self.key_file,
|
||||||
self.cert_file) = create_client_files(bay, context)
|
self.cert_file) = create_client_files(cluster, context)
|
||||||
|
|
||||||
# build a connection with Kubernetes master
|
# build a connection with Kubernetes master
|
||||||
client = api_client.ApiClient(bay.api_address,
|
client = api_client.ApiClient(cluster.api_address,
|
||||||
key_file=self.key_file.name,
|
key_file=self.key_file.name,
|
||||||
cert_file=self.cert_file.name,
|
cert_file=self.cert_file.name,
|
||||||
ca_certs=self.ca_file.name)
|
ca_certs=self.ca_file.name)
|
||||||
@ -67,13 +67,13 @@ class K8sAPI(apiv_api.ApivApi):
|
|||||||
self.key_file.close()
|
self.key_file.close()
|
||||||
|
|
||||||
|
|
||||||
def create_k8s_api(context, bay):
|
def create_k8s_api(context, cluster):
|
||||||
"""Create a kubernetes API client
|
"""Create a kubernetes API client
|
||||||
|
|
||||||
Creates connection with Kubernetes master and creates ApivApi instance
|
Creates connection with Kubernetes master and creates ApivApi instance
|
||||||
to call Kubernetes APIs.
|
to call Kubernetes APIs.
|
||||||
|
|
||||||
:param context: The security context
|
:param context: The security context
|
||||||
:param bay: Bay object
|
:param cluster: Cluster object
|
||||||
"""
|
"""
|
||||||
return K8sAPI(context, bay)
|
return K8sAPI(context, cluster)
|
||||||
|
@ -19,8 +19,8 @@ from magnum.conductor import monitors
|
|||||||
|
|
||||||
class K8sMonitor(monitors.MonitorBase):
|
class K8sMonitor(monitors.MonitorBase):
|
||||||
|
|
||||||
def __init__(self, context, bay):
|
def __init__(self, context, cluster):
|
||||||
super(K8sMonitor, self).__init__(context, bay)
|
super(K8sMonitor, self).__init__(context, cluster)
|
||||||
self.data = {}
|
self.data = {}
|
||||||
self.data['nodes'] = []
|
self.data['nodes'] = []
|
||||||
self.data['pods'] = []
|
self.data['pods'] = []
|
||||||
@ -39,7 +39,7 @@ class K8sMonitor(monitors.MonitorBase):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def pull_data(self):
|
def pull_data(self):
|
||||||
k8s_api = k8s.create_k8s_api(self.context, self.bay)
|
k8s_api = k8s.create_k8s_api(self.context, self.cluster)
|
||||||
nodes = k8s_api.list_namespaced_node()
|
nodes = k8s_api.list_namespaced_node()
|
||||||
self.data['nodes'] = self._parse_node_info(nodes)
|
self.data['nodes'] = self._parse_node_info(nodes)
|
||||||
pods = k8s_api.list_namespaced_pod('default')
|
pods = k8s_api.list_namespaced_pod('default')
|
||||||
|
@ -18,8 +18,8 @@ from magnum.conductor import monitors
|
|||||||
|
|
||||||
class MesosMonitor(monitors.MonitorBase):
|
class MesosMonitor(monitors.MonitorBase):
|
||||||
|
|
||||||
def __init__(self, context, bay):
|
def __init__(self, context, cluster):
|
||||||
super(MesosMonitor, self).__init__(context, bay)
|
super(MesosMonitor, self).__init__(context, cluster)
|
||||||
self.data = {}
|
self.data = {}
|
||||||
|
|
||||||
@property
|
@property
|
||||||
@ -46,7 +46,7 @@ class MesosMonitor(monitors.MonitorBase):
|
|||||||
self.data['mem_used'] = 0
|
self.data['mem_used'] = 0
|
||||||
self.data['cpu_total'] = 0
|
self.data['cpu_total'] = 0
|
||||||
self.data['cpu_used'] = 0
|
self.data['cpu_used'] = 0
|
||||||
for master_addr in self.bay.master_addresses:
|
for master_addr in self.cluster.master_addresses:
|
||||||
mesos_master_url = self._build_url(master_addr, port='5050',
|
mesos_master_url = self._build_url(master_addr, port='5050',
|
||||||
path='/state')
|
path='/state')
|
||||||
master = jsonutils.loads(urlfetch.get(mesos_master_url))
|
master = jsonutils.loads(urlfetch.get(mesos_master_url))
|
||||||
|
@ -34,18 +34,18 @@ CONF.import_opt('default_timeout',
|
|||||||
group='docker')
|
group='docker')
|
||||||
|
|
||||||
COE_CLASS_PATH = {
|
COE_CLASS_PATH = {
|
||||||
fields.BayType.SWARM: 'magnum.conductor.swarm_monitor.SwarmMonitor',
|
fields.ClusterType.SWARM: 'magnum.conductor.swarm_monitor.SwarmMonitor',
|
||||||
fields.BayType.KUBERNETES: 'magnum.conductor.k8s_monitor.K8sMonitor',
|
fields.ClusterType.KUBERNETES: 'magnum.conductor.k8s_monitor.K8sMonitor',
|
||||||
fields.BayType.MESOS: 'magnum.conductor.mesos_monitor.MesosMonitor'
|
fields.ClusterType.MESOS: 'magnum.conductor.mesos_monitor.MesosMonitor'
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@six.add_metaclass(abc.ABCMeta)
|
@six.add_metaclass(abc.ABCMeta)
|
||||||
class MonitorBase(object):
|
class MonitorBase(object):
|
||||||
|
|
||||||
def __init__(self, context, bay):
|
def __init__(self, context, cluster):
|
||||||
self.context = context
|
self.context = context
|
||||||
self.bay = bay
|
self.cluster = cluster
|
||||||
|
|
||||||
@abc.abstractproperty
|
@abc.abstractproperty
|
||||||
def metrics_spec(self):
|
def metrics_spec(self):
|
||||||
@ -67,12 +67,12 @@ class MonitorBase(object):
|
|||||||
return func()
|
return func()
|
||||||
|
|
||||||
|
|
||||||
def create_monitor(context, bay):
|
def create_monitor(context, cluster):
|
||||||
if bay.cluster_template.coe in COE_CLASS_PATH:
|
if cluster.cluster_template.coe in COE_CLASS_PATH:
|
||||||
coe_cls = importutils.import_class(
|
coe_cls = importutils.import_class(
|
||||||
COE_CLASS_PATH[bay.cluster_template.coe])
|
COE_CLASS_PATH[cluster.cluster_template.coe])
|
||||||
return coe_cls(context, bay)
|
return coe_cls(context, cluster)
|
||||||
|
|
||||||
LOG.debug("Cannot create monitor with bay type '%s'",
|
LOG.debug("Cannot create monitor with cluster type '%s'",
|
||||||
bay.cluster_template.coe)
|
cluster.cluster_template.coe)
|
||||||
return None
|
return None
|
||||||
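The COE dispatch above returns a monitor instance keyed on the cluster's COE; a short usage sketch (cluster is assumed to be a Cluster object whose cluster_template.coe is one of the mapped values):

    monitor = create_monitor(context, cluster)
    if monitor is not None:
        monitor.pull_data()            # each COE subclass fills monitor.data
        nodes = monitor.data['nodes']  # populated by the K8s/Swarm monitors above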
|
@ -27,18 +27,18 @@ LOG = logging.getLogger(__name__)
|
|||||||
|
|
||||||
class ScaleManager(object):
|
class ScaleManager(object):
|
||||||
|
|
||||||
def __init__(self, context, osclient, bay):
|
def __init__(self, context, osclient, cluster):
|
||||||
self.context = context
|
self.context = context
|
||||||
self.osclient = osclient
|
self.osclient = osclient
|
||||||
self.old_bay = objects.Bay.get_by_uuid(context, bay.uuid)
|
self.old_cluster = objects.Cluster.get_by_uuid(context, cluster.uuid)
|
||||||
self.new_bay = bay
|
self.new_cluster = cluster
|
||||||
|
|
||||||
def get_removal_nodes(self, hosts_output):
|
def get_removal_nodes(self, hosts_output):
|
||||||
if not self._is_scale_down():
|
if not self._is_scale_down():
|
||||||
return list()
|
return list()
|
||||||
|
|
||||||
bay = self.new_bay
|
cluster = self.new_cluster
|
||||||
stack = self.osclient.heat().stacks.get(bay.stack_id)
|
stack = self.osclient.heat().stacks.get(cluster.stack_id)
|
||||||
hosts = hosts_output.get_output_value(stack)
|
hosts = hosts_output.get_output_value(stack)
|
||||||
if hosts is None:
|
if hosts is None:
|
||||||
raise exception.MagnumException(_(
|
raise exception.MagnumException(_(
|
||||||
@ -47,7 +47,7 @@ class ScaleManager(object):
|
|||||||
'stack_id': stack.id})
|
'stack_id': stack.id})
|
||||||
|
|
||||||
hosts_no_container = list(hosts)
|
hosts_no_container = list(hosts)
|
||||||
k8s_api = k8s.create_k8s_api(self.context, bay)
|
k8s_api = k8s.create_k8s_api(self.context, cluster)
|
||||||
for pod in k8s_api.list_namespaced_pod(namespace='default').items:
|
for pod in k8s_api.list_namespaced_pod(namespace='default').items:
|
||||||
host = pod.spec.node_name
|
host = pod.spec.node_name
|
||||||
if host in hosts_no_container:
|
if host in hosts_no_container:
|
||||||
@ -72,7 +72,7 @@ class ScaleManager(object):
|
|||||||
return hosts_to_remove
|
return hosts_to_remove
|
||||||
|
|
||||||
def _is_scale_down(self):
|
def _is_scale_down(self):
|
||||||
return self.new_bay.node_count < self.old_bay.node_count
|
return self.new_cluster.node_count < self.old_cluster.node_count
|
||||||
|
|
||||||
def _get_num_of_removal(self):
|
def _get_num_of_removal(self):
|
||||||
return self.old_bay.node_count - self.new_bay.node_count
|
return self.old_cluster.node_count - self.new_cluster.node_count
|
||||||
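For reference, a hedged sketch of how the renamed ScaleManager is exercised during an update; hosts_output stands in for the Heat output mapping of the node list that the template definition supplies (an assumption, not shown in this hunk):

    manager = ScaleManager(context, osc, cluster)    # cluster already carries the new node_count
    to_remove = manager.get_removal_nodes(hosts_output)
    # returns an empty list unless new_cluster.node_count < old_cluster.node_count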
|
@ -24,8 +24,8 @@ LOG = log.getLogger(__name__)
|
|||||||
|
|
||||||
class SwarmMonitor(monitors.MonitorBase):
|
class SwarmMonitor(monitors.MonitorBase):
|
||||||
|
|
||||||
def __init__(self, context, bay):
|
def __init__(self, context, cluster):
|
||||||
super(SwarmMonitor, self).__init__(context, bay)
|
super(SwarmMonitor, self).__init__(context, cluster)
|
||||||
self.data = {}
|
self.data = {}
|
||||||
self.data['nodes'] = []
|
self.data['nodes'] = []
|
||||||
self.data['containers'] = []
|
self.data['containers'] = []
|
||||||
@ -40,8 +40,8 @@ class SwarmMonitor(monitors.MonitorBase):
|
|||||||
}
|
}
|
||||||
|
|
||||||
def pull_data(self):
|
def pull_data(self):
|
||||||
with docker_utils.docker_for_bay(self.context,
|
with docker_utils.docker_for_cluster(self.context,
|
||||||
self.bay) as docker:
|
self.cluster) as docker:
|
||||||
system_info = docker.info()
|
system_info = docker.info()
|
||||||
self.data['nodes'] = self._parse_node_info(system_info)
|
self.data['nodes'] = self._parse_node_info(system_info)
|
||||||
|
|
||||||
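The SwarmMonitor above leans on the renamed docker_utils helper, which is a context manager; a minimal sketch of the same pattern on its own (the import path is assumed):

    from magnum.common import docker_utils

    with docker_utils.docker_for_cluster(context, cluster) as docker:
        info = docker.info()    # docker client bound to the cluster's Swarm endpoint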
|
@ -20,33 +20,33 @@ from pycadf import resource
|
|||||||
|
|
||||||
from magnum.common import clients
|
from magnum.common import clients
|
||||||
from magnum.common import rpc
|
from magnum.common import rpc
|
||||||
from magnum.objects import bay
|
from magnum.objects import cluster
|
||||||
from magnum.objects import cluster_template
|
from magnum.objects import cluster_template
|
||||||
|
|
||||||
|
|
||||||
def retrieve_bay(context, bay_ident):
|
def retrieve_cluster(context, cluster_ident):
|
||||||
if not uuidutils.is_uuid_like(bay_ident):
|
if not uuidutils.is_uuid_like(cluster_ident):
|
||||||
return bay.Bay.get_by_name(context, bay_ident)
|
return cluster.Cluster.get_by_name(context, cluster_ident)
|
||||||
else:
|
else:
|
||||||
return bay.Bay.get_by_uuid(context, bay_ident)
|
return cluster.Cluster.get_by_uuid(context, cluster_ident)
|
||||||
|
|
||||||
|
|
||||||
def retrieve_cluster_template(context, bay):
|
def retrieve_cluster_template(context, cluster):
|
||||||
return cluster_template.ClusterTemplate.get_by_uuid(context,
|
return cluster_template.ClusterTemplate.get_by_uuid(
|
||||||
bay.baymodel_id)
|
context, cluster.cluster_template_id)
|
||||||
|
|
||||||
|
|
||||||
def retrieve_bay_uuid(context, bay_ident):
|
def retrieve_cluster_uuid(context, cluster_ident):
|
||||||
if not uuidutils.is_uuid_like(bay_ident):
|
if not uuidutils.is_uuid_like(cluster_ident):
|
||||||
bay_obj = bay.Bay.get_by_name(context, bay_ident)
|
cluster_obj = cluster.Cluster.get_by_name(context, cluster_ident)
|
||||||
return bay_obj.uuid
|
return cluster_obj.uuid
|
||||||
else:
|
else:
|
||||||
return bay_ident
|
return cluster_ident
|
||||||
|
|
||||||
|
|
||||||
def object_has_stack(context, bay_uuid):
|
def object_has_stack(context, cluster_uuid):
|
||||||
osc = clients.OpenStackClients(context)
|
osc = clients.OpenStackClients(context)
|
||||||
obj = retrieve_bay(context, bay_uuid)
|
obj = retrieve_cluster(context, cluster_uuid)
|
||||||
|
|
||||||
stack = osc.heat().stacks.get(obj.stack_id)
|
stack = osc.heat().stacks.get(obj.stack_id)
|
||||||
if (stack.stack_status == 'DELETE_COMPLETE' or
|
if (stack.stack_status == 'DELETE_COMPLETE' or
|
||||||
@ -86,8 +86,8 @@ def _get_request_audit_info(context):
|
|||||||
return initiator
|
return initiator
|
||||||
|
|
||||||
|
|
||||||
def notify_about_bay_operation(context, action, outcome):
|
def notify_about_cluster_operation(context, action, outcome):
|
||||||
"""Send a notification about bay operation.
|
"""Send a notification about cluster operation.
|
||||||
|
|
||||||
:param action: CADF action being audited
|
:param action: CADF action being audited
|
||||||
:param outcome: CADF outcome
|
:param outcome: CADF outcome
|
||||||
@ -98,10 +98,10 @@ def notify_about_bay_operation(context, action, outcome):
|
|||||||
outcome=outcome,
|
outcome=outcome,
|
||||||
action=action,
|
action=action,
|
||||||
initiator=_get_request_audit_info(context),
|
initiator=_get_request_audit_info(context),
|
||||||
target=resource.Resource(typeURI='service/magnum/bay'),
|
target=resource.Resource(typeURI='service/magnum/cluster'),
|
||||||
observer=resource.Resource(typeURI='service/magnum/bay'))
|
observer=resource.Resource(typeURI='service/magnum/cluster'))
|
||||||
service = 'magnum'
|
service = 'magnum'
|
||||||
event_type = '%(service)s.bay.%(action)s' % {
|
event_type = '%(service)s.cluster.%(action)s' % {
|
||||||
'service': service, 'action': action}
|
'service': service, 'action': action}
|
||||||
payload = event.as_dict()
|
payload = event.as_dict()
|
||||||
|
|
||||||
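The renamed helpers above accept either a name or a UUID; a quick illustration with made-up values:

    cluster = retrieve_cluster(context, 'my-cluster')      # resolved by name
    same = retrieve_cluster(context, cluster.uuid)         # resolved by UUID
    uuid = retrieve_cluster_uuid(context, 'my-cluster')    # name -> uuid shortcut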
|
@ -41,17 +41,17 @@ class Connection(object):
|
|||||||
"""Constructor."""
|
"""Constructor."""
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def get_bay_list(self, context, filters=None, limit=None,
|
def get_cluster_list(self, context, filters=None, limit=None,
|
||||||
marker=None, sort_key=None, sort_dir=None):
|
marker=None, sort_key=None, sort_dir=None):
|
||||||
"""Get matching bays.
|
"""Get matching clusters.
|
||||||
|
|
||||||
Return a list of the specified columns for all bays that match the
|
Return a list of the specified columns for all clusters that match the
|
||||||
specified filters.
|
specified filters.
|
||||||
|
|
||||||
:param context: The security context
|
:param context: The security context
|
||||||
:param filters: Filters to apply. Defaults to None.
|
:param filters: Filters to apply. Defaults to None.
|
||||||
|
|
||||||
:param limit: Maximum number of bays to return.
|
:param limit: Maximum number of clusters to return.
|
||||||
:param marker: the last item of the previous page; we return the next
|
:param marker: the last item of the previous page; we return the next
|
||||||
result set.
|
result set.
|
||||||
:param sort_key: Attribute by which results should be sorted.
|
:param sort_key: Attribute by which results should be sorted.
|
||||||
@ -61,12 +61,13 @@ class Connection(object):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def create_bay(self, values):
|
def create_cluster(self, values):
|
||||||
"""Create a new bay.
|
"""Create a new cluster.
|
||||||
|
|
||||||
:param values: A dict containing several items used to identify
|
:param values: A dict containing several items used to identify
|
||||||
and track the bay, and several dicts which are passed
|
and track the cluster, and several dicts which are
|
||||||
into the Drivers when managing this bay. For example:
|
passed into the Drivers when managing this cluster.
|
||||||
|
For example:
|
||||||
|
|
||||||
::
|
::
|
||||||
|
|
||||||
@ -75,49 +76,49 @@ class Connection(object):
|
|||||||
'name': 'example',
|
'name': 'example',
|
||||||
'type': 'virt'
|
'type': 'virt'
|
||||||
}
|
}
|
||||||
:returns: A bay.
|
:returns: A cluster.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def get_bay_by_id(self, context, bay_id):
|
def get_cluster_by_id(self, context, cluster_id):
|
||||||
"""Return a bay.
|
"""Return a cluster.
|
||||||
|
|
||||||
:param context: The security context
|
:param context: The security context
|
||||||
:param bay_id: The id of a bay.
|
:param cluster_id: The id of a cluster.
|
||||||
:returns: A bay.
|
:returns: A cluster.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def get_bay_by_uuid(self, context, bay_uuid):
|
def get_cluster_by_uuid(self, context, cluster_uuid):
|
||||||
"""Return a bay.
|
"""Return a cluster.
|
||||||
|
|
||||||
:param context: The security context
|
:param context: The security context
|
||||||
:param bay_uuid: The uuid of a bay.
|
:param cluster_uuid: The uuid of a cluster.
|
||||||
:returns: A bay.
|
:returns: A cluster.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def get_bay_by_name(self, context, bay_name):
|
def get_cluster_by_name(self, context, cluster_name):
|
||||||
"""Return a bay.
|
"""Return a cluster.
|
||||||
|
|
||||||
:param context: The security context
|
:param context: The security context
|
||||||
:param bay_name: The name of a bay.
|
:param cluster_name: The name of a cluster.
|
||||||
:returns: A bay.
|
:returns: A cluster.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def destroy_bay(self, bay_id):
|
def destroy_cluster(self, cluster_id):
|
||||||
"""Destroy a bay and all associated interfaces.
|
"""Destroy a cluster and all associated interfaces.
|
||||||
|
|
||||||
:param bay_id: The id or uuid of a bay.
|
:param cluster_id: The id or uuid of a cluster.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def update_bay(self, bay_id, values):
|
def update_cluster(self, cluster_id, values):
|
||||||
"""Update properties of a bay.
|
"""Update properties of a cluster.
|
||||||
|
|
||||||
:param bay_id: The id or uuid of a bay.
|
:param cluster_id: The id or uuid of a cluster.
|
||||||
:returns: A bay.
|
:returns: A cluster.
|
||||||
:raises: ClusterNotFound
|
:raises: ClusterNotFound
|
||||||
"""
|
"""
|
||||||
|
|
||||||
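A caller-side sketch of the renamed abstract DB API; dbapi stands for a concrete Connection implementation such as the SQLAlchemy backend further down in this patch:

    clusters = dbapi.get_cluster_list(context,
                                      filters={'status': ['CREATE_COMPLETE']},
                                      limit=10)
    for c in clusters:
        dbapi.update_cluster(c.uuid, {'node_count': c.node_count + 1})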
|
@ -0,0 +1,35 @@
|
|||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
"""rename bay table to cluster
|
||||||
|
|
||||||
|
Revision ID: 720f640f43d1
|
||||||
|
Revises: fb03fdef8919
|
||||||
|
Create Date: 2016-09-02 09:43:41.485934
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
# revision identifiers, used by Alembic.
|
||||||
|
revision = '720f640f43d1'
|
||||||
|
down_revision = 'fb03fdef8919'
|
||||||
|
|
||||||
|
from alembic import op
|
||||||
|
import sqlalchemy as sa
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade():
|
||||||
|
op.alter_column('bay', 'baymodel_id',
|
||||||
|
new_column_name='cluster_template_id',
|
||||||
|
existing_type=sa.String(255))
|
||||||
|
op.alter_column('bay', 'bay_create_timeout',
|
||||||
|
new_column_name='create_timeout',
|
||||||
|
existing_type=sa.Integer())
|
||||||
|
op.rename_table('bay', 'cluster')
|
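The hunk above adds only an upgrade() step; a downgrade counterpart, if one were wanted, would simply invert it (a sketch, not part of this patch):

    def downgrade():
        op.rename_table('cluster', 'bay')
        op.alter_column('bay', 'create_timeout',
                        new_column_name='bay_create_timeout',
                        existing_type=sa.Integer())
        op.alter_column('bay', 'cluster_template_id',
                        new_column_name='baymodel_id',
                        existing_type=sa.String(255))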
@ -120,11 +120,11 @@ class Connection(api.Connection):
|
|||||||
|
|
||||||
return query
|
return query
|
||||||
|
|
||||||
def _add_bays_filters(self, query, filters):
|
def _add_clusters_filters(self, query, filters):
|
||||||
if filters is None:
|
if filters is None:
|
||||||
filters = {}
|
filters = {}
|
||||||
|
|
||||||
possible_filters = ["baymodel_id", "name", "node_count",
|
possible_filters = ["cluster_template_id", "name", "node_count",
|
||||||
"master_count", "stack_id", "api_address",
|
"master_count", "stack_id", "api_address",
|
||||||
"node_addresses", "project_id", "user_id"]
|
"node_addresses", "project_id", "user_id"]
|
||||||
|
|
||||||
@ -135,91 +135,91 @@ class Connection(api.Connection):
|
|||||||
query = query.filter_by(**filter_dict)
|
query = query.filter_by(**filter_dict)
|
||||||
|
|
||||||
if 'status' in filters:
|
if 'status' in filters:
|
||||||
query = query.filter(models.Bay.status.in_(filters['status']))
|
query = query.filter(models.Cluster.status.in_(filters['status']))
|
||||||
|
|
||||||
return query
|
return query
|
||||||
|
|
||||||
def get_bay_list(self, context, filters=None, limit=None, marker=None,
|
def get_cluster_list(self, context, filters=None, limit=None, marker=None,
|
||||||
sort_key=None, sort_dir=None):
|
sort_key=None, sort_dir=None):
|
||||||
query = model_query(models.Bay)
|
query = model_query(models.Cluster)
|
||||||
query = self._add_tenant_filters(context, query)
|
query = self._add_tenant_filters(context, query)
|
||||||
query = self._add_bays_filters(query, filters)
|
query = self._add_clusters_filters(query, filters)
|
||||||
return _paginate_query(models.Bay, limit, marker,
|
return _paginate_query(models.Cluster, limit, marker,
|
||||||
sort_key, sort_dir, query)
|
sort_key, sort_dir, query)
|
||||||
|
|
||||||
def create_bay(self, values):
|
def create_cluster(self, values):
|
||||||
# ensure defaults are present for new bays
|
# ensure defaults are present for new clusters
|
||||||
if not values.get('uuid'):
|
if not values.get('uuid'):
|
||||||
values['uuid'] = uuidutils.generate_uuid()
|
values['uuid'] = uuidutils.generate_uuid()
|
||||||
|
|
||||||
bay = models.Bay()
|
cluster = models.Cluster()
|
||||||
bay.update(values)
|
cluster.update(values)
|
||||||
try:
|
try:
|
||||||
bay.save()
|
cluster.save()
|
||||||
except db_exc.DBDuplicateEntry:
|
except db_exc.DBDuplicateEntry:
|
||||||
raise exception.ClusterAlreadyExists(uuid=values['uuid'])
|
raise exception.ClusterAlreadyExists(uuid=values['uuid'])
|
||||||
return bay
|
return cluster
|
||||||
|
|
||||||
def get_bay_by_id(self, context, bay_id):
|
def get_cluster_by_id(self, context, cluster_id):
|
||||||
query = model_query(models.Bay)
|
query = model_query(models.Cluster)
|
||||||
query = self._add_tenant_filters(context, query)
|
query = self._add_tenant_filters(context, query)
|
||||||
query = query.filter_by(id=bay_id)
|
query = query.filter_by(id=cluster_id)
|
||||||
try:
|
try:
|
||||||
return query.one()
|
return query.one()
|
||||||
except NoResultFound:
|
except NoResultFound:
|
||||||
raise exception.ClusterNotFound(cluster=bay_id)
|
raise exception.ClusterNotFound(cluster=cluster_id)
|
||||||
|
|
||||||
def get_bay_by_name(self, context, bay_name):
|
def get_cluster_by_name(self, context, cluster_name):
|
||||||
query = model_query(models.Bay)
|
query = model_query(models.Cluster)
|
||||||
query = self._add_tenant_filters(context, query)
|
query = self._add_tenant_filters(context, query)
|
||||||
query = query.filter_by(name=bay_name)
|
query = query.filter_by(name=cluster_name)
|
||||||
try:
|
try:
|
||||||
return query.one()
|
return query.one()
|
||||||
except MultipleResultsFound:
|
except MultipleResultsFound:
|
||||||
raise exception.Conflict('Multiple bays exist with same name.'
|
raise exception.Conflict('Multiple clusters exist with same name.'
|
||||||
' Please use the bay uuid instead.')
|
' Please use the cluster uuid instead.')
|
||||||
except NoResultFound:
|
except NoResultFound:
|
||||||
raise exception.ClusterNotFound(cluster=bay_name)
|
raise exception.ClusterNotFound(cluster=cluster_name)
|
||||||
|
|
||||||
def get_bay_by_uuid(self, context, bay_uuid):
|
def get_cluster_by_uuid(self, context, cluster_uuid):
|
||||||
query = model_query(models.Bay)
|
query = model_query(models.Cluster)
|
||||||
query = self._add_tenant_filters(context, query)
|
query = self._add_tenant_filters(context, query)
|
||||||
query = query.filter_by(uuid=bay_uuid)
|
query = query.filter_by(uuid=cluster_uuid)
|
||||||
try:
|
try:
|
||||||
return query.one()
|
return query.one()
|
||||||
except NoResultFound:
|
except NoResultFound:
|
||||||
raise exception.ClusterNotFound(cluster=bay_uuid)
|
raise exception.ClusterNotFound(cluster=cluster_uuid)
|
||||||
|
|
||||||
def destroy_bay(self, bay_id):
|
def destroy_cluster(self, cluster_id):
|
||||||
session = get_session()
|
session = get_session()
|
||||||
with session.begin():
|
with session.begin():
|
||||||
query = model_query(models.Bay, session=session)
|
query = model_query(models.Cluster, session=session)
|
||||||
query = add_identity_filter(query, bay_id)
|
query = add_identity_filter(query, cluster_id)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
query.one()
|
query.one()
|
||||||
except NoResultFound:
|
except NoResultFound:
|
||||||
raise exception.ClusterNotFound(cluster=bay_id)
|
raise exception.ClusterNotFound(cluster=cluster_id)
|
||||||
|
|
||||||
query.delete()
|
query.delete()
|
||||||
|
|
||||||
def update_bay(self, bay_id, values):
|
def update_cluster(self, cluster_id, values):
|
||||||
# NOTE(dtantsur): this can lead to very strange errors
|
# NOTE(dtantsur): this can lead to very strange errors
|
||||||
if 'uuid' in values:
|
if 'uuid' in values:
|
||||||
msg = _("Cannot overwrite UUID for an existing Bay.")
|
msg = _("Cannot overwrite UUID for an existing Cluster.")
|
||||||
raise exception.InvalidParameterValue(err=msg)
|
raise exception.InvalidParameterValue(err=msg)
|
||||||
|
|
||||||
return self._do_update_bay(bay_id, values)
|
return self._do_update_cluster(cluster_id, values)
|
||||||
|
|
||||||
def _do_update_bay(self, bay_id, values):
|
def _do_update_cluster(self, cluster_id, values):
|
||||||
session = get_session()
|
session = get_session()
|
||||||
with session.begin():
|
with session.begin():
|
||||||
query = model_query(models.Bay, session=session)
|
query = model_query(models.Cluster, session=session)
|
||||||
query = add_identity_filter(query, bay_id)
|
query = add_identity_filter(query, cluster_id)
|
||||||
try:
|
try:
|
||||||
ref = query.with_lockmode('update').one()
|
ref = query.with_lockmode('update').one()
|
||||||
except NoResultFound:
|
except NoResultFound:
|
||||||
raise exception.ClusterNotFound(cluster=bay_id)
|
raise exception.ClusterNotFound(cluster=cluster_id)
|
||||||
|
|
||||||
if 'provision_state' in values:
|
if 'provision_state' in values:
|
||||||
values['provision_updated_at'] = timeutils.utcnow()
|
values['provision_updated_at'] = timeutils.utcnow()
|
||||||
@ -309,8 +309,8 @@ class Connection(api.Connection):
|
|||||||
|
|
||||||
def _is_cluster_template_referenced(self, session, cluster_template_uuid):
|
def _is_cluster_template_referenced(self, session, cluster_template_uuid):
|
||||||
"""Checks whether the ClusterTemplate is referenced by cluster(s)."""
|
"""Checks whether the ClusterTemplate is referenced by cluster(s)."""
|
||||||
query = model_query(models.Bay, session=session)
|
query = model_query(models.Cluster, session=session)
|
||||||
query = self._add_bays_filters(query, {'baymodel_id':
|
query = self._add_clusters_filters(query, {'cluster_template_id':
|
||||||
cluster_template_uuid})
|
cluster_template_uuid})
|
||||||
return query.count() != 0
|
return query.count() != 0
|
||||||
|
|
||||||
|
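Lookup semantics after the rename, for reference (a sketch; conn is an instance of the SQLAlchemy Connection above, and the name and known_uuid values are made up):

    try:
        cluster = conn.get_cluster_by_name(context, 'prod')
    except exception.Conflict:
        # several clusters share the name, so fall back to the UUID
        cluster = conn.get_cluster_by_uuid(context, known_uuid)
    except exception.ClusterNotFound:
        cluster = None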
@ -99,10 +99,10 @@ class MagnumBase(models.TimestampMixin,
|
|||||||
Base = declarative_base(cls=MagnumBase)
|
Base = declarative_base(cls=MagnumBase)
|
||||||
|
|
||||||
|
|
||||||
class Bay(Base):
|
class Cluster(Base):
|
||||||
"""Represents a bay."""
|
"""Represents a Cluster."""
|
||||||
|
|
||||||
__tablename__ = 'bay'
|
__tablename__ = 'cluster'
|
||||||
__table_args__ = (
|
__table_args__ = (
|
||||||
schema.UniqueConstraint('uuid', name='uniq_bay0uuid'),
|
schema.UniqueConstraint('uuid', name='uniq_bay0uuid'),
|
||||||
table_args()
|
table_args()
|
||||||
@ -112,7 +112,7 @@ class Bay(Base):
|
|||||||
user_id = Column(String(255))
|
user_id = Column(String(255))
|
||||||
uuid = Column(String(36))
|
uuid = Column(String(36))
|
||||||
name = Column(String(255))
|
name = Column(String(255))
|
||||||
baymodel_id = Column(String(255))
|
cluster_template_id = Column(String(255))
|
||||||
stack_id = Column(String(255))
|
stack_id = Column(String(255))
|
||||||
api_address = Column(String(255))
|
api_address = Column(String(255))
|
||||||
node_addresses = Column(JSONEncodedList)
|
node_addresses = Column(JSONEncodedList)
|
||||||
@ -120,7 +120,7 @@ class Bay(Base):
|
|||||||
master_count = Column(Integer())
|
master_count = Column(Integer())
|
||||||
status = Column(String(20))
|
status = Column(String(20))
|
||||||
status_reason = Column(Text)
|
status_reason = Column(Text)
|
||||||
bay_create_timeout = Column(Integer())
|
create_timeout = Column(Integer())
|
||||||
discovery_url = Column(String(255))
|
discovery_url = Column(String(255))
|
||||||
master_addresses = Column(JSONEncodedList)
|
master_addresses = Column(JSONEncodedList)
|
||||||
# TODO(wanghua): encrypt trust_id in db
|
# TODO(wanghua): encrypt trust_id in db
|
||||||
|
@ -24,8 +24,8 @@ KUBE_INSECURE_PORT = '8080'
|
|||||||
|
|
||||||
class K8sApiAddressOutputMapping(template_def.OutputMapping):
|
class K8sApiAddressOutputMapping(template_def.OutputMapping):
|
||||||
|
|
||||||
def set_output(self, stack, cluster_template, bay):
|
def set_output(self, stack, cluster_template, cluster):
|
||||||
if self.bay_attr is None:
|
if self.cluster_attr is None:
|
||||||
return
|
return
|
||||||
|
|
||||||
output_value = self.get_output_value(stack)
|
output_value = self.get_output_value(stack)
|
||||||
@ -43,7 +43,7 @@ class K8sApiAddressOutputMapping(template_def.OutputMapping):
|
|||||||
'port': port,
|
'port': port,
|
||||||
}
|
}
|
||||||
value = "%(protocol)s://%(address)s:%(port)s" % params
|
value = "%(protocol)s://%(address)s:%(port)s" % params
|
||||||
setattr(bay, self.bay_attr, value)
|
setattr(cluster, self.cluster_attr, value)
|
||||||
|
|
||||||
|
|
||||||
class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
||||||
@ -56,7 +56,7 @@ class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||||||
self.add_parameter('minion_flavor',
|
self.add_parameter('minion_flavor',
|
||||||
cluster_template_attr='flavor_id')
|
cluster_template_attr='flavor_id')
|
||||||
self.add_parameter('number_of_minions',
|
self.add_parameter('number_of_minions',
|
||||||
bay_attr='node_count')
|
cluster_attr='node_count')
|
||||||
self.add_parameter('external_network',
|
self.add_parameter('external_network',
|
||||||
cluster_template_attr='external_network_id',
|
cluster_template_attr='external_network_id',
|
||||||
required=True)
|
required=True)
|
||||||
@ -69,23 +69,23 @@ class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||||||
required=True)
|
required=True)
|
||||||
self.add_parameter('registry_enabled',
|
self.add_parameter('registry_enabled',
|
||||||
cluster_template_attr='registry_enabled')
|
cluster_template_attr='registry_enabled')
|
||||||
self.add_parameter('bay_uuid',
|
self.add_parameter('cluster_uuid',
|
||||||
bay_attr='uuid',
|
cluster_attr='uuid',
|
||||||
param_type=str)
|
param_type=str)
|
||||||
self.add_parameter('insecure_registry_url',
|
self.add_parameter('insecure_registry_url',
|
||||||
cluster_template_attr='insecure_registry')
|
cluster_template_attr='insecure_registry')
|
||||||
self.add_parameter('kube_version',
|
self.add_parameter('kube_version',
|
||||||
bay_attr='coe_version')
|
cluster_attr='coe_version')
|
||||||
|
|
||||||
self.add_output('api_address',
|
self.add_output('api_address',
|
||||||
bay_attr='api_address',
|
cluster_attr='api_address',
|
||||||
mapping_type=K8sApiAddressOutputMapping)
|
mapping_type=K8sApiAddressOutputMapping)
|
||||||
self.add_output('kube_minions_private',
|
self.add_output('kube_minions_private',
|
||||||
bay_attr=None)
|
cluster_attr=None)
|
||||||
self.add_output('kube_masters_private',
|
self.add_output('kube_masters_private',
|
||||||
bay_attr=None)
|
cluster_attr=None)
|
||||||
|
|
||||||
def get_params(self, context, cluster_template, bay, **kwargs):
|
def get_params(self, context, cluster_template, cluster, **kwargs):
|
||||||
extra_params = kwargs.pop('extra_params', {})
|
extra_params = kwargs.pop('extra_params', {})
|
||||||
scale_mgr = kwargs.pop('scale_manager', None)
|
scale_mgr = kwargs.pop('scale_manager', None)
|
||||||
if scale_mgr:
|
if scale_mgr:
|
||||||
@ -93,7 +93,7 @@ class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||||||
extra_params['minions_to_remove'] = (
|
extra_params['minions_to_remove'] = (
|
||||||
scale_mgr.get_removal_nodes(hosts))
|
scale_mgr.get_removal_nodes(hosts))
|
||||||
|
|
||||||
extra_params['discovery_url'] = self.get_discovery_url(bay)
|
extra_params['discovery_url'] = self.get_discovery_url(cluster)
|
||||||
osc = self.get_osc(context)
|
osc = self.get_osc(context)
|
||||||
extra_params['magnum_url'] = osc.magnum_url()
|
extra_params['magnum_url'] = osc.magnum_url()
|
||||||
|
|
||||||
@ -112,6 +112,6 @@ class K8sTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||||||
CONF.docker_registry.swift_registry_container)
|
CONF.docker_registry.swift_registry_container)
|
||||||
|
|
||||||
return super(K8sTemplateDefinition,
|
return super(K8sTemplateDefinition,
|
||||||
self).get_params(context, cluster_template, bay,
|
self).get_params(context, cluster_template, cluster,
|
||||||
extra_params=extra_params,
|
extra_params=extra_params,
|
||||||
**kwargs)
|
**kwargs)
|
||||||
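A hedged sketch of the call path these parameters feed into; the ('vm', 'fedora-atomic', 'kubernetes') tuple and the surrounding driver wiring are assumptions, not part of this hunk:

    definition = template_def.TemplateDefinition.get_template_definition(
        'vm', 'fedora-atomic', 'kubernetes')
    params = definition.get_params(context, cluster_template, cluster)
    # expected to yield the Heat parameter dict, e.g. 'number_of_minions'
    # taken from cluster.node_count and 'cluster_uuid' from cluster.uuid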
|
@ -62,37 +62,37 @@ CONF.import_opt('trustee_domain_id', 'magnum.common.keystone', group='trust')
|
|||||||
|
|
||||||
|
|
||||||
class ParameterMapping(object):
|
class ParameterMapping(object):
|
||||||
"""A mapping associating heat param and bay/cluster_template attr.
|
"""A mapping associating heat param and cluster_template attr.
|
||||||
|
|
||||||
A ParameterMapping is an association of a Heat parameter name with
|
A ParameterMapping is an association of a Heat parameter name with
|
||||||
an attribute on a Bay, ClusterTemplate, or both.
|
an attribute on a Cluster, ClusterTemplate, or both.
|
||||||
|
|
||||||
In the case of both cluster_template_attr and bay_attr being set, the
|
In the case of both cluster_template_attr and cluster_attr being set, the
|
||||||
ClusterTemplate will be checked first and then Bay if the attribute isn't
|
ClusterTemplate will be checked first and then Cluster if the attribute
|
||||||
set on the ClusterTemplate.
|
isn't set on the ClusterTemplate.
|
||||||
|
|
||||||
Parameters can also be set as 'required'. If a required parameter
|
Parameters can also be set as 'required'. If a required parameter
|
||||||
isn't set, a RequiredArgumentNotProvided exception will be raised.
|
isn't set, a RequiredArgumentNotProvided exception will be raised.
|
||||||
"""
|
"""
|
||||||
def __init__(self, heat_param, cluster_template_attr=None,
|
def __init__(self, heat_param, cluster_template_attr=None,
|
||||||
bay_attr=None, required=False,
|
cluster_attr=None, required=False,
|
||||||
param_type=lambda x: x):
|
param_type=lambda x: x):
|
||||||
self.heat_param = heat_param
|
self.heat_param = heat_param
|
||||||
self.cluster_template_attr = cluster_template_attr
|
self.cluster_template_attr = cluster_template_attr
|
||||||
self.bay_attr = bay_attr
|
self.cluster_attr = cluster_attr
|
||||||
self.required = required
|
self.required = required
|
||||||
self.param_type = param_type
|
self.param_type = param_type
|
||||||
|
|
||||||
def set_param(self, params, cluster_template, bay):
|
def set_param(self, params, cluster_template, cluster):
|
||||||
value = None
|
value = None
|
||||||
|
|
||||||
if (self.cluster_template_attr and
|
if (self.cluster_template_attr and
|
||||||
getattr(cluster_template, self.cluster_template_attr, None)
|
getattr(cluster_template, self.cluster_template_attr, None)
|
||||||
is not None):
|
is not None):
|
||||||
value = getattr(cluster_template, self.cluster_template_attr)
|
value = getattr(cluster_template, self.cluster_template_attr)
|
||||||
elif (self.bay_attr and
|
elif (self.cluster_attr and
|
||||||
getattr(bay, self.bay_attr, None) is not None):
|
getattr(cluster, self.cluster_attr, None) is not None):
|
||||||
value = getattr(bay, self.bay_attr)
|
value = getattr(cluster, self.cluster_attr)
|
||||||
elif self.required:
|
elif self.required:
|
||||||
kwargs = dict(heat_param=self.heat_param)
|
kwargs = dict(heat_param=self.heat_param)
|
||||||
raise exception.RequiredParameterNotProvided(**kwargs)
|
raise exception.RequiredParameterNotProvided(**kwargs)
|
||||||
@ -103,23 +103,23 @@ class ParameterMapping(object):
|
|||||||
|
|
||||||
|
|
||||||
class OutputMapping(object):
|
class OutputMapping(object):
|
||||||
"""A mapping associating heat outputs and bay attr.
|
"""A mapping associating heat outputs and cluster attr.
|
||||||
|
|
||||||
An OutputMapping is an association of a Heat output with a key
|
An OutputMapping is an association of a Heat output with a key
|
||||||
Magnum understands.
|
Magnum understands.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, heat_output, bay_attr=None):
|
def __init__(self, heat_output, cluster_attr=None):
|
||||||
self.bay_attr = bay_attr
|
self.cluster_attr = cluster_attr
|
||||||
self.heat_output = heat_output
|
self.heat_output = heat_output
|
||||||
|
|
||||||
def set_output(self, stack, cluster_template, bay):
|
def set_output(self, stack, cluster_template, cluster):
|
||||||
if self.bay_attr is None:
|
if self.cluster_attr is None:
|
||||||
return
|
return
|
||||||
|
|
||||||
output_value = self.get_output_value(stack)
|
output_value = self.get_output_value(stack)
|
||||||
if output_value is not None:
|
if output_value is not None:
|
||||||
setattr(bay, self.bay_attr, output_value)
|
setattr(cluster, self.cluster_attr, output_value)
|
||||||
|
|
||||||
def matched(self, output_key):
|
def matched(self, output_key):
|
||||||
return self.heat_output == output_key
|
return self.heat_output == output_key
|
||||||
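Editor's note: the OutputMapping counterpart works in the opposite direction, copying a Heat stack output onto the renamed Cluster object. A rough sketch, assuming heatclient's usual stack.outputs list of {'output_key': ..., 'output_value': ...} dicts::

    from types import SimpleNamespace


    def get_output_value(stack, heat_output):
        # Assumed shape of the helper used by OutputMapping.set_output above.
        for output in stack.outputs:
            if output['output_key'] == heat_output:
                return output['output_value']
        return None


    def set_output(stack, cluster, heat_output, cluster_attr):
        if cluster_attr is None:
            return
        value = get_output_value(stack, heat_output)
        if value is not None:
            setattr(cluster, cluster_attr, value)


    stack = SimpleNamespace(outputs=[{'output_key': 'api_address',
                                      'output_value': '10.0.0.5'}])
    cluster = SimpleNamespace(api_address=None)
    set_output(stack, cluster, 'api_address', 'api_address')
    print(cluster.api_address)  # 10.0.0.5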
@ -155,7 +155,7 @@ class TemplateDefinition(object):
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def get_template_definitions(cls):
|
def get_template_definitions(cls):
|
||||||
'''Retrieves bay definitions from python entry_points.
|
'''Retrieves cluster definitions from python entry_points.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
@ -190,11 +190,11 @@ class TemplateDefinition(object):
|
|||||||
if not cls.definitions:
|
if not cls.definitions:
|
||||||
cls.definitions = dict()
|
cls.definitions = dict()
|
||||||
for entry_point, def_class in cls.load_entry_points():
|
for entry_point, def_class in cls.load_entry_points():
|
||||||
for bay_type in def_class.provides:
|
for cluster_type in def_class.provides:
|
||||||
bay_type_tuple = (bay_type['server_type'],
|
cluster_type_tuple = (cluster_type['server_type'],
|
||||||
bay_type['os'],
|
cluster_type['os'],
|
||||||
bay_type['coe'])
|
cluster_type['coe'])
|
||||||
providers = cls.definitions.setdefault(bay_type_tuple,
|
providers = cls.definitions.setdefault(cluster_type_tuple,
|
||||||
dict())
|
dict())
|
||||||
providers[entry_point.name] = def_class
|
providers[entry_point.name] = def_class
|
||||||
|
|
||||||
@ -205,7 +205,7 @@ class TemplateDefinition(object):
|
|||||||
'''Get enabled TemplateDefinitions.
|
'''Get enabled TemplateDefinitions.
|
||||||
|
|
||||||
Returns the enabled TemplateDefinition class for the provided
|
Returns the enabled TemplateDefinition class for the provided
|
||||||
bay_type.
|
cluster_type.
|
||||||
|
|
||||||
With the following classes:
|
With the following classes:
|
||||||
class TemplateDefinition1(TemplateDefinition):
|
class TemplateDefinition1(TemplateDefinition):
|
||||||
@ -227,30 +227,30 @@ class TemplateDefinition(object):
|
|||||||
get_template_name_1_definition('server_type2', 'os2', 'coe2')
|
get_template_name_1_definition('server_type2', 'os2', 'coe2')
|
||||||
will return: TemplateDefinition2
|
will return: TemplateDefinition2
|
||||||
|
|
||||||
:param server_type: The server_type the bay definition
|
:param server_type: The server_type the cluster definition
|
||||||
will build on
|
will build on
|
||||||
:param os: The operating system the bay definition will build on
|
:param os: The operating system the cluster definition will build on
|
||||||
:param coe: The Container Orchestration Environment the bay will
|
:param coe: The Container Orchestration Environment the cluster will
|
||||||
produce
|
produce
|
||||||
|
|
||||||
:return: class
|
:return: class
|
||||||
'''
|
'''
|
||||||
|
|
||||||
definition_map = cls.get_template_definitions()
|
definition_map = cls.get_template_definitions()
|
||||||
bay_type = (server_type, os, coe)
|
cluster_type = (server_type, os, coe)
|
||||||
|
|
||||||
if bay_type not in definition_map:
|
if cluster_type not in definition_map:
|
||||||
raise exception.BayTypeNotSupported(
|
raise exception.ClusterTypeNotSupported(
|
||||||
server_type=server_type,
|
server_type=server_type,
|
||||||
os=os,
|
os=os,
|
||||||
coe=coe)
|
coe=coe)
|
||||||
type_definitions = definition_map[bay_type]
|
type_definitions = definition_map[cluster_type]
|
||||||
|
|
||||||
for name in cfg.CONF.cluster.enabled_definitions:
|
for name in cfg.CONF.cluster.enabled_definitions:
|
||||||
if name in type_definitions:
|
if name in type_definitions:
|
||||||
return type_definitions[name]()
|
return type_definitions[name]()
|
||||||
|
|
||||||
raise exception.BayTypeNotEnabled(
|
raise exception.ClusterTypeNotEnabled(
|
||||||
server_type=server_type, os=os, coe=coe)
|
server_type=server_type, os=os, coe=coe)
|
||||||
|
|
||||||
def add_parameter(self, *args, **kwargs):
|
def add_parameter(self, *args, **kwargs):
|
||||||
@ -269,13 +269,13 @@ class TemplateDefinition(object):
|
|||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def get_params(self, context, cluster_template, bay, **kwargs):
|
def get_params(self, context, cluster_template, cluster, **kwargs):
|
||||||
"""Pulls template parameters from ClusterTemplate and/or Bay.
|
"""Pulls template parameters from ClusterTemplate.
|
||||||
|
|
||||||
:param context: Context to pull template parameters for
|
:param context: Context to pull template parameters for
|
||||||
:param cluster_template: ClusterTemplate to pull template parameters
|
:param cluster_template: ClusterTemplate to pull template parameters
|
||||||
from
|
from
|
||||||
:param bay: Bay to pull template parameters from
|
:param cluster: Cluster to pull template parameters from
|
||||||
:param extra_params: Any extra params to be provided to the template
|
:param extra_params: Any extra params to be provided to the template
|
||||||
|
|
||||||
:return: dict of template parameters
|
:return: dict of template parameters
|
||||||
@ -283,7 +283,7 @@ class TemplateDefinition(object):
|
|||||||
template_params = dict()
|
template_params = dict()
|
||||||
|
|
||||||
for mapping in self.param_mappings:
|
for mapping in self.param_mappings:
|
||||||
mapping.set_param(template_params, cluster_template, bay)
|
mapping.set_param(template_params, cluster_template, cluster)
|
||||||
|
|
||||||
if 'extra_params' in kwargs:
|
if 'extra_params' in kwargs:
|
||||||
template_params.update(kwargs.get('extra_params'))
|
template_params.update(kwargs.get('extra_params'))
|
||||||
@ -302,26 +302,27 @@ class TemplateDefinition(object):
|
|||||||
"""
|
"""
|
||||||
return []
|
return []
|
||||||
|
|
||||||
def get_heat_param(self, bay_attr=None, cluster_template_attr=None):
|
def get_heat_param(self, cluster_attr=None, cluster_template_attr=None):
|
||||||
"""Returns stack param name.
|
"""Returns stack param name.
|
||||||
|
|
||||||
Return stack param name using bay and cluster_template attributes
|
Return stack param name using cluster and cluster_template attributes
|
||||||
:param bay_attr bay attribute from which it maps to stack attribute
|
:param cluster_attr cluster attribute from which it maps to stack
|
||||||
|
attribute
|
||||||
:param cluster_template_attr cluster_template attribute from which it
|
:param cluster_template_attr cluster_template attribute from which it
|
||||||
maps to stack attribute
|
maps to stack attribute
|
||||||
|
|
||||||
:return stack parameter name or None
|
:return stack parameter name or None
|
||||||
"""
|
"""
|
||||||
for mapping in self.param_mappings:
|
for mapping in self.param_mappings:
|
||||||
if (mapping.bay_attr == bay_attr and
|
if (mapping.cluster_attr == cluster_attr and
|
||||||
mapping.cluster_template_attr == cluster_template_attr):
|
mapping.cluster_template_attr == cluster_template_attr):
|
||||||
return mapping.heat_param
|
return mapping.heat_param
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def update_outputs(self, stack, cluster_template, bay):
|
def update_outputs(self, stack, cluster_template, cluster):
|
||||||
for output in self.output_mappings:
|
for output in self.output_mappings:
|
||||||
output.set_output(stack, cluster_template, bay)
|
output.set_output(stack, cluster_template, cluster)
|
||||||
|
|
||||||
@abc.abstractproperty
|
@abc.abstractproperty
|
||||||
def driver_module_path(self):
|
def driver_module_path(self):
|
||||||
@ -331,9 +332,9 @@ class TemplateDefinition(object):
|
|||||||
def template_path(self):
|
def template_path(self):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def extract_definition(self, context, cluster_template, bay, **kwargs):
|
def extract_definition(self, context, cluster_template, cluster, **kwargs):
|
||||||
return (self.template_path,
|
return (self.template_path,
|
||||||
self.get_params(context, cluster_template, bay, **kwargs),
|
self.get_params(context, cluster_template, cluster, **kwargs),
|
||||||
self.get_env_files(cluster_template))
|
self.get_env_files(cluster_template))
|
||||||
|
|
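Editor's note: after the rename, extract_definition still hands back a (template_path, params, env_files) triple. A sketch of how a driver might feed it to Heat; the stack name scheme and the environment_files keyword are assumptions, not taken from this patch::

    def create_cluster_stack(heat_client, definition, context,
                             cluster_template, cluster):
        template_path, params, env_files = definition.extract_definition(
            context, cluster_template, cluster)
        with open(template_path) as f:
            template = f.read()
        return heat_client.stacks.create(
            stack_name='%s-sketch' % cluster.name,   # assumed naming scheme
            template=template,
            parameters=params,                       # includes cluster_uuid etc.
            environment_files=env_files)             # assumed Heat API keyword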
||||||
|
|
||||||
@ -356,7 +357,7 @@ class BaseTemplateDefinition(TemplateDefinition):
|
|||||||
self.add_parameter('no_proxy',
|
self.add_parameter('no_proxy',
|
||||||
cluster_template_attr='no_proxy')
|
cluster_template_attr='no_proxy')
|
||||||
self.add_parameter('number_of_masters',
|
self.add_parameter('number_of_masters',
|
||||||
bay_attr='master_count')
|
cluster_attr='master_count')
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def driver_module_path(self):
|
def driver_module_path(self):
|
||||||
@ -371,19 +372,19 @@ class BaseTemplateDefinition(TemplateDefinition):
|
|||||||
self._osc = clients.OpenStackClients(context)
|
self._osc = clients.OpenStackClients(context)
|
||||||
return self._osc
|
return self._osc
|
||||||
|
|
||||||
def get_params(self, context, cluster_template, bay, **kwargs):
|
def get_params(self, context, cluster_template, cluster, **kwargs):
|
||||||
osc = self.get_osc(context)
|
osc = self.get_osc(context)
|
||||||
|
|
||||||
extra_params = kwargs.pop('extra_params', {})
|
extra_params = kwargs.pop('extra_params', {})
|
||||||
extra_params['trustee_domain_id'] = osc.keystone().trustee_domain_id
|
extra_params['trustee_domain_id'] = osc.keystone().trustee_domain_id
|
||||||
extra_params['trustee_user_id'] = bay.trustee_user_id
|
extra_params['trustee_user_id'] = cluster.trustee_user_id
|
||||||
extra_params['trustee_username'] = bay.trustee_username
|
extra_params['trustee_username'] = cluster.trustee_username
|
||||||
extra_params['trustee_password'] = bay.trustee_password
|
extra_params['trustee_password'] = cluster.trustee_password
|
||||||
extra_params['trust_id'] = bay.trust_id
|
extra_params['trust_id'] = cluster.trust_id
|
||||||
extra_params['auth_url'] = context.auth_url
|
extra_params['auth_url'] = context.auth_url
|
||||||
|
|
||||||
return super(BaseTemplateDefinition,
|
return super(BaseTemplateDefinition,
|
||||||
self).get_params(context, cluster_template, bay,
|
self).get_params(context, cluster_template, cluster,
|
||||||
extra_params=extra_params,
|
extra_params=extra_params,
|
||||||
**kwargs)
|
**kwargs)
|
||||||
|
|
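Editor's note: each definition layers its values into extra_params and defers to super(), so the trustee credentials above end up in the same parameter dict as everything else. A condensed sketch of that chaining; attribute values are placeholders::

    from types import SimpleNamespace


    class BaseDefinition(object):
        def get_params(self, context, cluster_template, cluster, **kwargs):
            params = {'number_of_masters': cluster.master_count}
            params.update(kwargs.get('extra_params', {}))
            return params


    class TrusteeAwareDefinition(BaseDefinition):
        def get_params(self, context, cluster_template, cluster, **kwargs):
            extra_params = kwargs.pop('extra_params', {})
            extra_params['trustee_user_id'] = cluster.trustee_user_id
            extra_params['trust_id'] = cluster.trust_id
            return super(TrusteeAwareDefinition, self).get_params(
                context, cluster_template, cluster,
                extra_params=extra_params, **kwargs)


    cluster = SimpleNamespace(master_count=1, trustee_user_id='uid',
                              trust_id='tid')
    print(TrusteeAwareDefinition().get_params(None, None, cluster))
    # {'number_of_masters': 1, 'trustee_user_id': 'uid', 'trust_id': 'tid'}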
||||||
@ -404,17 +405,17 @@ class BaseTemplateDefinition(TemplateDefinition):
|
|||||||
try:
|
try:
|
||||||
result = ast.literal_eval(result)
|
result = ast.literal_eval(result)
|
||||||
except (ValueError, SyntaxError):
|
except (ValueError, SyntaxError):
|
||||||
raise exception.InvalidBayDiscoveryURL(
|
raise exception.InvalidClusterDiscoveryURL(
|
||||||
discovery_url=discovery_url)
|
discovery_url=discovery_url)
|
||||||
|
|
||||||
node_value = result.get('node', None)
|
node_value = result.get('node', None)
|
||||||
if node_value is None:
|
if node_value is None:
|
||||||
raise exception.InvalidBayDiscoveryURL(
|
raise exception.InvalidClusterDiscoveryURL(
|
||||||
discovery_url=discovery_url)
|
discovery_url=discovery_url)
|
||||||
|
|
||||||
value = node_value.get('value', None)
|
value = node_value.get('value', None)
|
||||||
if value is None:
|
if value is None:
|
||||||
raise exception.InvalidBayDiscoveryURL(
|
raise exception.InvalidClusterDiscoveryURL(
|
||||||
discovery_url=discovery_url)
|
discovery_url=discovery_url)
|
||||||
elif int(value) != expect_size:
|
elif int(value) != expect_size:
|
||||||
raise exception.InvalidClusterSize(
|
raise exception.InvalidClusterSize(
|
||||||
@ -422,18 +423,18 @@ class BaseTemplateDefinition(TemplateDefinition):
|
|||||||
size=int(value),
|
size=int(value),
|
||||||
discovery_url=discovery_url)
|
discovery_url=discovery_url)
|
||||||
|
|
||||||
def get_discovery_url(self, bay):
|
def get_discovery_url(self, cluster):
|
||||||
if hasattr(bay, 'discovery_url') and bay.discovery_url:
|
if hasattr(cluster, 'discovery_url') and cluster.discovery_url:
|
||||||
if getattr(bay, 'master_count', None) is not None:
|
if getattr(cluster, 'master_count', None) is not None:
|
||||||
self.validate_discovery_url(bay.discovery_url,
|
self.validate_discovery_url(cluster.discovery_url,
|
||||||
bay.master_count)
|
cluster.master_count)
|
||||||
else:
|
else:
|
||||||
self.validate_discovery_url(bay.discovery_url, 1)
|
self.validate_discovery_url(cluster.discovery_url, 1)
|
||||||
discovery_url = bay.discovery_url
|
discovery_url = cluster.discovery_url
|
||||||
else:
|
else:
|
||||||
discovery_endpoint = (
|
discovery_endpoint = (
|
||||||
cfg.CONF.cluster.etcd_discovery_service_endpoint_format %
|
cfg.CONF.cluster.etcd_discovery_service_endpoint_format %
|
||||||
{'size': bay.master_count})
|
{'size': cluster.master_count})
|
||||||
try:
|
try:
|
||||||
discovery_url = requests.get(discovery_endpoint).text
|
discovery_url = requests.get(discovery_endpoint).text
|
||||||
except req_exceptions.RequestException as err:
|
except req_exceptions.RequestException as err:
|
||||||
@ -445,5 +446,5 @@ class BaseTemplateDefinition(TemplateDefinition):
|
|||||||
discovery_url=discovery_url,
|
discovery_url=discovery_url,
|
||||||
discovery_endpoint=discovery_endpoint)
|
discovery_endpoint=discovery_endpoint)
|
||||||
else:
|
else:
|
||||||
bay.discovery_url = discovery_url
|
cluster.discovery_url = discovery_url
|
||||||
return discovery_url
|
return discovery_url
|
||||||
|
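Editor's note: a hedged sketch of the discovery-URL path shown above: reuse cluster.discovery_url when present (the real code also validates it against master_count), otherwise request a fresh token from the configured etcd discovery endpoint. The endpoint format string is the usual default and is an assumption here::

    import requests

    ETCD_DISCOVERY_FMT = 'https://discovery.etcd.io/new?size=%(size)d'  # assumed default


    def get_discovery_url(cluster):
        if getattr(cluster, 'discovery_url', None):
            # Validation against cluster.master_count is omitted in this sketch.
            return cluster.discovery_url
        discovery_endpoint = ETCD_DISCOVERY_FMT % {'size': cluster.master_count}
        cluster.discovery_url = requests.get(discovery_endpoint, timeout=10).text
        return cluster.discovery_url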
@ -33,7 +33,7 @@ $configure_docker_storage_driver
|
|||||||
if [ "$DOCKER_STORAGE_DRIVER" = "overlay" ]; then
|
if [ "$DOCKER_STORAGE_DRIVER" = "overlay" ]; then
|
||||||
if [ $(echo -e "$(uname -r)\n3.18" | sort -V | head -1) \
|
if [ $(echo -e "$(uname -r)\n3.18" | sort -V | head -1) \
|
||||||
= $(uname -r) ]; then
|
= $(uname -r) ]; then
|
||||||
ERROR_MESSAGE="OverlayFS requires at least Linux kernel 3.18. Bay node kernel version: $(uname -r)"
|
ERROR_MESSAGE="OverlayFS requires at least Linux kernel 3.18. Cluster node kernel version: $(uname -r)"
|
||||||
echo "ERROR: ${ERROR_MESSAGE}" >&2
|
echo "ERROR: ${ERROR_MESSAGE}" >&2
|
||||||
sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"${ERROR_MESSAGE}\"}'"
|
sh -c "${WAIT_CURL} --data-binary '{\"status\": \"FAILURE\", \"reason\": \"${ERROR_MESSAGE}\"}'"
|
||||||
else
|
else
|
||||||
|
@ -32,9 +32,9 @@ class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(CoreOSK8sTemplateDefinition, self).__init__()
|
super(CoreOSK8sTemplateDefinition, self).__init__()
|
||||||
self.add_output('kube_minions',
|
self.add_output('kube_minions',
|
||||||
bay_attr='node_addresses')
|
cluster_attr='node_addresses')
|
||||||
self.add_output('kube_masters',
|
self.add_output('kube_masters',
|
||||||
bay_attr='master_addresses')
|
cluster_attr='master_addresses')
|
||||||
|
|
||||||
def get_env_files(self, cluster_template):
|
def get_env_files(self, cluster_template):
|
||||||
if cluster_template.master_lb_enabled:
|
if cluster_template.master_lb_enabled:
|
||||||
|
@ -85,7 +85,7 @@ write_files:
|
|||||||
|
|
||||||
ca_cert_json=$(curl -X GET \
|
ca_cert_json=$(curl -X GET \
|
||||||
-H "X-Auth-Token: $USER_TOKEN" \
|
-H "X-Auth-Token: $USER_TOKEN" \
|
||||||
$MAGNUM_URL/certificates/$BAY_UUID)
|
$MAGNUM_URL/certificates/$CLUSTER_UUID)
|
||||||
parse_json_response "${ca_cert_json}" > ${CA_CERT}
|
parse_json_response "${ca_cert_json}" > ${CA_CERT}
|
||||||
|
|
||||||
# Create config for client's csr
|
# Create config for client's csr
|
||||||
@ -117,7 +117,7 @@ write_files:
|
|||||||
|
|
||||||
# encode newline (\n) characters
|
# encode newline (\n) characters
|
||||||
csr=$(cat $CLIENT_CSR | sed -e ':a' -e 'N' -e '$!ba' -e 's/\n/\\n/g')
|
csr=$(cat $CLIENT_CSR | sed -e ':a' -e 'N' -e '$!ba' -e 's/\n/\\n/g')
|
||||||
csr_req="{\"bay_uuid\": \"$BAY_UUID\", \"csr\": \"$csr\"}"
|
csr_req="{\"cluster_uuid\": \"$CLUSTER_UUID\", \"csr\": \"$csr\"}"
|
||||||
# Send csr to Magnum to have it signed
|
# Send csr to Magnum to have it signed
|
||||||
client_cert_json=$(curl -X POST \
|
client_cert_json=$(curl -X POST \
|
||||||
-H "X-Auth-Token: $USER_TOKEN" \
|
-H "X-Auth-Token: $USER_TOKEN" \
|
||||||
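Editor's note: the cloud-init fragment above drives a certificate handshake that can also be sketched in Python. Endpoint paths follow the fragment; the POST path and the 'pem' response key are assumptions carried over from the other fragments in this patch::

    import requests


    def fetch_ca_and_sign_csr(magnum_url, user_token, cluster_uuid, csr_pem):
        headers = {'X-Auth-Token': user_token,
                   'Content-Type': 'application/json'}
        # GET the cluster CA certificate.
        ca_resp = requests.get('%s/certificates/%s' % (magnum_url, cluster_uuid),
                               headers=headers)
        ca_pem = ca_resp.json()['pem']
        # POST the CSR, tagged with the renamed cluster_uuid field.
        sign_resp = requests.post('%s/certificates' % magnum_url,  # assumed path
                                  headers=headers,
                                  json={'cluster_uuid': cluster_uuid,
                                        'csr': csr_pem})
        return ca_pem, sign_resp.json()['pem']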
|
@ -106,10 +106,10 @@ write_files:
|
|||||||
|
|
||||||
rm -rf auth.json
|
rm -rf auth.json
|
||||||
|
|
||||||
# Get CA certificate for this bay
|
# Get CA certificate for this cluster
|
||||||
ca_cert_json=$(curl -X GET \
|
ca_cert_json=$(curl -X GET \
|
||||||
-H "X-Auth-Token: $USER_TOKEN" \
|
-H "X-Auth-Token: $USER_TOKEN" \
|
||||||
$MAGNUM_URL/certificates/$BAY_UUID)
|
$MAGNUM_URL/certificates/$CLUSTER_UUID)
|
||||||
parse_json_response "${ca_cert_json}" > ${CA_CERT}
|
parse_json_response "${ca_cert_json}" > ${CA_CERT}
|
||||||
|
|
||||||
# Create config for server's csr
|
# Create config for server's csr
|
||||||
@ -136,7 +136,7 @@ write_files:
|
|||||||
|
|
||||||
# encode newline (\n) characters
|
# encode newline (\n) characters
|
||||||
csr=$(cat $SERVER_CSR | sed -e ':a' -e 'N' -e '$!ba' -e 's/\n/\\n/g')
|
csr=$(cat $SERVER_CSR | sed -e ':a' -e 'N' -e '$!ba' -e 's/\n/\\n/g')
|
||||||
csr_req="{\"bay_uuid\": \"$BAY_UUID\", \"csr\": \"$csr\"}"
|
csr_req="{\"cluster_uuid\": \"$CLUSTER_UUID\", \"csr\": \"$csr\"}"
|
||||||
# Send csr to Magnum to have it signed
|
# Send csr to Magnum to have it signed
|
||||||
server_cert_json=$(curl -X POST \
|
server_cert_json=$(curl -X POST \
|
||||||
-H "X-Auth-Token: $USER_TOKEN" \
|
-H "X-Auth-Token: $USER_TOKEN" \
|
||||||
|
@ -24,7 +24,7 @@ write_files:
|
|||||||
TENANT_NAME="$TENANT_NAME"
|
TENANT_NAME="$TENANT_NAME"
|
||||||
CLUSTER_SUBNET="$CLUSTER_SUBNET"
|
CLUSTER_SUBNET="$CLUSTER_SUBNET"
|
||||||
TLS_DISABLED="$TLS_DISABLED"
|
TLS_DISABLED="$TLS_DISABLED"
|
||||||
BAY_UUID="$BAY_UUID"
|
CLUSTER_UUID="$CLUSTER_UUID"
|
||||||
MAGNUM_URL="$MAGNUM_URL"
|
MAGNUM_URL="$MAGNUM_URL"
|
||||||
HTTP_PROXY="$HTTP_PROXY"
|
HTTP_PROXY="$HTTP_PROXY"
|
||||||
HTTPS_PROXY="$HTTPS_PROXY"
|
HTTPS_PROXY="$HTTPS_PROXY"
|
||||||
|
@ -21,7 +21,7 @@ write_files:
|
|||||||
REGISTRY_INSECURE="$REGISTRY_INSECURE"
|
REGISTRY_INSECURE="$REGISTRY_INSECURE"
|
||||||
REGISTRY_CHUNKSIZE="$REGISTRY_CHUNKSIZE"
|
REGISTRY_CHUNKSIZE="$REGISTRY_CHUNKSIZE"
|
||||||
TLS_DISABLED="$TLS_DISABLED"
|
TLS_DISABLED="$TLS_DISABLED"
|
||||||
BAY_UUID="$BAY_UUID"
|
CLUSTER_UUID="$CLUSTER_UUID"
|
||||||
MAGNUM_URL="$MAGNUM_URL"
|
MAGNUM_URL="$MAGNUM_URL"
|
||||||
AUTH_URL="$AUTH_URL"
|
AUTH_URL="$AUTH_URL"
|
||||||
USERNAME="$USERNAME"
|
USERNAME="$USERNAME"
|
||||||
|
@ -123,9 +123,9 @@ parameters:
|
|||||||
service.
|
service.
|
||||||
default: 6443
|
default: 6443
|
||||||
|
|
||||||
bay_uuid:
|
cluster_uuid:
|
||||||
type: string
|
type: string
|
||||||
description: identifier for the bay this template is generating
|
description: identifier for the cluster this template is generating
|
||||||
|
|
||||||
magnum_url:
|
magnum_url:
|
||||||
type: string
|
type: string
|
||||||
@ -286,7 +286,7 @@ resources:
|
|||||||
######################################################################
|
######################################################################
|
||||||
#
|
#
|
||||||
# resources that expose the IPs of either the kube master or a given
|
# resources that expose the IPs of either the kube master or a given
|
||||||
# LBaaS pool depending on whether LBaaS is enabled for the bay.
|
# LBaaS pool depending on whether LBaaS is enabled for the cluster.
|
||||||
#
|
#
|
||||||
|
|
||||||
api_address_switch:
|
api_address_switch:
|
||||||
@ -340,7 +340,7 @@ resources:
|
|||||||
no_proxy: {get_param: no_proxy}
|
no_proxy: {get_param: no_proxy}
|
||||||
kube_version: {get_param: kube_version}
|
kube_version: {get_param: kube_version}
|
||||||
wait_condition_timeout: {get_param: wait_condition_timeout}
|
wait_condition_timeout: {get_param: wait_condition_timeout}
|
||||||
bay_uuid: {get_param: bay_uuid}
|
cluster_uuid: {get_param: cluster_uuid}
|
||||||
api_pool_id: {get_resource: api_pool}
|
api_pool_id: {get_resource: api_pool}
|
||||||
etcd_pool_id: {get_resource: etcd_pool}
|
etcd_pool_id: {get_resource: etcd_pool}
|
||||||
magnum_url: {get_param: magnum_url}
|
magnum_url: {get_param: magnum_url}
|
||||||
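Editor's note: with every template parameter renamed from bay_uuid to cluster_uuid, a quick consistency check against the Heat templates helps catch strays. A rough sketch; the filename is hypothetical::

    import yaml


    def assert_param_renamed(path, old='bay_uuid', new='cluster_uuid'):
        with open(path) as f:
            template = yaml.safe_load(f)
        params = template.get('parameters', {})
        assert old not in params, '%s still defines %s' % (path, old)
        assert new in params, '%s does not define %s' % (path, new)


    assert_param_renamed('kubecluster.yaml')  # hypothetical template path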
@ -384,7 +384,7 @@ resources:
|
|||||||
kube_version: {get_param: kube_version}
|
kube_version: {get_param: kube_version}
|
||||||
etcd_server_ip: {get_attr: [etcd_address_switch, private_ip]}
|
etcd_server_ip: {get_attr: [etcd_address_switch, private_ip]}
|
||||||
wait_condition_timeout: {get_param: wait_condition_timeout}
|
wait_condition_timeout: {get_param: wait_condition_timeout}
|
||||||
bay_uuid: {get_param: bay_uuid}
|
cluster_uuid: {get_param: cluster_uuid}
|
||||||
magnum_url: {get_param: magnum_url}
|
magnum_url: {get_param: magnum_url}
|
||||||
trustee_user_id: {get_param: trustee_user_id}
|
trustee_user_id: {get_param: trustee_user_id}
|
||||||
trustee_password: {get_param: trustee_password}
|
trustee_password: {get_param: trustee_password}
|
||||||
|
@ -95,9 +95,9 @@ parameters:
|
|||||||
type: string
|
type: string
|
||||||
description: version of kubernetes used for kubernetes cluster
|
description: version of kubernetes used for kubernetes cluster
|
||||||
|
|
||||||
bay_uuid:
|
cluster_uuid:
|
||||||
type: string
|
type: string
|
||||||
description: identifier for the bay this template is generating
|
description: identifier for the cluster this template is generating
|
||||||
|
|
||||||
magnum_url:
|
magnum_url:
|
||||||
type: string
|
type: string
|
||||||
@ -161,7 +161,7 @@ resources:
|
|||||||
######################################################################
|
######################################################################
|
||||||
#
|
#
|
||||||
# resource that exposes the IPs of either the kube master or the API
|
# resource that exposes the IPs of either the kube master or the API
|
||||||
# LBaaS pool depending on whether LBaaS is enabled for the bay.
|
# LBaaS pool depending on whether LBaaS is enabled for the cluster.
|
||||||
#
|
#
|
||||||
|
|
||||||
api_address_switch:
|
api_address_switch:
|
||||||
@ -237,7 +237,7 @@ resources:
|
|||||||
"$KUBE_API_PORT": {get_param: kubernetes_port}
|
"$KUBE_API_PORT": {get_param: kubernetes_port}
|
||||||
"$TLS_DISABLED": {get_param: tls_disabled}
|
"$TLS_DISABLED": {get_param: tls_disabled}
|
||||||
"$KUBE_VERSION": {get_param: kube_version}
|
"$KUBE_VERSION": {get_param: kube_version}
|
||||||
"$BAY_UUID": {get_param: bay_uuid}
|
"$CLUSTER_UUID": {get_param: cluster_uuid}
|
||||||
"$MAGNUM_URL": {get_param: magnum_url}
|
"$MAGNUM_URL": {get_param: magnum_url}
|
||||||
"$HTTP_PROXY": {get_param: http_proxy}
|
"$HTTP_PROXY": {get_param: http_proxy}
|
||||||
"$HTTPS_PROXY": {get_param: https_proxy}
|
"$HTTPS_PROXY": {get_param: https_proxy}
|
||||||
|
@ -45,9 +45,9 @@ parameters:
|
|||||||
service.
|
service.
|
||||||
default: 6443
|
default: 6443
|
||||||
|
|
||||||
bay_uuid:
|
cluster_uuid:
|
||||||
type: string
|
type: string
|
||||||
description: identifier for the bay this template is generating
|
description: identifier for the cluster this template is generating
|
||||||
|
|
||||||
magnum_url:
|
magnum_url:
|
||||||
type: string
|
type: string
|
||||||
@ -165,7 +165,7 @@ resources:
|
|||||||
"$NETWORK_DRIVER": {get_param: network_driver}
|
"$NETWORK_DRIVER": {get_param: network_driver}
|
||||||
"$ETCD_SERVER_IP": {get_param: etcd_server_ip}
|
"$ETCD_SERVER_IP": {get_param: etcd_server_ip}
|
||||||
"$KUBE_VERSION": {get_param: kube_version}
|
"$KUBE_VERSION": {get_param: kube_version}
|
||||||
"$BAY_UUID": {get_param: bay_uuid}
|
"$CLUSTER_UUID": {get_param: cluster_uuid}
|
||||||
"$MAGNUM_URL": {get_param: magnum_url}
|
"$MAGNUM_URL": {get_param: magnum_url}
|
||||||
"$HTTP_PROXY": {get_param: http_proxy}
|
"$HTTP_PROXY": {get_param: http_proxy}
|
||||||
"$HTTPS_PROXY": {get_param: https_proxy}
|
"$HTTPS_PROXY": {get_param: https_proxy}
|
||||||
|
@ -32,17 +32,17 @@ class ServerAddressOutputMapping(template_def.OutputMapping):
|
|||||||
public_ip_output_key = None
|
public_ip_output_key = None
|
||||||
private_ip_output_key = None
|
private_ip_output_key = None
|
||||||
|
|
||||||
def __init__(self, dummy_arg, bay_attr=None):
|
def __init__(self, dummy_arg, cluster_attr=None):
|
||||||
self.bay_attr = bay_attr
|
self.cluster_attr = cluster_attr
|
||||||
self.heat_output = self.public_ip_output_key
|
self.heat_output = self.public_ip_output_key
|
||||||
|
|
||||||
def set_output(self, stack, cluster_template, bay):
|
def set_output(self, stack, cluster_template, cluster):
|
||||||
if not cluster_template.floating_ip_enabled:
|
if not cluster_template.floating_ip_enabled:
|
||||||
self.heat_output = self.private_ip_output_key
|
self.heat_output = self.private_ip_output_key
|
||||||
|
|
||||||
LOG.debug("Using heat_output: %s", self.heat_output)
|
LOG.debug("Using heat_output: %s", self.heat_output)
|
||||||
super(ServerAddressOutputMapping,
|
super(ServerAddressOutputMapping,
|
||||||
self).set_output(stack, cluster_template, bay)
|
self).set_output(stack, cluster_template, cluster)
|
||||||
|
|
||||||
|
|
||||||
class MasterAddressOutputMapping(ServerAddressOutputMapping):
|
class MasterAddressOutputMapping(ServerAddressOutputMapping):
|
||||||
@ -71,13 +71,13 @@ class AtomicK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
|||||||
self.add_parameter('docker_storage_driver',
|
self.add_parameter('docker_storage_driver',
|
||||||
cluster_template_attr='docker_storage_driver')
|
cluster_template_attr='docker_storage_driver')
|
||||||
self.add_output('kube_minions',
|
self.add_output('kube_minions',
|
||||||
bay_attr='node_addresses',
|
cluster_attr='node_addresses',
|
||||||
mapping_type=NodeAddressOutputMapping)
|
mapping_type=NodeAddressOutputMapping)
|
||||||
self.add_output('kube_masters',
|
self.add_output('kube_masters',
|
||||||
bay_attr='master_addresses',
|
cluster_attr='master_addresses',
|
||||||
mapping_type=MasterAddressOutputMapping)
|
mapping_type=MasterAddressOutputMapping)
|
||||||
|
|
||||||
def get_params(self, context, cluster_template, bay, **kwargs):
|
def get_params(self, context, cluster_template, cluster, **kwargs):
|
||||||
extra_params = kwargs.pop('extra_params', {})
|
extra_params = kwargs.pop('extra_params', {})
|
||||||
|
|
||||||
extra_params['username'] = context.user_name
|
extra_params['username'] = context.user_name
|
||||||
@ -86,7 +86,7 @@ class AtomicK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
|
|||||||
extra_params['region_name'] = osc.cinder_region_name()
|
extra_params['region_name'] = osc.cinder_region_name()
|
||||||
|
|
||||||
return super(AtomicK8sTemplateDefinition,
|
return super(AtomicK8sTemplateDefinition,
|
||||||
self).get_params(context, cluster_template, bay,
|
self).get_params(context, cluster_template, cluster,
|
||||||
extra_params=extra_params,
|
extra_params=extra_params,
|
||||||
**kwargs)
|
**kwargs)
|
||||||
|
|
||||||
@ -156,14 +156,14 @@ class FedoraK8sIronicTemplateDefinition(AtomicK8sTemplateDefinition):
|
|||||||
|
|
||||||
return subnet['network_id']
|
return subnet['network_id']
|
||||||
|
|
||||||
def get_params(self, context, cluster_template, bay, **kwargs):
|
def get_params(self, context, cluster_template, cluster, **kwargs):
|
||||||
ep = kwargs.pop('extra_params', {})
|
ep = kwargs.pop('extra_params', {})
|
||||||
|
|
||||||
osc = self.get_osc(context)
|
osc = self.get_osc(context)
|
||||||
ep['fixed_network'] = self.get_fixed_network_id(osc, cluster_template)
|
ep['fixed_network'] = self.get_fixed_network_id(osc, cluster_template)
|
||||||
|
|
||||||
return super(FedoraK8sIronicTemplateDefinition,
|
return super(FedoraK8sIronicTemplateDefinition,
|
||||||
self).get_params(context, cluster_template, bay,
|
self).get_params(context, cluster_template, cluster,
|
||||||
extra_params=ep,
|
extra_params=ep,
|
||||||
**kwargs)
|
**kwargs)
|
||||||
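Editor's note: the Ironic definition above resolves the ClusterTemplate's fixed subnet to its parent network before handing it to Heat. Roughly, using a standard python-neutronclient filter call, shown here as an assumption::

    def get_fixed_network_id(neutron, fixed_subnet_name):
        subnets = neutron.list_subnets(name=fixed_subnet_name).get('subnets', [])
        if not subnets:
            raise ValueError('subnet %s not found' % fixed_subnet_name)
        return subnets[0]['network_id']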
|
|
||||||
|
@ -67,10 +67,10 @@ url="$AUTH_URL/auth/tokens"
|
|||||||
USER_TOKEN=`curl -s -i -X POST -H "$content_type" -d "$auth_json" $url \
|
USER_TOKEN=`curl -s -i -X POST -H "$content_type" -d "$auth_json" $url \
|
||||||
| grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'`
|
| grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'`
|
||||||
|
|
||||||
# Get CA certificate for this bay
|
# Get CA certificate for this cluster
|
||||||
curl -X GET \
|
curl -X GET \
|
||||||
-H "X-Auth-Token: $USER_TOKEN" \
|
-H "X-Auth-Token: $USER_TOKEN" \
|
||||||
$MAGNUM_URL/certificates/$BAY_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT
|
$MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > $CA_CERT
|
||||||
|
|
||||||
# Create config for client's csr
|
# Create config for client's csr
|
||||||
cat > ${cert_conf_dir}/client.conf <<EOF
|
cat > ${cert_conf_dir}/client.conf <<EOF
|
||||||
@ -100,7 +100,7 @@ openssl req -new -days 1000 \
|
|||||||
-config "${cert_conf_dir}/client.conf"
|
-config "${cert_conf_dir}/client.conf"
|
||||||
|
|
||||||
# Send csr to Magnum to have it signed
|
# Send csr to Magnum to have it signed
|
||||||
csr_req=$(python -c "import json; fp = open('${CLIENT_CSR}'); print json.dumps({'bay_uuid': '$BAY_UUID', 'csr': fp.read()}); fp.close()")
|
csr_req=$(python -c "import json; fp = open('${CLIENT_CSR}'); print json.dumps({'cluster_uuid': '$CLUSTER_UUID', 'csr': fp.read()}); fp.close()")
|
||||||
curl -X POST \
|
curl -X POST \
|
||||||
-H "X-Auth-Token: $USER_TOKEN" \
|
-H "X-Auth-Token: $USER_TOKEN" \
|
||||||
-H "Content-Type: application/json" \
|
-H "Content-Type: application/json" \
|
||||||
|
@ -89,10 +89,10 @@ url="$AUTH_URL/auth/tokens"
|
|||||||
USER_TOKEN=`curl -s -i -X POST -H "$content_type" -d "$auth_json" $url \
|
USER_TOKEN=`curl -s -i -X POST -H "$content_type" -d "$auth_json" $url \
|
||||||
| grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'`
|
| grep X-Subject-Token | awk '{print $2}' | tr -d '[[:space:]]'`
|
||||||
|
|
||||||
# Get CA certificate for this bay
|
# Get CA certificate for this cluster
|
||||||
curl -X GET \
|
curl -X GET \
|
||||||
-H "X-Auth-Token: $USER_TOKEN" \
|
-H "X-Auth-Token: $USER_TOKEN" \
|
||||||
$MAGNUM_URL/certificates/$BAY_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT}
|
$MAGNUM_URL/certificates/$CLUSTER_UUID | python -c 'import sys, json; print json.load(sys.stdin)["pem"]' > ${CA_CERT}
|
||||||
|
|
||||||
# Create config for server's csr
|
# Create config for server's csr
|
||||||
cat > ${cert_conf_dir}/server.conf <<EOF
|
cat > ${cert_conf_dir}/server.conf <<EOF
|
||||||
@ -117,7 +117,7 @@ openssl req -new -days 1000 \
|
|||||||
-config "${cert_conf_dir}/server.conf"
|
-config "${cert_conf_dir}/server.conf"
|
||||||
|
|
||||||
# Send csr to Magnum to have it signed
|
# Send csr to Magnum to have it signed
|
||||||
csr_req=$(python -c "import json; fp = open('${SERVER_CSR}'); print json.dumps({'bay_uuid': '$BAY_UUID', 'csr': fp.read()}); fp.close()")
|
csr_req=$(python -c "import json; fp = open('${SERVER_CSR}'); print json.dumps({'cluster_uuid': '$CLUSTER_UUID', 'csr': fp.read()}); fp.close()")
|
||||||
curl -X POST \
|
curl -X POST \
|
||||||
-H "X-Auth-Token: $USER_TOKEN" \
|
-H "X-Auth-Token: $USER_TOKEN" \
|
||||||
-H "Content-Type: application/json" \
|
-H "Content-Type: application/json" \
|
||||||
|
@ -25,7 +25,7 @@ write_files:
|
|||||||
TENANT_NAME="$TENANT_NAME"
|
TENANT_NAME="$TENANT_NAME"
|
||||||
CLUSTER_SUBNET="$CLUSTER_SUBNET"
|
CLUSTER_SUBNET="$CLUSTER_SUBNET"
|
||||||
TLS_DISABLED="$TLS_DISABLED"
|
TLS_DISABLED="$TLS_DISABLED"
|
||||||
BAY_UUID="$BAY_UUID"
|
CLUSTER_UUID="$CLUSTER_UUID"
|
||||||
MAGNUM_URL="$MAGNUM_URL"
|
MAGNUM_URL="$MAGNUM_URL"
|
||||||
HTTP_PROXY="$HTTP_PROXY"
|
HTTP_PROXY="$HTTP_PROXY"
|
||||||
HTTPS_PROXY="$HTTPS_PROXY"
|
HTTPS_PROXY="$HTTPS_PROXY"
|
||||||
|
@ -22,7 +22,7 @@ write_files:
|
|||||||
REGISTRY_INSECURE="$REGISTRY_INSECURE"
|
REGISTRY_INSECURE="$REGISTRY_INSECURE"
|
||||||
REGISTRY_CHUNKSIZE="$REGISTRY_CHUNKSIZE"
|
REGISTRY_CHUNKSIZE="$REGISTRY_CHUNKSIZE"
|
||||||
TLS_DISABLED="$TLS_DISABLED"
|
TLS_DISABLED="$TLS_DISABLED"
|
||||||
BAY_UUID="$BAY_UUID"
|
CLUSTER_UUID="$CLUSTER_UUID"
|
||||||
MAGNUM_URL="$MAGNUM_URL"
|
MAGNUM_URL="$MAGNUM_URL"
|
||||||
AUTH_URL="$AUTH_URL"
|
AUTH_URL="$AUTH_URL"
|
||||||
USERNAME="$USERNAME"
|
USERNAME="$USERNAME"
|
||||||
|
@ -212,9 +212,9 @@ parameters:
|
|||||||
service.
|
service.
|
||||||
default: 6443
|
default: 6443
|
||||||
|
|
||||||
bay_uuid:
|
cluster_uuid:
|
||||||
type: string
|
type: string
|
||||||
description: identifier for the bay this template is generating
|
description: identifier for the cluster this template is generating
|
||||||
|
|
||||||
magnum_url:
|
magnum_url:
|
||||||
type: string
|
type: string
|
||||||
@ -374,7 +374,7 @@ resources:
|
|||||||
######################################################################
|
######################################################################
|
||||||
#
|
#
|
||||||
# resources that expose the IPs of either the kube master or a given
|
# resources that expose the IPs of either the kube master or a given
|
||||||
# LBaaS pool depending on whether LBaaS is enabled for the bay.
|
# LBaaS pool depending on whether LBaaS is enabled for the cluster.
|
||||||
#
|
#
|
||||||
|
|
||||||
api_address_switch:
|
api_address_switch:
|
||||||
@ -419,7 +419,7 @@ resources:
|
|||||||
flannel_backend: {get_param: flannel_backend}
|
flannel_backend: {get_param: flannel_backend}
|
||||||
portal_network_cidr: {get_param: portal_network_cidr}
|
portal_network_cidr: {get_param: portal_network_cidr}
|
||||||
discovery_url: {get_param: discovery_url}
|
discovery_url: {get_param: discovery_url}
|
||||||
bay_uuid: {get_param: bay_uuid}
|
cluster_uuid: {get_param: cluster_uuid}
|
||||||
magnum_url: {get_param: magnum_url}
|
magnum_url: {get_param: magnum_url}
|
||||||
fixed_network: {get_param: fixed_network}
|
fixed_network: {get_param: fixed_network}
|
||||||
fixed_subnet: {get_param: fixed_subnet}
|
fixed_subnet: {get_param: fixed_subnet}
|
||||||
@ -475,7 +475,7 @@ resources:
|
|||||||
registry_container: {get_param: registry_container}
|
registry_container: {get_param: registry_container}
|
||||||
registry_insecure: {get_param: registry_insecure}
|
registry_insecure: {get_param: registry_insecure}
|
||||||
registry_chunksize: {get_param: registry_chunksize}
|
registry_chunksize: {get_param: registry_chunksize}
|
||||||
bay_uuid: {get_param: bay_uuid}
|
cluster_uuid: {get_param: cluster_uuid}
|
||||||
magnum_url: {get_param: magnum_url}
|
magnum_url: {get_param: magnum_url}
|
||||||
volume_driver: {get_param: volume_driver}
|
volume_driver: {get_param: volume_driver}
|
||||||
region_name: {get_param: region_name}
|
region_name: {get_param: region_name}
|
||||||
|
@ -204,9 +204,9 @@ parameters:
|
|||||||
service.
|
service.
|
||||||
default: 6443
|
default: 6443
|
||||||
|
|
||||||
bay_uuid:
|
cluster_uuid:
|
||||||
type: string
|
type: string
|
||||||
description: identifier for the bay this template is generating
|
description: identifier for the cluster this template is generating
|
||||||
|
|
||||||
magnum_url:
|
magnum_url:
|
||||||
type: string
|
type: string
|
||||||
@ -396,7 +396,7 @@ resources:
|
|||||||
######################################################################
|
######################################################################
|
||||||
#
|
#
|
||||||
# resources that expose the IPs of either the kube master or a given
|
# resources that expose the IPs of either the kube master or a given
|
||||||
# LBaaS pool depending on whether LBaaS is enabled for the bay.
|
# LBaaS pool depending on whether LBaaS is enabled for the cluster.
|
||||||
#
|
#
|
||||||
|
|
||||||
api_address_lb_switch:
|
api_address_lb_switch:
|
||||||
@ -416,7 +416,7 @@ resources:
|
|||||||
######################################################################
|
######################################################################
|
||||||
#
|
#
|
||||||
# resources that expose the IPs of either floating ip or a given
|
# resources that expose the IPs of either floating ip or a given
|
||||||
# fixed ip depending on whether FloatingIP is enabled for the bay.
|
# fixed ip depending on whether FloatingIP is enabled for the cluster.
|
||||||
#
|
#
|
||||||
|
|
||||||
api_address_floating_switch:
|
api_address_floating_switch:
|
||||||
@ -456,7 +456,7 @@ resources:
|
|||||||
flannel_backend: {get_param: flannel_backend}
|
flannel_backend: {get_param: flannel_backend}
|
||||||
portal_network_cidr: {get_param: portal_network_cidr}
|
portal_network_cidr: {get_param: portal_network_cidr}
|
||||||
discovery_url: {get_param: discovery_url}
|
discovery_url: {get_param: discovery_url}
|
||||||
bay_uuid: {get_param: bay_uuid}
|
cluster_uuid: {get_param: cluster_uuid}
|
||||||
magnum_url: {get_param: magnum_url}
|
magnum_url: {get_param: magnum_url}
|
||||||
fixed_network: {get_resource: fixed_network}
|
fixed_network: {get_resource: fixed_network}
|
||||||
fixed_subnet: {get_resource: fixed_subnet}
|
fixed_subnet: {get_resource: fixed_subnet}
|
||||||
@ -515,7 +515,7 @@ resources:
|
|||||||
registry_container: {get_param: registry_container}
|
registry_container: {get_param: registry_container}
|
||||||
registry_insecure: {get_param: registry_insecure}
|
registry_insecure: {get_param: registry_insecure}
|
||||||
registry_chunksize: {get_param: registry_chunksize}
|
registry_chunksize: {get_param: registry_chunksize}
|
||||||
bay_uuid: {get_param: bay_uuid}
|
cluster_uuid: {get_param: cluster_uuid}
|
||||||
magnum_url: {get_param: magnum_url}
|
magnum_url: {get_param: magnum_url}
|
||||||
volume_driver: {get_param: volume_driver}
|
volume_driver: {get_param: volume_driver}
|
||||||
region_name: {get_param: region_name}
|
region_name: {get_param: region_name}
|
||||||
|
@ -72,9 +72,9 @@ parameters:
|
|||||||
The port which are used by kube-apiserver to provide Kubernetes
|
The port which are used by kube-apiserver to provide Kubernetes
|
||||||
service.
|
service.
|
||||||
|
|
||||||
bay_uuid:
|
cluster_uuid:
|
||||||
type: string
|
type: string
|
||||||
description: identifier for the bay this template is generating
|
description: identifier for the cluster this template is generating
|
||||||
|
|
||||||
magnum_url:
|
magnum_url:
|
||||||
type: string
|
type: string
|
||||||
@ -192,7 +192,7 @@ resources:
|
|||||||
######################################################################
|
######################################################################
|
||||||
#
|
#
|
||||||
# resource that exposes the IPs of either the kube master or the API
|
# resource that exposes the IPs of either the kube master or the API
|
||||||
# LBaaS pool depending on whether LBaaS is enabled for the bay.
|
# LBaaS pool depending on whether LBaaS is enabled for the cluster.
|
||||||
#
|
#
|
||||||
|
|
||||||
api_address_switch:
|
api_address_switch:
|
||||||
@ -235,7 +235,7 @@ resources:
|
|||||||
"$TENANT_NAME": {get_param: tenant_name}
|
"$TENANT_NAME": {get_param: tenant_name}
|
||||||
"$CLUSTER_SUBNET": {get_param: fixed_subnet}
|
"$CLUSTER_SUBNET": {get_param: fixed_subnet}
|
||||||
"$TLS_DISABLED": {get_param: tls_disabled}
|
"$TLS_DISABLED": {get_param: tls_disabled}
|
||||||
"$BAY_UUID": {get_param: bay_uuid}
|
"$CLUSTER_UUID": {get_param: cluster_uuid}
|
||||||
"$MAGNUM_URL": {get_param: magnum_url}
|
"$MAGNUM_URL": {get_param: magnum_url}
|
||||||
"$HTTP_PROXY": {get_param: http_proxy}
|
"$HTTP_PROXY": {get_param: http_proxy}
|
||||||
"$HTTPS_PROXY": {get_param: https_proxy}
|
"$HTTPS_PROXY": {get_param: https_proxy}
|
||||||
|
@ -78,9 +78,9 @@ parameters:
|
|||||||
The port which are used by kube-apiserver to provide Kubernetes
|
The port which are used by kube-apiserver to provide Kubernetes
|
||||||
service.
|
service.
|
||||||
|
|
||||||
bay_uuid:
|
cluster_uuid:
|
||||||
type: string
|
type: string
|
||||||
description: identifier for the bay this template is generating
|
description: identifier for the cluster this template is generating
|
||||||
|
|
||||||
magnum_url:
|
magnum_url:
|
||||||
type: string
|
type: string
|
||||||
@ -198,7 +198,7 @@ resources:
|
|||||||
######################################################################
|
######################################################################
|
||||||
#
|
#
|
||||||
# resource that exposes the IPs of either the kube master or the API
|
# resource that exposes the IPs of either the kube master or the API
|
||||||
# LBaaS pool depending on whether LBaaS is enabled for the bay.
|
# LBaaS pool depending on whether LBaaS is enabled for the cluster.
|
||||||
#
|
#
|
||||||
|
|
||||||
api_address_switch:
|
api_address_switch:
|
||||||
@ -243,7 +243,7 @@ resources:
|
|||||||
"$TENANT_NAME": {get_param: tenant_name}
|
"$TENANT_NAME": {get_param: tenant_name}
|
||||||
"$CLUSTER_SUBNET": {get_param: fixed_subnet}
|
"$CLUSTER_SUBNET": {get_param: fixed_subnet}
|
||||||
"$TLS_DISABLED": {get_param: tls_disabled}
|
"$TLS_DISABLED": {get_param: tls_disabled}
|
||||||
"$BAY_UUID": {get_param: bay_uuid}
|
"$CLUSTER_UUID": {get_param: cluster_uuid}
|
||||||
"$MAGNUM_URL": {get_param: magnum_url}
|
"$MAGNUM_URL": {get_param: magnum_url}
|
||||||
"$HTTP_PROXY": {get_param: http_proxy}
|
"$HTTP_PROXY": {get_param: http_proxy}
|
||||||
"$HTTPS_PROXY": {get_param: https_proxy}
|
"$HTTPS_PROXY": {get_param: https_proxy}
|
||||||
|
@ -47,9 +47,9 @@ parameters:
|
|||||||
The port which are used by kube-apiserver to provide Kubernetes
|
The port which are used by kube-apiserver to provide Kubernetes
|
||||||
service.
|
service.
|
||||||
|
|
||||||
bay_uuid:
|
cluster_uuid:
|
||||||
type: string
|
type: string
|
||||||
description: identifier for the bay this template is generating
|
description: identifier for the cluster this template is generating
|
||||||
|
|
||||||
magnum_url:
|
magnum_url:
|
||||||
type: string
|
type: string
|
||||||
@ -228,7 +228,7 @@ resources:
|
|||||||
$REGISTRY_INSECURE: {get_param: registry_insecure}
|
$REGISTRY_INSECURE: {get_param: registry_insecure}
|
||||||
$REGISTRY_CHUNKSIZE: {get_param: registry_chunksize}
|
$REGISTRY_CHUNKSIZE: {get_param: registry_chunksize}
|
||||||
$TLS_DISABLED: {get_param: tls_disabled}
|
$TLS_DISABLED: {get_param: tls_disabled}
|
||||||
$BAY_UUID: {get_param: bay_uuid}
|
$CLUSTER_UUID: {get_param: cluster_uuid}
|
||||||
$MAGNUM_URL: {get_param: magnum_url}
|
$MAGNUM_URL: {get_param: magnum_url}
|
||||||
$USERNAME: {get_param: username}
|
$USERNAME: {get_param: username}
|
||||||
$PASSWORD: {get_param: password}
|
$PASSWORD: {get_param: password}
|
||||||
|
@ -53,9 +53,9 @@ parameters:
|
|||||||
The port which are used by kube-apiserver to provide Kubernetes
|
The port which are used by kube-apiserver to provide Kubernetes
|
||||||
service.
|
service.
|
||||||
|
|
||||||
bay_uuid:
|
cluster_uuid:
|
||||||
type: string
|
type: string
|
||||||
description: identifier for the bay this template is generating
|
description: identifier for the cluster this template is generating
|
||||||
|
|
||||||
magnum_url:
|
magnum_url:
|
||||||
type: string
|
type: string
|
||||||
@ -236,7 +236,7 @@ resources:
|
|||||||
$REGISTRY_INSECURE: {get_param: registry_insecure}
|
$REGISTRY_INSECURE: {get_param: registry_insecure}
|
||||||
$REGISTRY_CHUNKSIZE: {get_param: registry_chunksize}
|
$REGISTRY_CHUNKSIZE: {get_param: registry_chunksize}
|
||||||
$TLS_DISABLED: {get_param: tls_disabled}
|
$TLS_DISABLED: {get_param: tls_disabled}
|
||||||
$BAY_UUID: {get_param: bay_uuid}
|
$CLUSTER_UUID: {get_param: cluster_uuid}
|
||||||
$MAGNUM_URL: {get_param: magnum_url}
|
$MAGNUM_URL: {get_param: magnum_url}
|
||||||
$USERNAME: {get_param: username}
|
$USERNAME: {get_param: username}
|
||||||
$PASSWORD: {get_param: password}
|
$PASSWORD: {get_param: password}
|
||||||
|
@ -29,28 +29,28 @@ class UbuntuMesosTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||||||
cluster_template_attr='external_network_id',
|
cluster_template_attr='external_network_id',
|
||||||
required=True)
|
required=True)
|
||||||
self.add_parameter('number_of_slaves',
|
self.add_parameter('number_of_slaves',
|
||||||
bay_attr='node_count')
|
cluster_attr='node_count')
|
||||||
self.add_parameter('master_flavor',
|
self.add_parameter('master_flavor',
|
||||||
cluster_template_attr='master_flavor_id')
|
cluster_template_attr='master_flavor_id')
|
||||||
self.add_parameter('slave_flavor',
|
self.add_parameter('slave_flavor',
|
||||||
cluster_template_attr='flavor_id')
|
cluster_template_attr='flavor_id')
|
||||||
self.add_parameter('cluster_name',
|
self.add_parameter('cluster_name',
|
||||||
bay_attr='name')
|
cluster_attr='name')
|
||||||
self.add_parameter('volume_driver',
|
self.add_parameter('volume_driver',
|
||||||
cluster_template_attr='volume_driver')
|
cluster_template_attr='volume_driver')
|
||||||
|
|
||||||
self.add_output('api_address',
|
self.add_output('api_address',
|
||||||
bay_attr='api_address')
|
cluster_attr='api_address')
|
||||||
self.add_output('mesos_master_private',
|
self.add_output('mesos_master_private',
|
||||||
bay_attr=None)
|
cluster_attr=None)
|
||||||
self.add_output('mesos_master',
|
self.add_output('mesos_master',
|
||||||
bay_attr='master_addresses')
|
cluster_attr='master_addresses')
|
||||||
self.add_output('mesos_slaves_private',
|
self.add_output('mesos_slaves_private',
|
||||||
bay_attr=None)
|
cluster_attr=None)
|
||||||
self.add_output('mesos_slaves',
|
self.add_output('mesos_slaves',
|
||||||
bay_attr='node_addresses')
|
cluster_attr='node_addresses')
|
||||||
|
|
||||||
def get_params(self, context, cluster_template, bay, **kwargs):
|
def get_params(self, context, cluster_template, cluster, **kwargs):
|
||||||
extra_params = kwargs.pop('extra_params', {})
|
extra_params = kwargs.pop('extra_params', {})
|
||||||
# HACK(apmelton) - This uses the user's bearer token, ideally
|
# HACK(apmelton) - This uses the user's bearer token, ideally
|
||||||
# it should be replaced with an actual trust token with only
|
# it should be replaced with an actual trust token with only
|
||||||
@ -77,7 +77,7 @@ class UbuntuMesosTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||||||
scale_mgr.get_removal_nodes(hosts))
|
scale_mgr.get_removal_nodes(hosts))
|
||||||
|
|
||||||
return super(UbuntuMesosTemplateDefinition,
|
return super(UbuntuMesosTemplateDefinition,
|
||||||
self).get_params(context, cluster_template, bay,
|
self).get_params(context, cluster_template, cluster,
|
||||||
extra_params=extra_params,
|
extra_params=extra_params,
|
||||||
**kwargs)
|
**kwargs)
|
||||||
|
|
||||||
|
@ -361,7 +361,7 @@ resources:
|
|||||||
lb_method: ROUND_ROBIN
|
lb_method: ROUND_ROBIN
|
||||||
vip:
|
vip:
|
||||||
protocol_port: 8080
|
protocol_port: 8080
|
||||||
description: API pool for Mesos bay
|
description: API pool for Mesos cluster
|
||||||
|
|
||||||
api_pool_floating:
|
api_pool_floating:
|
||||||
type: Magnum::Optional::Neutron::Pool::FloatingIP
|
type: Magnum::Optional::Neutron::Pool::FloatingIP
|
||||||
|
@ -45,15 +45,15 @@ locations:
|
|||||||
These files are typically installed in the same locations on the
|
These files are typically installed in the same locations on the
|
||||||
Neutron controller node. The policy.json file is copied into the
|
Neutron controller node. The policy.json file is copied into the
|
||||||
Docker image because it is fairly static and does not require
|
Docker image because it is fairly static and does not require
|
||||||
customization for the bay. If it is changed in the Neutron master
|
customization for the cluster. If it is changed in the Neutron master
|
||||||
repo, you just need to rebuild the Docker image to update the file.
|
repo, you just need to rebuild the Docker image to update the file.
|
||||||
Magnum will create the other 2 files on each bay node in the
|
Magnum will create the other 2 files on each cluster node in the
|
||||||
directory /etc/kuryr and map them to the proper directories in
|
directory /etc/kuryr and map them to the proper directories in
|
||||||
the container using the Docker -v option.
|
the container using the Docker -v option.
|
||||||
|
|
||||||
Since Openvswitch needs to operate on the host network name space,
|
Since Openvswitch needs to operate on the host network name space,
|
||||||
the Docker container will need the -net=host option.
|
the Docker container will need the -net=host option.
|
||||||
The /var/run/openvswitch directory is also mapped to the bay node
|
The /var/run/openvswitch directory is also mapped to the cluster node
|
||||||
so that the Kuryr container can talk to openvswitch.
|
so that the Kuryr container can talk to openvswitch.
|
||||||
To run the image from Fedora Atomic::
|
To run the image from Fedora Atomic::
|
||||||
|
|
||||||
|
@ -22,8 +22,8 @@ DOCKER_PORT = '2376'
|
|||||||
|
|
||||||
class SwarmApiAddressOutputMapping(template_def.OutputMapping):
|
class SwarmApiAddressOutputMapping(template_def.OutputMapping):
|
||||||
|
|
||||||
def set_output(self, stack, cluster_template, bay):
|
def set_output(self, stack, cluster_template, cluster):
|
||||||
if self.bay_attr is None:
|
if self.cluster_attr is None:
|
||||||
return
|
return
|
||||||
|
|
||||||
output_value = self.get_output_value(stack)
|
output_value = self.get_output_value(stack)
|
||||||
@ -36,7 +36,7 @@ class SwarmApiAddressOutputMapping(template_def.OutputMapping):
|
|||||||
'port': DOCKER_PORT,
|
'port': DOCKER_PORT,
|
||||||
}
|
}
|
||||||
value = "%(protocol)s://%(address)s:%(port)s" % params
|
value = "%(protocol)s://%(address)s:%(port)s" % params
|
||||||
setattr(bay, self.bay_attr, value)
|
setattr(cluster, self.cluster_attr, value)
|
||||||
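Editor's note: the Swarm mapping above composes cluster.api_address rather than copying a raw stack output. Condensed below, with the TLS-based protocol choice assumed from the surrounding code::

    DOCKER_PORT = '2376'


    def swarm_api_address(address, tls_disabled=False):
        protocol = 'tcp' if tls_disabled else 'https'  # assumed protocol rule
        return '%(protocol)s://%(address)s:%(port)s' % {'protocol': protocol,
                                                        'address': address,
                                                        'port': DOCKER_PORT}


    print(swarm_api_address('172.24.4.10'))  # https://172.24.4.10:2376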
|
|
||||||
|
|
||||||
class AtomicSwarmTemplateDefinition(template_def.BaseTemplateDefinition):
|
class AtomicSwarmTemplateDefinition(template_def.BaseTemplateDefinition):
|
||||||
@ -48,11 +48,11 @@ class AtomicSwarmTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super(AtomicSwarmTemplateDefinition, self).__init__()
|
super(AtomicSwarmTemplateDefinition, self).__init__()
|
||||||
self.add_parameter('bay_uuid',
|
self.add_parameter('cluster_uuid',
|
||||||
bay_attr='uuid',
|
cluster_attr='uuid',
|
||||||
param_type=str)
|
param_type=str)
|
||||||
self.add_parameter('number_of_nodes',
|
self.add_parameter('number_of_nodes',
|
||||||
bay_attr='node_count')
|
cluster_attr='node_count')
|
||||||
self.add_parameter('master_flavor',
|
self.add_parameter('master_flavor',
|
||||||
cluster_template_attr='master_flavor_id')
|
cluster_template_attr='master_flavor_id')
|
||||||
self.add_parameter('node_flavor',
|
self.add_parameter('node_flavor',
|
||||||
@ -74,25 +74,25 @@ class AtomicSwarmTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||||||
self.add_parameter('docker_storage_driver',
|
self.add_parameter('docker_storage_driver',
|
||||||
cluster_template_attr='docker_storage_driver')
|
cluster_template_attr='docker_storage_driver')
|
||||||
self.add_parameter('swarm_version',
|
self.add_parameter('swarm_version',
|
||||||
bay_attr='coe_version')
|
cluster_attr='coe_version')
|
||||||
|
|
||||||
self.add_output('api_address',
|
self.add_output('api_address',
|
||||||
bay_attr='api_address',
|
cluster_attr='api_address',
|
||||||
mapping_type=SwarmApiAddressOutputMapping)
|
mapping_type=SwarmApiAddressOutputMapping)
|
||||||
self.add_output('swarm_master_private',
|
self.add_output('swarm_master_private',
|
||||||
bay_attr=None)
|
cluster_attr=None)
|
||||||
self.add_output('swarm_masters',
|
self.add_output('swarm_masters',
|
||||||
bay_attr='master_addresses')
|
cluster_attr='master_addresses')
|
||||||
self.add_output('swarm_nodes_private',
|
self.add_output('swarm_nodes_private',
|
||||||
bay_attr=None)
|
cluster_attr=None)
|
||||||
self.add_output('swarm_nodes',
|
self.add_output('swarm_nodes',
|
||||||
bay_attr='node_addresses')
|
cluster_attr='node_addresses')
|
||||||
self.add_output('discovery_url',
|
self.add_output('discovery_url',
|
||||||
bay_attr='discovery_url')
|
cluster_attr='discovery_url')
|
||||||
|
|
||||||
def get_params(self, context, cluster_template, bay, **kwargs):
|
def get_params(self, context, cluster_template, cluster, **kwargs):
|
||||||
extra_params = kwargs.pop('extra_params', {})
|
extra_params = kwargs.pop('extra_params', {})
|
||||||
extra_params['discovery_url'] = self.get_discovery_url(bay)
|
extra_params['discovery_url'] = self.get_discovery_url(cluster)
|
||||||
# HACK(apmelton) - This uses the user's bearer token, ideally
|
# HACK(apmelton) - This uses the user's bearer token, ideally
|
||||||
# it should be replaced with an actual trust token with only
|
# it should be replaced with an actual trust token with only
|
||||||
# access to do what the template needs it to do.
|
# access to do what the template needs it to do.
|
||||||
@ -113,7 +113,7 @@ class AtomicSwarmTemplateDefinition(template_def.BaseTemplateDefinition):
|
|||||||
CONF.docker_registry.swift_registry_container)
|
CONF.docker_registry.swift_registry_container)
|
||||||
|
|
||||||
return super(AtomicSwarmTemplateDefinition,
|
return super(AtomicSwarmTemplateDefinition,
|
||||||
self).get_params(context, cluster_template, bay,
|
self).get_params(context, cluster_template, cluster,
|
||||||
extra_params=extra_params,
|
extra_params=extra_params,
|
||||||
**kwargs)
|
**kwargs)
|
||||||
|
|
||||||
|
@ -25,9 +25,9 @@ parameters:
|
|||||||
type: string
|
type: string
|
||||||
description: url provided for node discovery
|
description: url provided for node discovery
|
||||||
|
|
||||||
bay_uuid:
|
cluster_uuid:
|
||||||
type: string
|
type: string
|
||||||
description: identifier for the bay this template is generating
|
description: identifier for the cluster this template is generating
|
||||||
|
|
||||||
magnum_url:
|
magnum_url:
|
||||||
type: string
|
type: string
|
||||||
@ -335,7 +335,7 @@ resources:
|
|||||||
######################################################################
|
######################################################################
|
||||||
#
|
#
|
||||||
# resources that expose the IPs of either the swarm master or a given
|
# resources that expose the IPs of either the swarm master or a given
|
||||||
# LBaaS pool depending on whether LBaaS is enabled for the bay.
|
# LBaaS pool depending on whether LBaaS is enabled for the cluster.
|
||||||
#
|
#
|
||||||
|
|
||||||
api_address_switch:
|
api_address_switch:
|
||||||
@ -381,7 +381,7 @@ resources:
|
|||||||
https_proxy: {get_param: https_proxy}
|
https_proxy: {get_param: https_proxy}
|
||||||
no_proxy: {get_param: no_proxy}
|
no_proxy: {get_param: no_proxy}
|
||||||
swarm_api_ip: {get_attr: [api_pool, vip, address]}
|
swarm_api_ip: {get_attr: [api_pool, vip, address]}
|
||||||
bay_uuid: {get_param: bay_uuid}
|
cluster_uuid: {get_param: cluster_uuid}
|
||||||
magnum_url: {get_param: magnum_url}
|
magnum_url: {get_param: magnum_url}
|
||||||
tls_disabled: {get_param: tls_disabled}
|
tls_disabled: {get_param: tls_disabled}
|
||||||
secgroup_swarm_master_id: {get_resource: secgroup_manager}
|
secgroup_swarm_master_id: {get_resource: secgroup_manager}
|
||||||
@ -423,7 +423,7 @@ resources:
|
|||||||
https_proxy: {get_param: https_proxy}
|
https_proxy: {get_param: https_proxy}
|
||||||
no_proxy: {get_param: no_proxy}
|
no_proxy: {get_param: no_proxy}
|
||||||
swarm_api_ip: {get_attr: [api_address_switch, private_ip]}
|
swarm_api_ip: {get_attr: [api_address_switch, private_ip]}
|
||||||
bay_uuid: {get_param: bay_uuid}
|
cluster_uuid: {get_param: cluster_uuid}
|
||||||
magnum_url: {get_param: magnum_url}
|
magnum_url: {get_param: magnum_url}
|
||||||
tls_disabled: {get_param: tls_disabled}
|
tls_disabled: {get_param: tls_disabled}
|
||||||
secgroup_swarm_node_id: {get_resource: secgroup_manager}
|
secgroup_swarm_node_id: {get_resource: secgroup_manager}
|
||||||
|
@@ -81,10 +81,10 @@ def _build_subject_alt_names(config):


 def write_ca_cert(config):
-    bay_cert_url = '%s/certificates/%s' % (config['MAGNUM_URL'],
-                                           config['BAY_UUID'])
+    cluster_cert_url = '%s/certificates/%s' % (config['MAGNUM_URL'],
+                                               config['CLUSTER_UUID'])
     headers = {'X-Auth-Token': config['USER_TOKEN']}
-    ca_cert_resp = requests.get(bay_cert_url,
+    ca_cert_resp = requests.get(cluster_cert_url,
                                 headers=headers)

     with open(CA_CERT_PATH, 'w') as fp:
@@ -116,7 +116,7 @@ def create_server_csr(config):
                      '-config', SERVER_CONF_PATH])

     with open(SERVER_CSR_PATH, 'r') as fp:
-        return {'bay_uuid': config['BAY_UUID'], 'csr': fp.read()}
+        return {'cluster_uuid': config['CLUSTER_UUID'], 'csr': fp.read()}


 def write_server_cert(config, csr_req):
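The renamed CA-certificate download above is just an authenticated HTTP GET against the Magnum API. A rough, self-contained sketch of the same flow follows; the `config` keys mirror the fragment, while the output path and the `pem` response member are assumptions for illustration, not taken from this patch:

    import requests

    CA_CERT_PATH = '/etc/docker/ca.crt'  # assumed output path for this sketch


    def write_ca_cert(config):
        # Same URL shape as the fragment, using the renamed CLUSTER_UUID key.
        cluster_cert_url = '%s/certificates/%s' % (config['MAGNUM_URL'],
                                                   config['CLUSTER_UUID'])
        headers = {'X-Auth-Token': config['USER_TOKEN']}
        ca_cert_resp = requests.get(cluster_cert_url, headers=headers)
        ca_cert_resp.raise_for_status()

        # Assumes the certificates API returns JSON with a 'pem' member.
        with open(CA_CERT_PATH, 'w') as fp:
            fp.write(ca_cert_resp.json()['pem'])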
@@ -16,7 +16,7 @@ write_files:
       NO_PROXY="$NO_PROXY"
       SWARM_API_IP="$SWARM_API_IP"
       SWARM_NODE_IP="$SWARM_NODE_IP"
-      BAY_UUID="$BAY_UUID"
+      CLUSTER_UUID="$CLUSTER_UUID"
       MAGNUM_URL="$MAGNUM_URL"
       TLS_DISABLED="$TLS_DISABLED"
       NETWORK_DRIVER="$NETWORK_DRIVER"
@@ -15,7 +15,7 @@ write_files:
       NO_PROXY="$NO_PROXY"
       SWARM_API_IP="$SWARM_API_IP"
       SWARM_NODE_IP="$SWARM_NODE_IP"
-      BAY_UUID="$BAY_UUID"
+      CLUSTER_UUID="$CLUSTER_UUID"
       MAGNUM_URL="$MAGNUM_URL"
       TLS_DISABLED="$TLS_DISABLED"
       NETWORK_DRIVER="$NETWORK_DRIVER"
@@ -30,9 +30,9 @@ parameters:
     type: string
     description: url provided for node discovery

-  bay_uuid:
+  cluster_uuid:
     type: string
-    description: identifier for the bay this template is generating
+    description: identifier for the cluster this template is generating

   magnum_url:
     type: string
@@ -170,7 +170,7 @@ resources:
   ######################################################################
   #
   # resource that exposes the IPs of either the Swarm master or the API
-  # LBaaS pool depending on whether LBaaS is enabled for the bay.
+  # LBaaS pool depending on whether LBaaS is enabled for the cluster.
   #

   api_address_switch:
@@ -211,7 +211,7 @@ resources:
            "$NO_PROXY": {get_param: no_proxy}
            "$SWARM_API_IP": {get_attr: [api_address_switch, private_ip]}
            "$SWARM_NODE_IP": {get_attr: [swarm_master_eth0, fixed_ips, 0, ip_address]}
-           "$BAY_UUID": {get_param: bay_uuid}
+           "$CLUSTER_UUID": {get_param: cluster_uuid}
            "$MAGNUM_URL": {get_param: magnum_url}
            "$TLS_DISABLED": {get_param: tls_disabled}
            "$NETWORK_DRIVER": {get_param: network_driver}
@@ -286,7 +286,7 @@ resources:
       group: ungrouped
       config:
         str_replace:
-          template: {get_file: fragments/write-bay-failure-service.yaml}
+          template: {get_file: fragments/write-cluster-failure-service.yaml}
           params:
             "$SERVICE": swarm-manager
             "$WAIT_HANDLE_ENDPOINT": {get_attr: [master_wait_handle, endpoint]}
@@ -71,9 +71,9 @@ parameters:
     type: string
     description: swarm master's api server public ip address

-  bay_uuid:
+  cluster_uuid:
     type: string
-    description: identifier for the bay this template is generating
+    description: identifier for the cluster this template is generating

   magnum_url:
     type: string
@@ -195,7 +195,7 @@ resources:
            "$NO_PROXY": {get_param: no_proxy}
            "$SWARM_API_IP": {get_param: swarm_api_ip}
            "$SWARM_NODE_IP": {get_attr: [swarm_node_eth0, fixed_ips, 0, ip_address]}
-           "$BAY_UUID": {get_param: bay_uuid}
+           "$CLUSTER_UUID": {get_param: cluster_uuid}
            "$MAGNUM_URL": {get_param: magnum_url}
            "$TLS_DISABLED": {get_param: tls_disabled}
            "$NETWORK_DRIVER": {get_param: network_driver}
@@ -269,7 +269,7 @@ resources:
       group: ungrouped
       config:
         str_replace:
-          template: {get_file: fragments/write-bay-failure-service.yaml}
+          template: {get_file: fragments/write-cluster-failure-service.yaml}
           params:
             "$SERVICE": swarm-agent
             "$WAIT_HANDLE_ENDPOINT": {get_attr: [node_wait_handle, endpoint]}
@@ -12,19 +12,19 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from magnum.objects import bay
 from magnum.objects import certificate
+from magnum.objects import cluster
 from magnum.objects import cluster_template
 from magnum.objects import magnum_service
 from magnum.objects import x509keypair


-Bay = bay.Bay
+Cluster = cluster.Cluster
 ClusterTemplate = cluster_template.ClusterTemplate
 MagnumService = magnum_service.MagnumService
 X509KeyPair = x509keypair.X509KeyPair
 Certificate = certificate.Certificate
-__all__ = (Bay,
+__all__ = (Cluster,
            ClusterTemplate,
            MagnumService,
            X509KeyPair,
@@ -20,12 +20,12 @@ from oslo_versionedobjects import fields
 from magnum.common import exception
 from magnum.db import api as dbapi
 from magnum.objects import base
-from magnum.objects import cluster_template
+from magnum.objects.cluster_template import ClusterTemplate
 from magnum.objects import fields as m_fields


 @base.MagnumObjectRegistry.register
-class Bay(base.MagnumPersistentObject, base.MagnumObject,
+class Cluster(base.MagnumPersistentObject, base.MagnumObject,
           base.MagnumObjectDictCompat):
     # Version 1.0: Initial version
     # Version 1.1: Added 'bay_create_timeout' field
@@ -38,8 +38,11 @@ class Bay(base.MagnumPersistentObject, base.MagnumObject,
     # Version 1.6: Add rollback support for Bay
     # Version 1.7: Added 'coe_version' and 'container_version' fields
     # Version 1.8: Rename 'baymodel' to 'cluster_template'
+    # Version 1.9: Rename table name from 'bay' to 'cluster'
+    #              Rename 'baymodel_id' to 'cluster_template_id'
+    #              Rename 'bay_create_timeout' to 'create_timeout'

-    VERSION = '1.8'
+    VERSION = '1.9'

     dbapi = dbapi.get_instance()

@@ -49,11 +52,11 @@ class Bay(base.MagnumPersistentObject, base.MagnumObject,
         'name': fields.StringField(nullable=True),
         'project_id': fields.StringField(nullable=True),
         'user_id': fields.StringField(nullable=True),
-        'baymodel_id': fields.StringField(nullable=True),
+        'cluster_template_id': fields.StringField(nullable=True),
         'stack_id': fields.StringField(nullable=True),
-        'status': m_fields.BayStatusField(nullable=True),
+        'status': m_fields.ClusterStatusField(nullable=True),
         'status_reason': fields.StringField(nullable=True),
-        'bay_create_timeout': fields.IntegerField(nullable=True),
+        'create_timeout': fields.IntegerField(nullable=True),
         'api_address': fields.StringField(nullable=True),
         'node_addresses': fields.ListOfStringsField(nullable=True),
         'node_count': fields.IntegerField(nullable=True),
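Only two keys in the fields dict actually change name ('baymodel_id' and 'bay_create_timeout'); the rest are untouched. A tiny helper like the following illustrates that mapping; it is a sketch written for this review, not code from the patch:

    # Legacy Bay field names mapped to the renamed Cluster object fields.
    LEGACY_TO_CLUSTER_FIELDS = {
        'baymodel_id': 'cluster_template_id',
        'bay_create_timeout': 'create_timeout',
    }


    def translate_legacy_values(values):
        """Return a copy of ``values`` with any legacy Bay keys renamed."""
        return {LEGACY_TO_CLUSTER_FIELDS.get(key, key): value
                for key, value in values.items()}


    # Example: node_count passes through unchanged, the two renamed keys move.
    print(translate_legacy_values({'baymodel_id': 'abc123',
                                   'bay_create_timeout': 60,
                                   'node_count': 3}))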
@@ -72,135 +75,136 @@ class Bay(base.MagnumPersistentObject, base.MagnumObject,
     }

     @staticmethod
-    def _from_db_object(bay, db_bay):
+    def _from_db_object(cluster, db_cluster):
         """Converts a database entity to a formal object."""
-        for field in bay.fields:
+        for field in cluster.fields:
             if field != 'cluster_template':
-                bay[field] = db_bay[field]
+                cluster[field] = db_cluster[field]

         # Note(eliqiao): The following line needs to be placed outside the
         # loop because there is a dependency from cluster_template to
-        # baymodel_id. The baymodel_id must be populated first in the loop
-        # before it can be used to find the cluster_template.
-        bay['cluster_template'] = cluster_template.ClusterTemplate.get_by_uuid(
-            bay._context, bay.baymodel_id)
+        # cluster_template_id. The cluster_template_id must be populated
+        # first in the loop before it can be used to find the cluster_template.
+        cluster['cluster_template'] = ClusterTemplate.get_by_uuid(
+            cluster._context, cluster.cluster_template_id)

-        bay.obj_reset_changes()
-        return bay
+        cluster.obj_reset_changes()
+        return cluster

     @staticmethod
     def _from_db_object_list(db_objects, cls, context):
         """Converts a list of database entities to a list of formal objects."""
-        return [Bay._from_db_object(cls(context), obj) for obj in db_objects]
+        return [Cluster._from_db_object(cls(context), obj)
+                for obj in db_objects]

     @base.remotable_classmethod
-    def get(cls, context, bay_id):
-        """Find a bay based on its id or uuid and return a Bay object.
+    def get(cls, context, cluster_id):
+        """Find a cluster based on its id or uuid and return a Cluster object.

-        :param bay_id: the id *or* uuid of a bay.
+        :param cluster_id: the id *or* uuid of a cluster.
         :param context: Security context
-        :returns: a :class:`Bay` object.
+        :returns: a :class:`Cluster` object.
         """
-        if strutils.is_int_like(bay_id):
-            return cls.get_by_id(context, bay_id)
-        elif uuidutils.is_uuid_like(bay_id):
-            return cls.get_by_uuid(context, bay_id)
+        if strutils.is_int_like(cluster_id):
+            return cls.get_by_id(context, cluster_id)
+        elif uuidutils.is_uuid_like(cluster_id):
+            return cls.get_by_uuid(context, cluster_id)
         else:
-            raise exception.InvalidIdentity(identity=bay_id)
+            raise exception.InvalidIdentity(identity=cluster_id)

     @base.remotable_classmethod
-    def get_by_id(cls, context, bay_id):
-        """Find a bay based on its integer id and return a Bay object.
+    def get_by_id(cls, context, cluster_id):
+        """Find a cluster based on its integer id and return a Cluster object.

-        :param bay_id: the id of a bay.
+        :param cluster_id: the id of a cluster.
         :param context: Security context
-        :returns: a :class:`Bay` object.
+        :returns: a :class:`Cluster` object.
         """
-        db_bay = cls.dbapi.get_bay_by_id(context, bay_id)
-        bay = Bay._from_db_object(cls(context), db_bay)
-        return bay
+        db_cluster = cls.dbapi.get_cluster_by_id(context, cluster_id)
+        cluster = Cluster._from_db_object(cls(context), db_cluster)
+        return cluster

     @base.remotable_classmethod
     def get_by_uuid(cls, context, uuid):
-        """Find a bay based on uuid and return a :class:`Bay` object.
+        """Find a cluster based on uuid and return a :class:`Cluster` object.

-        :param uuid: the uuid of a bay.
+        :param uuid: the uuid of a cluster.
         :param context: Security context
-        :returns: a :class:`Bay` object.
+        :returns: a :class:`Cluster` object.
         """
-        db_bay = cls.dbapi.get_bay_by_uuid(context, uuid)
-        bay = Bay._from_db_object(cls(context), db_bay)
-        return bay
+        db_cluster = cls.dbapi.get_cluster_by_uuid(context, uuid)
+        cluster = Cluster._from_db_object(cls(context), db_cluster)
+        return cluster

     @base.remotable_classmethod
     def get_by_name(cls, context, name):
-        """Find a bay based on name and return a Bay object.
+        """Find a cluster based on name and return a Cluster object.

-        :param name: the logical name of a bay.
+        :param name: the logical name of a cluster.
         :param context: Security context
-        :returns: a :class:`Bay` object.
+        :returns: a :class:`Cluster` object.
         """
-        db_bay = cls.dbapi.get_bay_by_name(context, name)
-        bay = Bay._from_db_object(cls(context), db_bay)
-        return bay
+        db_cluster = cls.dbapi.get_cluster_by_name(context, name)
+        cluster = Cluster._from_db_object(cls(context), db_cluster)
+        return cluster

     @base.remotable_classmethod
     def list(cls, context, limit=None, marker=None,
              sort_key=None, sort_dir=None, filters=None):
-        """Return a list of Bay objects.
+        """Return a list of Cluster objects.

         :param context: Security context.
         :param limit: maximum number of resources to return in a single result.
         :param marker: pagination marker for large data sets.
         :param sort_key: column to sort results by.
         :param sort_dir: direction to sort. "asc" or "desc".
-        :param filters: filter dict, can includes 'baymodel_id', 'name',
-                        'node_count', 'stack_id', 'api_address',
+        :param filters: filter dict, can includes 'cluster_template_id',
+                        'name', 'node_count', 'stack_id', 'api_address',
                         'node_addresses', 'project_id', 'user_id',
                         'status'(should be a status list), 'master_count'.
-        :returns: a list of :class:`Bay` object.
+        :returns: a list of :class:`Cluster` object.

         """
-        db_bays = cls.dbapi.get_bay_list(context, limit=limit,
-                                         marker=marker,
-                                         sort_key=sort_key,
-                                         sort_dir=sort_dir,
-                                         filters=filters)
-        return Bay._from_db_object_list(db_bays, cls, context)
+        db_clusters = cls.dbapi.get_cluster_list(context, limit=limit,
+                                                 marker=marker,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 filters=filters)
+        return Cluster._from_db_object_list(db_clusters, cls, context)

     @base.remotable
     def create(self, context=None):
-        """Create a Bay record in the DB.
+        """Create a Cluster record in the DB.

         :param context: Security context. NOTE: This should only
                         be used internally by the indirection_api.
                         Unfortunately, RPC requires context as the first
                         argument, even though we don't use it.
                         A context should be set when instantiating the
-                        object, e.g.: Bay(context)
+                        object, e.g.: Cluster(context)

         """
         values = self.obj_get_changes()
-        db_bay = self.dbapi.create_bay(values)
-        self._from_db_object(self, db_bay)
+        db_cluster = self.dbapi.create_cluster(values)
+        self._from_db_object(self, db_cluster)

     @base.remotable
     def destroy(self, context=None):
-        """Delete the Bay from the DB.
+        """Delete the Cluster from the DB.

         :param context: Security context. NOTE: This should only
                         be used internally by the indirection_api.
                         Unfortunately, RPC requires context as the first
                         argument, even though we don't use it.
                         A context should be set when instantiating the
-                        object, e.g.: Bay(context)
+                        object, e.g.: Cluster(context)
         """
-        self.dbapi.destroy_bay(self.uuid)
+        self.dbapi.destroy_cluster(self.uuid)
         self.obj_reset_changes()

     @base.remotable
     def save(self, context=None):
-        """Save updates to this Bay.
+        """Save updates to this Cluster.

         Updates will be made column by column based on the result
         of self.what_changed().
@@ -210,27 +214,27 @@ class Bay(base.MagnumPersistentObject, base.MagnumObject,
                         Unfortunately, RPC requires context as the first
                         argument, even though we don't use it.
                         A context should be set when instantiating the
-                        object, e.g.: Bay(context)
+                        object, e.g.: Cluster(context)
         """
         updates = self.obj_get_changes()
-        self.dbapi.update_bay(self.uuid, updates)
+        self.dbapi.update_cluster(self.uuid, updates)

         self.obj_reset_changes()

     @base.remotable
     def refresh(self, context=None):
-        """Loads updates for this Bay.
+        """Loads updates for this Cluster.

-        Loads a bay with the same uuid from the database and
+        Loads a Cluster with the same uuid from the database and
         checks for updated attributes. Updates are applied from
-        the loaded bay column by column, if there are any updates.
+        the loaded Cluster column by column, if there are any updates.

         :param context: Security context. NOTE: This should only
                         be used internally by the indirection_api.
                         Unfortunately, RPC requires context as the first
                         argument, even though we don't use it.
                         A context should be set when instantiating the
-                        object, e.g.: Bay(context)
+                        object, e.g.: Cluster(context)
         """
         current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
         for field in self.fields:
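Cluster.get() above keeps the old dispatch rule: integer-looking identifiers go through get_by_id(), UUID-looking ones through get_by_uuid(). A standalone sketch of that routing, with the two database lookups replaced by placeholder callables (these placeholders are assumptions for illustration, not the patched dbapi):

    from oslo_utils import strutils
    from oslo_utils import uuidutils


    def dispatch_cluster_lookup(cluster_ident, by_id, by_uuid):
        # Mirrors Cluster.get(): route an id-or-uuid string to the right lookup.
        # ``by_id`` / ``by_uuid`` stand in for the dbapi calls in this sketch.
        if strutils.is_int_like(cluster_ident):
            return by_id(int(cluster_ident))
        elif uuidutils.is_uuid_like(cluster_ident):
            return by_uuid(cluster_ident)
        raise ValueError('invalid cluster identity: %s' % cluster_ident)


    # '42' takes the integer-id path; a UUID string takes the uuid path.
    print(dispatch_cluster_lookup('42',
                                  by_id=lambda i: ('by_id', i),
                                  by_uuid=lambda u: ('by_uuid', u)))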
@@ -40,7 +40,8 @@ class ClusterTemplate(base.MagnumPersistentObject, base.MagnumObject,
     # Version 1.14: Added 'fixed_subnet' field
     # Version 1.15: Added 'floating_ip_enabled' field
     # Version 1.16: Renamed the class from "BayModel' to 'ClusterTemplate'
-    VERSION = '1.16'
+    # Version 1.17: 'coe' field type change to ClusterTypeField
+    VERSION = '1.17'

     dbapi = dbapi.get_instance()

@@ -65,7 +66,7 @@ class ClusterTemplate(base.MagnumPersistentObject, base.MagnumObject,
         'docker_storage_driver': m_fields.DockerStorageDriverField(
             nullable=True),
         'cluster_distro': fields.StringField(nullable=True),
-        'coe': m_fields.BayTypeField(nullable=True),
+        'coe': m_fields.ClusterTypeField(nullable=True),
         'http_proxy': fields.StringField(nullable=True),
         'https_proxy': fields.StringField(nullable=True),
         'no_proxy': fields.StringField(nullable=True),
@@ -15,7 +15,7 @@
 from oslo_versionedobjects import fields


-class BayStatus(fields.Enum):
+class ClusterStatus(fields.Enum):
     CREATE_IN_PROGRESS = 'CREATE_IN_PROGRESS'
     CREATE_FAILED = 'CREATE_FAILED'
     CREATE_COMPLETE = 'CREATE_COMPLETE'
@@ -45,7 +45,7 @@ class BayStatus(fields.Enum):
            DELETE_FAILED, ROLLBACK_FAILED)

     def __init__(self):
-        super(BayStatus, self).__init__(valid_values=BayStatus.ALL)
+        super(ClusterStatus, self).__init__(valid_values=ClusterStatus.ALL)


 class ContainerStatus(fields.Enum):
@@ -60,7 +60,7 @@ class ContainerStatus(fields.Enum):
                                               valid_values=ContainerStatus.ALL)


-class BayType(fields.Enum):
+class ClusterType(fields.Enum):
     ALL = (
         KUBERNETES, SWARM, MESOS,
     ) = (
@@ -68,7 +68,7 @@ class BayType(fields.Enum):
     )

     def __init__(self):
-        super(BayType, self).__init__(valid_values=BayType.ALL)
+        super(ClusterType, self).__init__(valid_values=ClusterType.ALL)


 class DockerStorageDriver(fields.Enum):
@@ -99,8 +99,8 @@ class ListOfDictsField(fields.AutoTypedField):
     AUTO_TYPE = fields.List(fields.Dict(fields.FieldType()))


-class BayStatusField(fields.BaseEnumField):
-    AUTO_TYPE = BayStatus()
+class ClusterStatusField(fields.BaseEnumField):
+    AUTO_TYPE = ClusterStatus()


 class MagnumServiceField(fields.BaseEnumField):
@@ -111,8 +111,8 @@ class ContainerStatusField(fields.BaseEnumField):
     AUTO_TYPE = ContainerStatus()


-class BayTypeField(fields.BaseEnumField):
-    AUTO_TYPE = BayType()
+class ClusterTypeField(fields.BaseEnumField):
+    AUTO_TYPE = ClusterType()


 class DockerStorageDriverField(fields.BaseEnumField):
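The Enum/BaseEnumField pairing used for ClusterStatusField and ClusterTypeField is the standard oslo.versionedobjects pattern, as the diff shows. A trimmed, self-contained sketch of that pattern (only three of the statuses, kept short for illustration):

    from oslo_versionedobjects import fields


    class ClusterStatus(fields.Enum):
        # Trimmed status set; the real class declares the full list shown above.
        ALL = (CREATE_IN_PROGRESS, CREATE_COMPLETE, CREATE_FAILED) = (
            'CREATE_IN_PROGRESS', 'CREATE_COMPLETE', 'CREATE_FAILED')

        def __init__(self):
            super(ClusterStatus, self).__init__(valid_values=ClusterStatus.ALL)


    class ClusterStatusField(fields.BaseEnumField):
        AUTO_TYPE = ClusterStatus()


    # coerce() only accepts the declared values and raises ValueError otherwise.
    print(ClusterStatusField().coerce(None, 'status', 'CREATE_COMPLETE'))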
@@ -24,7 +24,7 @@ import magnum.common.exception
 import magnum.common.service
 import magnum.common.x509.config
 import magnum.conductor.config
-import magnum.conductor.handlers.bay_conductor
+import magnum.conductor.handlers.cluster_conductor
 import magnum.db
 import magnum.drivers.common.template_def

@@ -52,7 +52,7 @@ def list_opts():
         ('neutron_client', magnum.common.clients.neutron_client_opts),
         ('x509', magnum.common.x509.config.x509_opts),
         ('cluster_heat',
-         magnum.conductor.handlers.bay_conductor.cluster_heat_opts),
+         magnum.conductor.handlers.cluster_conductor.cluster_heat_opts),
         ('certificates',
          itertools.chain(magnum.common.cert_manager.cert_manager_opts,
                          local_cert_manager.local_cert_manager_opts,
@@ -69,140 +69,149 @@ class MagnumPeriodicTasks(periodic_task.PeriodicTasks):

     @periodic_task.periodic_task(run_immediately=True)
     @set_context
-    def sync_bay_status(self, ctx):
+    def sync_cluster_status(self, ctx):
         try:
-            LOG.debug('Starting to sync up bay status')
+            LOG.debug('Starting to sync up cluster status')
             osc = clients.OpenStackClients(ctx)
-            status = [fields.BayStatus.CREATE_IN_PROGRESS,
-                      fields.BayStatus.UPDATE_IN_PROGRESS,
-                      fields.BayStatus.DELETE_IN_PROGRESS,
-                      fields.BayStatus.ROLLBACK_IN_PROGRESS]
+            status = [fields.ClusterStatus.CREATE_IN_PROGRESS,
+                      fields.ClusterStatus.UPDATE_IN_PROGRESS,
+                      fields.ClusterStatus.DELETE_IN_PROGRESS,
+                      fields.ClusterStatus.ROLLBACK_IN_PROGRESS]
             filters = {'status': status}
-            bays = objects.Bay.list(ctx, filters=filters)
-            if not bays:
+            clusters = objects.Cluster.list(ctx, filters=filters)
+            if not clusters:
                 return
-            sid_to_bay_mapping = {bay.stack_id: bay for bay in bays}
-            bay_stack_ids = sid_to_bay_mapping.keys()
+            sid_to_cluster_mapping = {cluster.stack_id:
+                                      cluster for cluster in clusters}
+            cluster_stack_ids = sid_to_cluster_mapping.keys()

             if CONF.periodic_global_stack_list:
-                stacks = osc.heat().stacks.list(global_tenant=True,
-                                                filters={'id': bay_stack_ids})
+                stacks = osc.heat().stacks.list(
+                    global_tenant=True, filters={'id': cluster_stack_ids})
             else:
-                ret = self._get_bay_stacks(bays, sid_to_bay_mapping,
-                                           bay_stack_ids)
-                [stacks, bays, bay_stack_ids, sid_to_bay_mapping] = ret
+                ret = self._get_cluster_stacks(
+                    clusters, sid_to_cluster_mapping, cluster_stack_ids)
+                [stacks, clusters, cluster_stack_ids,
+                 sid_to_cluster_mapping] = ret

             sid_to_stack_mapping = {s.id: s for s in stacks}

-            # intersection of bays magnum has and heat has
-            for sid in (six.viewkeys(sid_to_bay_mapping) &
+            # intersection of clusters magnum has and heat has
+            for sid in (six.viewkeys(sid_to_cluster_mapping) &
                         six.viewkeys(sid_to_stack_mapping)):
                 stack = sid_to_stack_mapping[sid]
-                bay = sid_to_bay_mapping[sid]
-                self._sync_existing_bay(bay, stack)
+                cluster = sid_to_cluster_mapping[sid]
+                self._sync_existing_cluster(cluster, stack)

             # the stacks that magnum has but heat doesn't have
-            for sid in (six.viewkeys(sid_to_bay_mapping) -
+            for sid in (six.viewkeys(sid_to_cluster_mapping) -
                         six.viewkeys(sid_to_stack_mapping)):
-                bay = sid_to_bay_mapping[sid]
-                self._sync_missing_heat_stack(bay)
+                cluster = sid_to_cluster_mapping[sid]
+                self._sync_missing_heat_stack(cluster)

         except Exception as e:
             LOG.warning(_LW(
-                "Ignore error [%s] when syncing up bay status."
+                "Ignore error [%s] when syncing up cluster status."
             ), e, exc_info=True)

-    def _get_bay_stacks(self, bays, sid_to_bay_mapping, bay_stack_ids):
+    def _get_cluster_stacks(
+            self, clusters, sid_to_cluster_mapping, cluster_stack_ids):
         stacks = []

-        _bays = bays
-        _sid_to_bay_mapping = sid_to_bay_mapping
-        _bay_stack_ids = bay_stack_ids
+        _clusters = clusters
+        _sid_to_cluster_mapping = sid_to_cluster_mapping
+        _cluster_stack_ids = cluster_stack_ids

-        for bay in _bays:
+        for cluster in _clusters:
             try:
-                # Create client with bay's trustee user context
+                # Create client with cluster's trustee user context
                 bosc = clients.OpenStackClients(
-                    context.make_bay_context(bay))
-                stack = bosc.heat().stacks.get(bay.stack_id)
+                    context.make_cluster_context(cluster))
+                stack = bosc.heat().stacks.get(cluster.stack_id)
                 stacks.append(stack)
             # No need to do anything in this case
             except heat_exc.HTTPNotFound:
                 pass
             except Exception as e:
                 # Any other exception means we do not perform any
-                # action on this bay in the current sync run, so remove
+                # action on this cluster in the current sync run, so remove
                 # it from all records.
-                LOG.warning(_LW("Exception while attempting to retrieve "
-                                "Heat stack %(stack_id)s for bay %(bay_id)s. "
+                LOG.warning(
+                    _LW("Exception while attempting to retrieve "
+                        "Heat stack %(stack_id)s for cluster %(cluster_id)s. "
                         "Traceback follows."),
-                            {'stack_id': bay.stack_id, 'bay_id': bay.id})
+                    {'stack_id': cluster.stack_id, 'cluster_id': cluster.id})
                 LOG.warning(e)
-                _sid_to_bay_mapping.pop(bay.stack_id)
-                _bay_stack_ids.remove(bay.stack_id)
-                _bays.remove(bay)
-        return [stacks, _bays, _bay_stack_ids, _sid_to_bay_mapping]
+                _sid_to_cluster_mapping.pop(cluster.stack_id)
+                _cluster_stack_ids.remove(cluster.stack_id)
+                _clusters.remove(cluster)
+        return [stacks, _clusters, _cluster_stack_ids, _sid_to_cluster_mapping]

-    def _sync_existing_bay(self, bay, stack):
-        if bay.status != stack.stack_status:
-            old_status = bay.status
-            bay.status = stack.stack_status
-            bay.status_reason = stack.stack_status_reason
-            bay.save()
-            LOG.info(_LI("Sync up bay with id %(id)s from "
+    def _sync_existing_cluster(self, cluster, stack):
+        if cluster.status != stack.stack_status:
+            old_status = cluster.status
+            cluster.status = stack.stack_status
+            cluster.status_reason = stack.stack_status_reason
+            cluster.save()
+            LOG.info(_LI("Sync up cluster with id %(id)s from "
                          "%(old_status)s to %(status)s."),
-                     {'id': bay.id, 'old_status': old_status,
-                      'status': bay.status})
+                     {'id': cluster.id, 'old_status': old_status,
+                      'status': cluster.status})

-    def _sync_missing_heat_stack(self, bay):
-        if bay.status == fields.BayStatus.DELETE_IN_PROGRESS:
-            self._sync_deleted_stack(bay)
-        elif bay.status == fields.BayStatus.CREATE_IN_PROGRESS:
-            self._sync_missing_stack(bay, fields.BayStatus.CREATE_FAILED)
-        elif bay.status == fields.BayStatus.UPDATE_IN_PROGRESS:
-            self._sync_missing_stack(bay, fields.BayStatus.UPDATE_FAILED)
+    def _sync_missing_heat_stack(self, cluster):
+        if cluster.status == fields.ClusterStatus.DELETE_IN_PROGRESS:
+            self._sync_deleted_stack(cluster)
+        elif cluster.status == fields.ClusterStatus.CREATE_IN_PROGRESS:
+            self._sync_missing_stack(cluster,
+                                     fields.ClusterStatus.CREATE_FAILED)
+        elif cluster.status == fields.ClusterStatus.UPDATE_IN_PROGRESS:
+            self._sync_missing_stack(cluster,
+                                     fields.ClusterStatus.UPDATE_FAILED)

-    def _sync_deleted_stack(self, bay):
+    def _sync_deleted_stack(self, cluster):
         try:
-            bay.destroy()
+            cluster.destroy()
         except exception.ClusterNotFound:
-            LOG.info(_LI('The bay %s has been deleted by others.'), bay.uuid)
+            LOG.info(_LI('The cluster %s has been deleted by others.'),
+                     cluster.uuid)
         else:
-            LOG.info(_LI("Bay with id %(id)s not found in heat "
+            LOG.info(_LI("cluster with id %(id)s not found in heat "
                          "with stack id %(sid)s, with status_reason: "
-                         "%(reason)s."), {'id': bay.id, 'sid': bay.stack_id,
-                                          'reason': bay.status_reason})
+                         "%(reason)s."), {'id': cluster.id,
+                                          'sid': cluster.stack_id,
+                                          'reason': cluster.status_reason})

-    def _sync_missing_stack(self, bay, new_status):
-        bay.status = new_status
-        bay.status_reason = _("Stack with id %s not found in "
-                              "Heat.") % bay.stack_id
-        bay.save()
-        LOG.info(_LI("Bay with id %(id)s has been set to "
+    def _sync_missing_stack(self, cluster, new_status):
+        cluster.status = new_status
+        cluster.status_reason = _("Stack with id %s not found in "
+                                  "Heat.") % cluster.stack_id
+        cluster.save()
+        LOG.info(_LI("Cluster with id %(id)s has been set to "
                      "%(status)s due to stack with id %(sid)s "
                      "not found in Heat."),
-                 {'id': bay.id, 'status': bay.status,
-                  'sid': bay.stack_id})
+                 {'id': cluster.id, 'status': cluster.status,
+                  'sid': cluster.stack_id})

     @periodic_task.periodic_task(run_immediately=True)
     @set_context
-    def _send_bay_metrics(self, ctx):
-        LOG.debug('Starting to send bay metrics')
-        for bay in objects.Bay.list(ctx):
-            if bay.status not in [fields.BayStatus.CREATE_COMPLETE,
-                                  fields.BayStatus.UPDATE_COMPLETE]:
+    def _send_cluster_metrics(self, ctx):
+        LOG.debug('Starting to send cluster metrics')
+        for cluster in objects.Cluster.list(ctx):
+            if cluster.status not in [fields.ClusterStatus.CREATE_COMPLETE,
+                                      fields.ClusterStatus.UPDATE_COMPLETE]:
                 continue

-            monitor = monitors.create_monitor(ctx, bay)
+            monitor = monitors.create_monitor(ctx, cluster)
             if monitor is None:
                 continue

             try:
                 monitor.pull_data()
             except Exception as e:
-                LOG.warning(_LW("Skip pulling data from bay %(bay)s due to "
+                LOG.warning(
+                    _LW("Skip pulling data from cluster %(cluster)s due to "
                         "error: %(e)s"),
-                            {'e': e, 'bay': bay.uuid}, exc_info=True)
+                    {'e': e, 'cluster': cluster.uuid}, exc_info=True)
                 continue

             metrics = list()
@@ -220,11 +229,11 @@ class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
                             {'e': e, 'name': name}, exc_info=True)

             message = dict(metrics=metrics,
-                           user_id=bay.user_id,
-                           project_id=bay.project_id,
-                           resource_id=bay.uuid)
+                           user_id=cluster.user_id,
+                           project_id=cluster.project_id,
+                           resource_id=cluster.uuid)
             LOG.debug("About to send notification: '%s'", message)
-            self.notifier.info(ctx, "magnum.bay.metrics.update",
+            self.notifier.info(ctx, "magnum.cluster.metrics.update",
                                message)

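The renamed sync_cluster_status() still works off two mappings keyed by Heat stack id: clusters known to both Magnum and Heat get their status synced in place, while stack ids Heat no longer reports are handed to _sync_missing_heat_stack(). A pure-Python sketch of that partitioning step, using six.viewkeys() the way the method does (the sample data is made up for illustration):

    import six


    def partition_by_stack(sid_to_cluster, sid_to_stack):
        # Stack ids present on both sides get their status synced from Heat;
        # ids only Magnum knows about are treated as missing Heat stacks.
        in_both = six.viewkeys(sid_to_cluster) & six.viewkeys(sid_to_stack)
        missing = six.viewkeys(sid_to_cluster) - six.viewkeys(sid_to_stack)
        return in_both, missing


    clusters = {'stack-1': 'cluster-a', 'stack-2': 'cluster-b'}
    stacks = {'stack-1': 'heat-stack-1'}
    print(partition_by_stack(clusters, stacks))
    # -> ({'stack-1'}, {'stack-2'}) on Python 3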
@@ -21,7 +21,7 @@ echo "Magnum's copy_instance_logs.sh was called..."
 SSH_IP=$1
 COE=${2-kubernetes}
 NODE_TYPE=${3-master}
-LOG_PATH=/opt/stack/logs/bay-nodes/${NODE_TYPE}-${SSH_IP}
+LOG_PATH=/opt/stack/logs/cluster-nodes/${NODE_TYPE}-${SSH_IP}
 KEYPAIR=${4-default}
 PRIVATE_KEY=

@@ -25,7 +25,7 @@ function function_exists {
 # Set up all necessary test data
 function create_test_data {
     # First we test Magnum's command line to see if we can stand up
-    # a baymodel, bay and a pod
+    # a cluster_template, cluster and a pod

     coe=$1
     special=$2
@@ -121,7 +121,7 @@ class TestRootController(api_base.FunctionalTest):
         response = app.get('/v1/')
         self.assertEqual(self.v1_expected, response.json)

-        response = app.get('/v1/baymodels')
+        response = app.get('/v1/clustertemplates')
         self.assertEqual(200, response.status_int)

     def test_auth_with_no_public_routes(self):
@@ -146,7 +146,7 @@ class TestRootController(api_base.FunctionalTest):
         response = app.get('/v1/', expect_errors=True)
         self.assertEqual(401, response.status_int)

-        response = app.get('/v1/baymodels', expect_errors=True)
+        response = app.get('/v1/clustermodels', expect_errors=True)
         self.assertEqual(401, response.status_int)

     def test_auth_with_v1_access(self):
@@ -160,7 +160,7 @@ class TestRootController(api_base.FunctionalTest):
         response = app.get('/v1/')
         self.assertEqual(self.v1_expected, response.json)

-        response = app.get('/v1/baymodels', expect_errors=True)
+        response = app.get('/v1/clustertemplates', expect_errors=True)
         self.assertEqual(401, response.status_int)

@@ -16,6 +16,7 @@ import mock
 from oslo_config import cfg
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
+from wsme import types as wtypes

 from magnum.api import attr_validator
 from magnum.api.controllers.v1 import bay as api_bay
@@ -40,6 +41,29 @@ class TestBayObject(base.TestCase):
         self.assertEqual(1, bay.master_count)
         self.assertEqual(60, bay.bay_create_timeout)

+        # test unset value for baymodel_id
+        bay.baymodel_id = wtypes.Unset
+        self.assertEqual(wtypes.Unset, bay.baymodel_id)
+
+        # test backwards compatibility of bay fields with new objects
+        bay_dict['bay_create_timeout'] = 15
+        bay_dict['bay_faults'] = {'testfault': 'fault'}
+        bay = api_bay.Bay(**bay_dict)
+        self.assertEqual(15, bay.bay_create_timeout)
+        self.assertEqual(15, bay.create_timeout)
+        self.assertIn('testfault', bay.bay_faults)
+        self.assertIn('testfault', bay.faults)
+
+    def test_as_dict_faults(self):
+        bay_dict = apiutils.bay_post_data(baymodel_id=None)
+        del bay_dict['node_count']
+        del bay_dict['master_count']
+        del bay_dict['bay_create_timeout']
+        bay = api_bay.Bay(**bay_dict)
+        bay.bay_faults = {'testfault': 'fault'}
+        dict = bay.as_dict()
+        self.assertEqual({'testfault': 'fault'}, dict['faults'])
+

 class TestListBay(api_base.FunctionalTest):

@@ -60,7 +84,7 @@ class TestListBay(api_base.FunctionalTest):
         self.assertEqual([], response['bays'])

     def test_one(self):
-        bay = obj_utils.create_test_bay(self.context)
+        bay = obj_utils.create_test_cluster(self.context)
         response = self.get_json('/bays')
         self.assertEqual(bay.uuid, response['bays'][0]["uuid"])
         self._verify_attrs(self._bay_attrs, response['bays'][0])
@@ -70,7 +94,7 @@ class TestListBay(api_base.FunctionalTest):
         self._verify_attrs(none_attrs, response['bays'][0], positive=False)

     def test_get_one(self):
-        bay = obj_utils.create_test_bay(self.context)
+        bay = obj_utils.create_test_cluster(self.context)
         response = self.get_json('/bays/%s' % bay['uuid'])
         self.assertEqual(bay.uuid, response['uuid'])
         self._verify_attrs(self._expand_bay_attrs, response)
@@ -85,7 +109,8 @@ class TestListBay(api_base.FunctionalTest):
         ht.resources.list.return_value = [fake_resources]
         mock_heat.return_value = ht

-        bay = obj_utils.create_test_bay(self.context, status='CREATE_FAILED')
+        bay = obj_utils.create_test_cluster(self.context,
+                                            status='CREATE_FAILED')
         response = self.get_json('/bays/%s' % bay['uuid'])
         self.assertEqual(bay.uuid, response['uuid'])
         self.assertEqual({'fake_name': 'fake_reason'}, response['bay_faults'])
@@ -93,13 +118,14 @@ class TestListBay(api_base.FunctionalTest):
     @mock.patch('magnum.common.clients.OpenStackClients.heat')
     def test_get_one_failed_bay_heatclient_exception(self, mock_heat):
         mock_heat.resources.list.side_effect = Exception('fake')
-        bay = obj_utils.create_test_bay(self.context, status='CREATE_FAILED')
+        bay = obj_utils.create_test_cluster(self.context,
+                                            status='CREATE_FAILED')
         response = self.get_json('/bays/%s' % bay['uuid'])
         self.assertEqual(bay.uuid, response['uuid'])
         self.assertEqual({}, response['bay_faults'])

     def test_get_one_by_name(self):
-        bay = obj_utils.create_test_bay(self.context)
+        bay = obj_utils.create_test_cluster(self.context)
         response = self.get_json('/bays/%s' % bay['name'])
         self.assertEqual(bay.uuid, response['uuid'])
         self._verify_attrs(self._expand_bay_attrs, response)
@@ -113,9 +139,9 @@ class TestListBay(api_base.FunctionalTest):
         self.assertTrue(response.json['errors'])

     def test_get_one_by_name_multiple_bay(self):
-        obj_utils.create_test_bay(self.context, name='test_bay',
+        obj_utils.create_test_cluster(self.context, name='test_bay',
                                       uuid=uuidutils.generate_uuid())
-        obj_utils.create_test_bay(self.context, name='test_bay',
+        obj_utils.create_test_cluster(self.context, name='test_bay',
                                       uuid=uuidutils.generate_uuid())
         response = self.get_json('/bays/test_bay', expect_errors=True)
         self.assertEqual(409, response.status_int)
@@ -125,7 +151,7 @@ class TestListBay(api_base.FunctionalTest):
     def test_get_all_with_pagination_marker(self):
         bay_list = []
         for id_ in range(4):
-            bay = obj_utils.create_test_bay(self.context, id=id_,
+            bay = obj_utils.create_test_cluster(self.context, id=id_,
                                                 uuid=uuidutils.generate_uuid())
             bay_list.append(bay)

@@ -135,7 +161,7 @@ class TestListBay(api_base.FunctionalTest):
         self.assertEqual(bay_list[-1].uuid, response['bays'][0]['uuid'])

     def test_detail(self):
-        bay = obj_utils.create_test_bay(self.context)
+        bay = obj_utils.create_test_cluster(self.context)
         response = self.get_json('/bays/detail')
         self.assertEqual(bay.uuid, response['bays'][0]["uuid"])
         self._verify_attrs(self._expand_bay_attrs, response['bays'][0])
@@ -143,7 +169,7 @@ class TestListBay(api_base.FunctionalTest):
     def test_detail_with_pagination_marker(self):
         bay_list = []
         for id_ in range(4):
-            bay = obj_utils.create_test_bay(self.context, id=id_,
+            bay = obj_utils.create_test_cluster(self.context, id=id_,
                                                 uuid=uuidutils.generate_uuid())
             bay_list.append(bay)

@@ -154,7 +180,7 @@ class TestListBay(api_base.FunctionalTest):
         self._verify_attrs(self._expand_bay_attrs, response['bays'][0])

     def test_detail_against_single(self):
-        bay = obj_utils.create_test_bay(self.context)
+        bay = obj_utils.create_test_cluster(self.context)
         response = self.get_json('/bays/%s/detail' % bay['uuid'],
                                  expect_errors=True)
         self.assertEqual(404, response.status_int)
@@ -162,7 +188,7 @@ class TestListBay(api_base.FunctionalTest):
     def test_many(self):
         bm_list = []
         for id_ in range(5):
-            bay = obj_utils.create_test_bay(self.context, id=id_,
+            bay = obj_utils.create_test_cluster(self.context, id=id_,
                                                 uuid=uuidutils.generate_uuid())
             bm_list.append(bay.uuid)
         response = self.get_json('/bays')
@@ -172,7 +198,7 @@ class TestListBay(api_base.FunctionalTest):

     def test_links(self):
         uuid = uuidutils.generate_uuid()
-        obj_utils.create_test_bay(self.context, id=1, uuid=uuid)
+        obj_utils.create_test_cluster(self.context, id=1, uuid=uuid)
         response = self.get_json('/bays/%s' % uuid)
         self.assertIn('links', response.keys())
         self.assertEqual(2, len(response['links']))
@@ -183,7 +209,7 @@ class TestListBay(api_base.FunctionalTest):

     def test_collection_links(self):
         for id_ in range(5):
-            obj_utils.create_test_bay(self.context, id=id_,
+            obj_utils.create_test_cluster(self.context, id=id_,
                                           uuid=uuidutils.generate_uuid())
         response = self.get_json('/bays/?limit=3')
         self.assertEqual(3, len(response['bays']))
@@ -194,7 +220,7 @@ class TestListBay(api_base.FunctionalTest):
     def test_collection_links_default_limit(self):
         cfg.CONF.set_override('max_limit', 3, 'api')
         for id_ in range(5):
-            obj_utils.create_test_bay(self.context, id=id_,
+            obj_utils.create_test_cluster(self.context, id=id_,
                                           uuid=uuidutils.generate_uuid())
         response = self.get_json('/bays')
         self.assertEqual(3, len(response['bays']))
@@ -209,10 +235,10 @@ class TestPatch(api_base.FunctionalTest):
         super(TestPatch, self).setUp()
         self.cluster_template = obj_utils.create_test_cluster_template(
             self.context)
-        self.bay = obj_utils.create_test_bay(self.context,
+        self.bay = obj_utils.create_test_cluster(self.context,
                                                  name='bay_example_A',
                                                  node_count=3)
-        p = mock.patch.object(rpcapi.API, 'bay_update')
+        p = mock.patch.object(rpcapi.API, 'cluster_update')
         self.mock_bay_update = p.start()
         self.mock_bay_update.side_effect = self._simulate_rpc_bay_update
         self.addCleanup(p.stop)
@@ -241,7 +267,7 @@ class TestPatch(api_base.FunctionalTest):
         self.assertEqual(test_time, return_updated_at)
         # Assert nothing else was changed
         self.assertEqual(self.bay.uuid, response['uuid'])
-        self.assertEqual(self.bay.baymodel_id, response['baymodel_id'])
+        self.assertEqual(self.bay.cluster_template_id, response['baymodel_id'])

     @mock.patch('oslo_utils.timeutils.utcnow')
     def test_replace_ok_by_name(self, mock_utcnow):
@@ -263,7 +289,7 @@ class TestPatch(api_base.FunctionalTest):
         self.assertEqual(test_time, return_updated_at)
         # Assert nothing else was changed
         self.assertEqual(self.bay.uuid, response['uuid'])
-        self.assertEqual(self.bay.baymodel_id, response['baymodel_id'])
+        self.assertEqual(self.bay.cluster_template_id, response['baymodel_id'])

     @mock.patch('oslo_utils.timeutils.utcnow')
     def test_replace_ok_by_name_not_found(self, mock_utcnow):
@@ -296,9 +322,9 @@ class TestPatch(api_base.FunctionalTest):
         test_time = datetime.datetime(2000, 1, 1, 0, 0)
         mock_utcnow.return_value = test_time

-        obj_utils.create_test_bay(self.context, name='test_bay',
+        obj_utils.create_test_cluster(self.context, name='test_bay',
                                       uuid=uuidutils.generate_uuid())
-        obj_utils.create_test_bay(self.context, name='test_bay',
+        obj_utils.create_test_cluster(self.context, name='test_bay',
                                       uuid=uuidutils.generate_uuid())

         response = self.patch_json('/bays/test_bay',
@@ -356,7 +382,17 @@ class TestPatch(api_base.FunctionalTest):
         self.assertEqual(400, response.status_int)
         self.assertTrue(response.json['errors'])

-    @mock.patch.object(rpcapi.API, 'bay_update_async')
+    @mock.patch.object(rpcapi.API, 'cluster_update_async')
+    def test_update_bay_async(self, mock_update):
+        response = self.patch_json(
+            '/bays/%s' % self.bay.name,
+            [{'path': '/node_count', 'value': 4,
+              'op': 'replace'}],
+            headers={'OpenStack-API-Version': 'container-infra 1.2'})
+
+        self.assertEqual(202, response.status_code)
+
+    @mock.patch.object(rpcapi.API, 'cluster_update_async')
     def test_update_bay_with_rollback_enabled(self, mock_update):
         response = self.patch_json(
             '/bays/%s/?rollback=True' % self.bay.name,
@@ -381,7 +417,7 @@ class TestPatch(api_base.FunctionalTest):
         self.assertEqual(1, response['node_count'])
         # Assert nothing else was changed
         self.assertEqual(self.bay.uuid, response['uuid'])
-        self.assertEqual(self.bay.baymodel_id, response['baymodel_id'])
+        self.assertEqual(self.bay.cluster_template_id, response['baymodel_id'])
         self.assertEqual(self.bay.name, response['name'])
         self.assertEqual(self.bay.master_count, response['master_count'])

@@ -411,7 +447,7 @@ class TestPost(api_base.FunctionalTest):
         super(TestPost, self).setUp()
         self.cluster_template = obj_utils.create_test_cluster_template(
             self.context)
-        p = mock.patch.object(rpcapi.API, 'bay_create')
+        p = mock.patch.object(rpcapi.API, 'cluster_create')
         self.mock_bay_create = p.start()
         self.mock_bay_create.side_effect = self._simulate_rpc_bay_create
         self.addCleanup(p.stop)
@@ -455,8 +491,8 @@ class TestPost(api_base.FunctionalTest):
         self.post_json('/bays', bdict)

     def test_create_bay_doesnt_contain_id(self):
-        with mock.patch.object(self.dbapi, 'create_bay',
-                               wraps=self.dbapi.create_bay) as cc_mock:
+        with mock.patch.object(self.dbapi, 'create_cluster',
+                               wraps=self.dbapi.create_cluster) as cc_mock:
             bdict = apiutils.bay_post_data(name='bay_example_A')
             response = self.post_json('/bays', bdict)
             self.assertEqual(bdict['name'], response.json['name'])
@@ -759,14 +795,14 @@ class TestDelete(api_base.FunctionalTest):
         super(TestDelete, self).setUp()
         self.cluster_template = obj_utils.create_test_cluster_template(
             self.context)
-        self.bay = obj_utils.create_test_bay(self.context)
-        p = mock.patch.object(rpcapi.API, 'bay_delete')
+        self.bay = obj_utils.create_test_cluster(self.context)
+        p = mock.patch.object(rpcapi.API, 'cluster_delete')
         self.mock_bay_delete = p.start()
         self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete
|
||||||
self.addCleanup(p.stop)
|
self.addCleanup(p.stop)
|
||||||
|
|
||||||
def _simulate_rpc_bay_delete(self, bay_uuid):
|
def _simulate_rpc_bay_delete(self, bay_uuid):
|
||||||
bay = objects.Bay.get_by_uuid(self.context, bay_uuid)
|
bay = objects.Cluster.get_by_uuid(self.context, bay_uuid)
|
||||||
bay.destroy()
|
bay.destroy()
|
||||||
|
|
||||||
def test_delete_bay(self):
|
def test_delete_bay(self):
|
||||||
@ -796,9 +832,9 @@ class TestDelete(api_base.FunctionalTest):
|
|||||||
self.assertEqual(204, response.status_int)
|
self.assertEqual(204, response.status_int)
|
||||||
|
|
||||||
def test_delete_multiple_bay_by_name(self):
|
def test_delete_multiple_bay_by_name(self):
|
||||||
obj_utils.create_test_bay(self.context, name='test_bay',
|
obj_utils.create_test_cluster(self.context, name='test_bay',
|
||||||
uuid=uuidutils.generate_uuid())
|
uuid=uuidutils.generate_uuid())
|
||||||
obj_utils.create_test_bay(self.context, name='test_bay',
|
obj_utils.create_test_cluster(self.context, name='test_bay',
|
||||||
uuid=uuidutils.generate_uuid())
|
uuid=uuidutils.generate_uuid())
|
||||||
response = self.delete('/bays/test_bay', expect_errors=True)
|
response = self.delete('/bays/test_bay', expect_errors=True)
|
||||||
self.assertEqual(409, response.status_int)
|
self.assertEqual(409, response.status_int)
|
||||||
@ -826,7 +862,7 @@ class TestBayPolicyEnforcement(api_base.FunctionalTest):
|
|||||||
"bay:get_all", self.get_json, '/bays', expect_errors=True)
|
"bay:get_all", self.get_json, '/bays', expect_errors=True)
|
||||||
|
|
||||||
def test_policy_disallow_get_one(self):
|
def test_policy_disallow_get_one(self):
|
||||||
self.bay = obj_utils.create_test_bay(self.context)
|
self.bay = obj_utils.create_test_cluster(self.context)
|
||||||
self._common_policy_check(
|
self._common_policy_check(
|
||||||
"bay:get", self.get_json, '/bays/%s' % self.bay.uuid,
|
"bay:get", self.get_json, '/bays/%s' % self.bay.uuid,
|
||||||
expect_errors=True)
|
expect_errors=True)
|
||||||
@ -838,7 +874,7 @@ class TestBayPolicyEnforcement(api_base.FunctionalTest):
|
|||||||
expect_errors=True)
|
expect_errors=True)
|
||||||
|
|
||||||
def test_policy_disallow_update(self):
|
def test_policy_disallow_update(self):
|
||||||
self.bay = obj_utils.create_test_bay(self.context,
|
self.bay = obj_utils.create_test_cluster(self.context,
|
||||||
name='bay_example_A',
|
name='bay_example_A',
|
||||||
node_count=3)
|
node_count=3)
|
||||||
self._common_policy_check(
|
self._common_policy_check(
|
||||||
@ -852,15 +888,15 @@ class TestBayPolicyEnforcement(api_base.FunctionalTest):
|
|||||||
"bay:create", self.post_json, '/bays', bdict, expect_errors=True)
|
"bay:create", self.post_json, '/bays', bdict, expect_errors=True)
|
||||||
|
|
||||||
def _simulate_rpc_bay_delete(self, bay_uuid):
|
def _simulate_rpc_bay_delete(self, bay_uuid):
|
||||||
bay = objects.Bay.get_by_uuid(self.context, bay_uuid)
|
bay = objects.Cluster.get_by_uuid(self.context, bay_uuid)
|
||||||
bay.destroy()
|
bay.destroy()
|
||||||
|
|
||||||
def test_policy_disallow_delete(self):
|
def test_policy_disallow_delete(self):
|
||||||
p = mock.patch.object(rpcapi.API, 'bay_delete')
|
p = mock.patch.object(rpcapi.API, 'cluster_delete')
|
||||||
self.mock_bay_delete = p.start()
|
self.mock_bay_delete = p.start()
|
||||||
self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete
|
self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete
|
||||||
self.addCleanup(p.stop)
|
self.addCleanup(p.stop)
|
||||||
self.bay = obj_utils.create_test_bay(self.context)
|
self.bay = obj_utils.create_test_cluster(self.context)
|
||||||
self._common_policy_check(
|
self._common_policy_check(
|
||||||
"bay:delete", self.delete, '/bays/%s' % self.bay.uuid,
|
"bay:delete", self.delete, '/bays/%s' % self.bay.uuid,
|
||||||
expect_errors=True)
|
expect_errors=True)
|
||||||
@ -875,18 +911,18 @@ class TestBayPolicyEnforcement(api_base.FunctionalTest):
|
|||||||
response.json['errors'][0]['detail'])
|
response.json['errors'][0]['detail'])
|
||||||
|
|
||||||
def test_policy_only_owner_get_one(self):
|
def test_policy_only_owner_get_one(self):
|
||||||
bay = obj_utils.create_test_bay(self.context, user_id='another')
|
bay = obj_utils.create_test_cluster(self.context, user_id='another')
|
||||||
self._owner_check("bay:get", self.get_json, '/bays/%s' % bay.uuid,
|
self._owner_check("bay:get", self.get_json, '/bays/%s' % bay.uuid,
|
||||||
expect_errors=True)
|
expect_errors=True)
|
||||||
|
|
||||||
def test_policy_only_owner_update(self):
|
def test_policy_only_owner_update(self):
|
||||||
bay = obj_utils.create_test_bay(self.context, user_id='another')
|
bay = obj_utils.create_test_cluster(self.context, user_id='another')
|
||||||
self._owner_check(
|
self._owner_check(
|
||||||
"bay:update", self.patch_json, '/bays/%s' % bay.uuid,
|
"bay:update", self.patch_json, '/bays/%s' % bay.uuid,
|
||||||
[{'path': '/name', 'value': "new_name", 'op': 'replace'}],
|
[{'path': '/name', 'value': "new_name", 'op': 'replace'}],
|
||||||
expect_errors=True)
|
expect_errors=True)
|
||||||
|
|
||||||
def test_policy_only_owner_delete(self):
|
def test_policy_only_owner_delete(self):
|
||||||
bay = obj_utils.create_test_bay(self.context, user_id='another')
|
bay = obj_utils.create_test_cluster(self.context, user_id='another')
|
||||||
self._owner_check("bay:delete", self.delete, '/bays/%s' % bay.uuid,
|
self._owner_check("bay:delete", self.delete, '/bays/%s' % bay.uuid,
|
||||||
expect_errors=True)
|
expect_errors=True)
|
||||||

@@ -219,7 +219,8 @@ class TestPatch(api_base.FunctionalTest):

 def test_update_baymodel_with_bay(self):
 baymodel = obj_utils.create_test_cluster_template(self.context)
-obj_utils.create_test_bay(self.context, baymodel_id=baymodel.uuid)
+obj_utils.create_test_cluster(self.context,
+cluster_template_id=baymodel.uuid)

 response = self.patch_json('/baymodels/%s' % baymodel.uuid,
 [{'path': '/name',
@@ -253,7 +254,8 @@ class TestPatch(api_base.FunctionalTest):

 def test_update_baymodel_with_bay_allow_update(self):
 baymodel = obj_utils.create_test_cluster_template(self.context)
-obj_utils.create_test_bay(self.context, baymodel_id=baymodel.uuid)
+obj_utils.create_test_cluster(self.context,
+cluster_template_id=baymodel.uuid)
 response = self.patch_json('/baymodels/%s' % baymodel.uuid,
 [{'path': '/public',
 'value': True,
@@ -265,7 +267,8 @@ class TestPatch(api_base.FunctionalTest):

 def test_update_baymodel_with_bay_not_allow_update(self):
 baymodel = obj_utils.create_test_cluster_template(self.context)
-obj_utils.create_test_bay(self.context, baymodel_id=baymodel.uuid)
+obj_utils.create_test_cluster(self.context,
+cluster_template_id=baymodel.uuid)
 response = self.patch_json('/baymodels/%s' % baymodel.uuid,
 [{'path': '/name',
 'value': 'new_name',
@@ -911,7 +914,8 @@ class TestDelete(api_base.FunctionalTest):

 def test_delete_baymodel_with_bay(self):
 baymodel = obj_utils.create_test_cluster_template(self.context)
-obj_utils.create_test_bay(self.context, baymodel_id=baymodel.uuid)
+obj_utils.create_test_cluster(self.context,
+cluster_template_id=baymodel.uuid)
 response = self.delete('/baymodels/%s' % baymodel.uuid,
 expect_errors=True)
 self.assertEqual(400, response.status_int)

@@ -222,14 +222,14 @@ class TestPatch(api_base.FunctionalTest):
 self.context)
 self.cluster_obj = obj_utils.create_test_cluster(
 self.context, name='cluster_example_A', node_count=3)
-p = mock.patch.object(rpcapi.API, 'bay_update_async')
+p = mock.patch.object(rpcapi.API, 'cluster_update_async')
-self.mock_bay_update = p.start()
+self.mock_cluster_update = p.start()
-self.mock_bay_update.side_effect = self._simulate_rpc_bay_update
+self.mock_cluster_update.side_effect = self._sim_rpc_cluster_update
 self.addCleanup(p.stop)

-def _simulate_rpc_bay_update(self, bay, rollback=False):
+def _sim_rpc_cluster_update(self, cluster, rollback=False):
-bay.save()
+cluster.save()
-return bay
+return cluster

 @mock.patch('oslo_utils.timeutils.utcnow')
 def test_replace_ok(self, mock_utcnow):
@@ -251,7 +251,7 @@ class TestPatch(api_base.FunctionalTest):
 self.assertEqual(test_time, return_updated_at)
 # Assert nothing else was changed
 self.assertEqual(self.cluster_obj.uuid, response['uuid'])
-self.assertEqual(self.cluster_obj.baymodel_id,
+self.assertEqual(self.cluster_obj.cluster_template_id,
 response['cluster_template_id'])

 @mock.patch('oslo_utils.timeutils.utcnow')
@@ -274,7 +274,7 @@ class TestPatch(api_base.FunctionalTest):
 self.assertEqual(test_time, return_updated_at)
 # Assert nothing else was changed
 self.assertEqual(self.cluster_obj.uuid, response['uuid'])
-self.assertEqual(self.cluster_obj.baymodel_id,
+self.assertEqual(self.cluster_obj.cluster_template_id,
 response['cluster_template_id'])

 @mock.patch('oslo_utils.timeutils.utcnow')
@@ -385,7 +385,7 @@ class TestPatch(api_base.FunctionalTest):
 self.assertEqual(1, response['node_count'])
 # Assert nothing else was changed
 self.assertEqual(self.cluster_obj.uuid, response['uuid'])
-self.assertEqual(self.cluster_obj.baymodel_id,
+self.assertEqual(self.cluster_obj.cluster_template_id,
 response['cluster_template_id'])
 self.assertEqual(self.cluster_obj.name, response['name'])
 self.assertEqual(self.cluster_obj.master_count,
@@ -416,17 +416,17 @@ class TestPost(api_base.FunctionalTest):
 super(TestPost, self).setUp()
 self.cluster_template = obj_utils.create_test_cluster_template(
 self.context)
-p = mock.patch.object(rpcapi.API, 'bay_create_async')
+p = mock.patch.object(rpcapi.API, 'cluster_create_async')
-self.mock_bay_create = p.start()
+self.mock_cluster_create = p.start()
-self.mock_bay_create.side_effect = self._simulate_rpc_bay_create
+self.mock_cluster_create.side_effect = self._simulate_cluster_create
 self.addCleanup(p.stop)
 p = mock.patch.object(attr_validator, 'validate_os_resources')
 self.mock_valid_os_res = p.start()
 self.addCleanup(p.stop)

-def _simulate_rpc_bay_create(self, bay, bay_create_timeout):
+def _simulate_cluster_create(self, cluster, create_timeout):
-bay.create()
+cluster.create()
-return bay
+return cluster

 @mock.patch('oslo_utils.timeutils.utcnow')
 def test_create_cluster(self, mock_utcnow):
@@ -442,19 +442,19 @@ class TestPost(api_base.FunctionalTest):
 def test_create_cluster_set_project_id_and_user_id(self):
 bdict = apiutils.cluster_post_data()

-def _simulate_rpc_bay_create(bay, bay_create_timeout):
+def _simulate_rpc_cluster_create(cluster, create_timeout):
-self.assertEqual(self.context.project_id, bay.project_id)
+self.assertEqual(self.context.project_id, cluster.project_id)
-self.assertEqual(self.context.user_id, bay.user_id)
+self.assertEqual(self.context.user_id, cluster.user_id)
-bay.create()
+cluster.create()
-return bay
+return cluster

-self.mock_bay_create.side_effect = _simulate_rpc_bay_create
+self.mock_cluster_create.side_effect = _simulate_rpc_cluster_create

 self.post_json('/clusters', bdict)

 def test_create_cluster_doesnt_contain_id(self):
-with mock.patch.object(self.dbapi, 'create_bay',
+with mock.patch.object(self.dbapi, 'create_cluster',
-wraps=self.dbapi.create_bay) as cc_mock:
+wraps=self.dbapi.create_cluster) as cc_mock:
 bdict = apiutils.cluster_post_data(name='cluster_example_A')
 response = self.post_json('/clusters', bdict)
 cc_mock.assert_called_once_with(mock.ANY)
@@ -643,12 +643,12 @@ class TestPost(api_base.FunctionalTest):
 self.assertEqual(202, response.status_int)

 def test_create_cluster_with_no_timeout(self):
-def _simulate_rpc_bay_create(bay, bay_create_timeout):
+def _simulate_rpc_cluster_create(cluster, create_timeout):
-self.assertEqual(60, bay_create_timeout)
+self.assertEqual(60, create_timeout)
-bay.create()
+cluster.create()
-return bay
+return cluster

-self.mock_bay_create.side_effect = _simulate_rpc_bay_create
+self.mock_cluster_create.side_effect = _simulate_rpc_cluster_create
 bdict = apiutils.cluster_post_data()
 del bdict['create_timeout']
 response = self.post_json('/clusters', bdict, expect_errors=True)
@@ -748,14 +748,14 @@ class TestDelete(api_base.FunctionalTest):
 self.cluster_template = obj_utils.create_test_cluster_template(
 self.context)
 self.cluster = obj_utils.create_test_cluster(self.context)
-p = mock.patch.object(rpcapi.API, 'bay_delete_async')
+p = mock.patch.object(rpcapi.API, 'cluster_delete_async')
-self.mock_bay_delete = p.start()
+self.mock_cluster_delete = p.start()
-self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete
+self.mock_cluster_delete.side_effect = self._simulate_cluster_delete
 self.addCleanup(p.stop)

-def _simulate_rpc_bay_delete(self, bay_uuid):
+def _simulate_cluster_delete(self, cluster_uuid):
-bay = objects.Bay.get_by_uuid(self.context, bay_uuid)
+cluster = objects.Cluster.get_by_uuid(self.context, cluster_uuid)
-bay.destroy()
+cluster.destroy()

 def test_delete_cluster(self):
 self.delete('/clusters/%s' % self.cluster.uuid)
@@ -840,14 +840,14 @@ class TestClusterPolicyEnforcement(api_base.FunctionalTest):
 "cluster:create", self.post_json, '/clusters', bdict,
 expect_errors=True)

-def _simulate_rpc_bay_delete(self, bay_uuid):
+def _simulate_cluster_delete(self, cluster_uuid):
-bay = objects.Bay.get_by_uuid(self.context, bay_uuid)
+cluster = objects.Cluster.get_by_uuid(self.context, cluster_uuid)
-bay.destroy()
+cluster.destroy()

 def test_policy_disallow_delete(self):
-p = mock.patch.object(rpcapi.API, 'bay_delete')
+p = mock.patch.object(rpcapi.API, 'cluster_delete')
-self.mock_bay_delete = p.start()
+self.mock_cluster_delete = p.start()
-self.mock_bay_delete.side_effect = self._simulate_rpc_bay_delete
+self.mock_cluster_delete.side_effect = self._simulate_cluster_delete
 self.addCleanup(p.stop)
 self.cluster = obj_utils.create_test_cluster(self.context)
 self._common_policy_check(

@@ -228,8 +228,8 @@ class TestPatch(api_base.FunctionalTest):

 def test_update_cluster_template_with_cluster(self):
 cluster_template = obj_utils.create_test_cluster_template(self.context)
-obj_utils.create_test_cluster(self.context,
+obj_utils.create_test_cluster(
-baymodel_id=cluster_template.uuid)
+self.context, cluster_template_id=cluster_template.uuid)

 response = self.patch_json('/clustertemplates/%s' %
 cluster_template.uuid,
@@ -267,8 +267,8 @@ class TestPatch(api_base.FunctionalTest):

 def test_update_cluster_template_with_cluster_allow_update(self):
 cluster_template = obj_utils.create_test_cluster_template(self.context)
-obj_utils.create_test_cluster(self.context,
+obj_utils.create_test_cluster(
-baymodel_id=cluster_template.uuid)
+self.context, cluster_template_id=cluster_template.uuid)
 response = self.patch_json('/clustertemplates/%s' %
 cluster_template.uuid,
 [{'path': '/public',
@@ -282,8 +282,8 @@ class TestPatch(api_base.FunctionalTest):

 def test_update_cluster_template_with_cluster_not_allow_update(self):
 cluster_template = obj_utils.create_test_cluster_template(self.context)
-obj_utils.create_test_cluster(self.context,
+obj_utils.create_test_cluster(
-baymodel_id=cluster_template.uuid)
+self.context, cluster_template_id=cluster_template.uuid)
 response = self.patch_json('/clustertemplates/%s' %
 cluster_template.uuid,
 [{'path': '/name',
@@ -963,8 +963,8 @@ class TestDelete(api_base.FunctionalTest):

 def test_delete_cluster_template_with_cluster(self):
 cluster_template = obj_utils.create_test_cluster_template(self.context)
-obj_utils.create_test_cluster(self.context,
+obj_utils.create_test_cluster(
-baymodel_id=cluster_template.uuid)
+self.context, cluster_template_id=cluster_template.uuid)
 response = self.delete('/clustertemplates/%s' % cluster_template.uuid,
 expect_errors=True)
 self.assertEqual(400, response.status_int)

@@ -53,40 +53,40 @@ class TestApiUtils(base.FunctionalTest):
 'fake-sort')

 @mock.patch('pecan.request')
-@mock.patch('magnum.objects.Bay.get_by_name')
+@mock.patch('magnum.objects.Cluster.get_by_name')
-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
 def test_get_resource_with_uuid(
 self,
 mock_get_by_uuid,
 mock_get_by_name,
 mock_request):
-mock_bay = mock.MagicMock
+mock_cluster = mock.MagicMock
-mock_get_by_uuid.return_value = mock_bay
+mock_get_by_uuid.return_value = mock_cluster
 uuid = uuidutils.generate_uuid()

-returned_bay = utils.get_resource('Bay', uuid)
+returned_cluster = utils.get_resource('Cluster', uuid)

 mock_get_by_uuid.assert_called_once_with(mock_request.context, uuid)
 self.assertFalse(mock_get_by_name.called)
-self.assertEqual(mock_bay, returned_bay)
+self.assertEqual(mock_cluster, returned_cluster)

 @mock.patch('pecan.request')
-@mock.patch('magnum.objects.Bay.get_by_name')
+@mock.patch('magnum.objects.Cluster.get_by_name')
-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
 def test_get_resource_with_name(
 self,
 mock_get_by_uuid,
 mock_get_by_name,
 mock_request):
-mock_bay = mock.MagicMock
+mock_cluster = mock.MagicMock
-mock_get_by_name.return_value = mock_bay
+mock_get_by_name.return_value = mock_cluster

-returned_bay = utils.get_resource('Bay', 'fake-name')
+returned_cluster = utils.get_resource('Cluster', 'fake-name')

 self.assertFalse(mock_get_by_uuid.called)
 mock_get_by_name.assert_called_once_with(mock_request.context,
 'fake-name')
-self.assertEqual(mock_bay, returned_bay)
+self.assertEqual(mock_cluster, returned_cluster)

 @mock.patch.object(uuidutils, 'is_uuid_like', return_value=True)
 def test_get_openstack_resource_by_uuid(self, fake_is_uuid_like):
@@ -125,13 +125,13 @@ class TestApiUtils(base.FunctionalTest):

 @mock.patch.object(jsonpatch, 'apply_patch')
 def test_apply_jsonpatch(self, mock_jsonpatch):
-doc = {'bay_uuid': 'id', 'node_count': 1}
+doc = {'cluster_uuid': 'id', 'node_count': 1}
 patch = [{"path": "/node_count", "value": 2, "op": "replace"}]
 utils.apply_jsonpatch(doc, patch)
 mock_jsonpatch.assert_called_once_with(doc, patch)

 def test_apply_jsonpatch_add_attr_not_exist(self):
-doc = {'bay_uuid': 'id', 'node_count': 1}
+doc = {'cluster_uuid': 'id', 'node_count': 1}
 patch = [{"path": "/fake", "value": 2, "op": "add"}]
 exc = self.assertRaises(wsme.exc.ClientSideError,
 utils.apply_jsonpatch,
@@ -141,7 +141,7 @@ class TestApiUtils(base.FunctionalTest):
 "not allowed.", exc.faultstring)

 def test_apply_jsonpatch_add_attr_already_exist(self):
-doc = {'bay_uuid': 'id', 'node_count': 1}
+doc = {'cluster_uuid': 'id', 'node_count': 1}
 patch = [{"path": "/node_count", "value": 2, "op": "add"}]
 exc = self.assertRaises(wsme.exc.ClientSideError,
 utils.apply_jsonpatch,

@@ -26,148 +26,148 @@ from magnum.tests.unit.objects import utils as obj_utils

 class TestValidation(base.BaseTestCase):

-def _test_enforce_bay_types(
+def _test_enforce_cluster_types(
 self,
-mock_bay_get_by_uuid,
+mock_cluster_get_by_uuid,
 mock_pecan_request,
-bay_type,
+cluster_type,
-allowed_bay_types,
+allowed_cluster_types,
 assert_raised=False,
 *args):

-@v.enforce_bay_types(*allowed_bay_types)
+@v.enforce_cluster_types(*allowed_cluster_types)
 def test(self, *args):
-if hasattr(args[0], 'bay_uuid'):
+if hasattr(args[0], 'cluster_uuid'):
 return args[0].name
 else:
 return args[1]

 context = mock_pecan_request.context
-bay = mock.MagicMock()
+cluster = mock.MagicMock()
-bay.baymodel_id = 'cluster_template_id'
+cluster.cluster_template_id = 'cluster_template_id'
 cluster_template = obj_utils.get_test_cluster_template(
-context, uuid='cluster_template_id', coe=bay_type)
+context, uuid='cluster_template_id', coe=cluster_type)
-bay.cluster_template = cluster_template
+cluster.cluster_template = cluster_template

-mock_bay_get_by_uuid.return_value = bay
+mock_cluster_get_by_uuid.return_value = cluster

 if assert_raised:
 self.assertRaises(
 exception.InvalidParameterValue, test, self, *args)
 else:
 ret = test(self, *args)
-if hasattr(args[0], 'bay_uuid'):
+if hasattr(args[0], 'cluster_uuid'):
-mock_bay_get_by_uuid.assert_called_once_with(context,
+mock_cluster_get_by_uuid.assert_called_once_with(
-args[0].bay_uuid)
+context, args[0].cluster_uuid)
 self.assertEqual(args[0].name, ret)
 else:
-mock_bay_get_by_uuid.assert_called_once_with(context, args[1])
+mock_cluster_get_by_uuid.assert_called_once_with(
+context, args[1])
 self.assertEqual(args[1], ret)

 @mock.patch('pecan.request')
-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
-def test_enforce_bay_types_one_allowed(
+def test_enforce_cluster_types_one_allowed(
 self,
-mock_bay_get_by_uuid,
+mock_cluster_get_by_uuid,
 mock_pecan_request):

 obj = mock.MagicMock()
 obj.name = 'test_object'
-obj.bay_uuid = 'bay_uuid'
+obj.cluster_uuid = 'cluster_uuid'
-bay_type = 'swarm'
+cluster_type = 'swarm'
-allowed_bay_types = ['swarm']
+allowed_cluster_types = ['swarm']
-self._test_enforce_bay_types(
+self._test_enforce_cluster_types(
-mock_bay_get_by_uuid, mock_pecan_request,
+mock_cluster_get_by_uuid, mock_pecan_request,
-bay_type, allowed_bay_types, False, obj)
+cluster_type, allowed_cluster_types, False, obj)

 @mock.patch('pecan.request')
-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
-def test_enforce_bay_types_two_allowed(
+def test_enforce_cluster_types_two_allowed(
 self,
-mock_bay_get_by_uuid,
+mock_cluster_get_by_uuid,
 mock_pecan_request):

 obj = mock.MagicMock()
 obj.name = 'test_object'
-obj.bay_uuid = 'bay_uuid'
+obj.cluster_uuid = 'cluster_uuid'
-bay_type = 'swarm'
+cluster_type = 'swarm'
-allowed_bay_types = ['swarm', 'mesos']
+allowed_cluster_types = ['swarm', 'mesos']
-self._test_enforce_bay_types(
+self._test_enforce_cluster_types(
-mock_bay_get_by_uuid, mock_pecan_request,
+mock_cluster_get_by_uuid, mock_pecan_request,
-bay_type, allowed_bay_types, False, obj)
+cluster_type, allowed_cluster_types, False, obj)

 @mock.patch('pecan.request')
-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
-def test_enforce_bay_types_not_allowed(
+def test_enforce_cluster_types_not_allowed(
 self,
-mock_bay_get_by_uuid,
+mock_cluster_get_by_uuid,
 mock_pecan_request):

 obj = mock.MagicMock()
 obj.name = 'test_object'
-obj.bay_uuid = 'bay_uuid'
+obj.cluster_uuid = 'cluster_uuid'
-bay_type = 'swarm'
+cluster_type = 'swarm'
-allowed_bay_types = ['mesos']
+allowed_cluster_types = ['mesos']
-self._test_enforce_bay_types(
+self._test_enforce_cluster_types(
-mock_bay_get_by_uuid, mock_pecan_request,
+mock_cluster_get_by_uuid, mock_pecan_request,
-bay_type, allowed_bay_types,
+cluster_type, allowed_cluster_types,
 True, obj)

 @mock.patch('pecan.request')
-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
-def test_enforce_bay_types_with_bay_uuid(self, mock_bay_get_by_uuid,
+def test_enforce_cluster_types_with_cluster_uuid(self,
+mock_cluster_get_by_uuid,
 mock_pecan_request):

-bay_ident = 'e74c40e0-d825-11e2-a28f-0800200c9a66'
+cluster_ident = 'e74c40e0-d825-11e2-a28f-0800200c9a66'

-bay_type = 'swarm'
+cluster_type = 'swarm'
-allowed_bay_types = ['swarm']
+allowed_cluster_types = ['swarm']
-self._test_enforce_bay_types(
+self._test_enforce_cluster_types(
-mock_bay_get_by_uuid, mock_pecan_request,
+mock_cluster_get_by_uuid, mock_pecan_request,
-bay_type, allowed_bay_types, False,
+cluster_type, allowed_cluster_types, False,
-None, bay_ident)
+None, cluster_ident)

 @mock.patch('pecan.request')
-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
-def test_enforce_bay_types_with_bay_uuid_not_allowed(self,
+def test_enforce_cluster_types_with_cluster_uuid_not_allowed(
-mock_bay_get_by_uuid,
+self, mock_cluster_get_by_uuid, mock_pecan_request):
-mock_pecan_request):

-bay_ident = 'e74c40e0-d825-11e2-a28f-0800200c9a66'
+cluster_ident = 'e74c40e0-d825-11e2-a28f-0800200c9a66'

-bay_type = 'swarm'
+cluster_type = 'swarm'
-allowed_bay_types = ['mesos']
+allowed_cluster_types = ['mesos']
-self._test_enforce_bay_types(
+self._test_enforce_cluster_types(
-mock_bay_get_by_uuid, mock_pecan_request,
+mock_cluster_get_by_uuid, mock_pecan_request,
-bay_type, allowed_bay_types, True,
+cluster_type, allowed_cluster_types, True,
-None, bay_ident)
+None, cluster_ident)

 @mock.patch('pecan.request')
-@mock.patch('magnum.objects.Bay.get_by_name')
+@mock.patch('magnum.objects.Cluster.get_by_name')
-def test_enforce_bay_types_with_bay_name(self, mock_bay_get_by_uuid,
+def test_enforce_cluster_types_with_cluster_name(
-mock_pecan_request):
+self, mock_cluster_get_by_uuid, mock_pecan_request):

-bay_ident = 'bay_name'
+cluster_ident = 'cluster_name'
-bay_type = 'swarm'
+cluster_type = 'swarm'
-allowed_bay_types = ['swarm']
+allowed_cluster_types = ['swarm']
-self._test_enforce_bay_types(
+self._test_enforce_cluster_types(
-mock_bay_get_by_uuid, mock_pecan_request,
+mock_cluster_get_by_uuid, mock_pecan_request,
-bay_type, allowed_bay_types, False,
+cluster_type, allowed_cluster_types, False,
-None, bay_ident)
+None, cluster_ident)

 @mock.patch('pecan.request')
-@mock.patch('magnum.objects.Bay.get_by_name')
+@mock.patch('magnum.objects.Cluster.get_by_name')
-def test_enforce_bay_types_with_bay_name_not_allowed(self,
+def test_enforce_cluster_types_with_cluster_name_not_allowed(
-mock_bay_get_by_uuid,
+self, mock_cluster_get_by_uuid, mock_pecan_request):
-mock_pecan_request):

-bay_ident = 'bay_name'
+cluster_ident = 'cluster_name'
-bay_type = 'swarm'
+cluster_type = 'swarm'
-allowed_bay_types = ['mesos']
+allowed_cluster_types = ['mesos']
-self._test_enforce_bay_types(
+self._test_enforce_cluster_types(
-mock_bay_get_by_uuid, mock_pecan_request,
+mock_cluster_get_by_uuid, mock_pecan_request,
-bay_type, allowed_bay_types, True,
+cluster_type, allowed_cluster_types, True,
-None, bay_ident)
+None, cluster_ident)

 def _test_enforce_network_driver_types_create(
 self,
@@ -413,11 +413,11 @@ class TestValidation(base.BaseTestCase):
 volume_driver_type='cinder',
 op='remove')

-def test_validate_bay_properties(self):
+def test_validate_cluster_properties(self):
-allowed_properties = v.bay_update_allowed_properties
+allowed_properties = v.cluster_update_allowed_properties
-for field in objects.Bay.fields:
+for field in objects.Cluster.fields:
 if field in allowed_properties:
-v.validate_bay_properties(set([field]))
+v.validate_cluster_properties(set([field]))
 else:
 self.assertRaises(exception.InvalidParameterValue,
-v.validate_bay_properties, set([field]))
+v.validate_cluster_properties, set([field]))

@@ -42,19 +42,18 @@ def cluster_template_post_data(**kw):


 def bay_post_data(**kw):
-bay = utils.get_test_bay(**kw)
+bay = utils.get_test_cluster(**kw)
+bay['baymodel_id'] = kw.get('baymodel_id', bay['cluster_template_id'])
 bay['bay_create_timeout'] = kw.get('bay_create_timeout', 15)
+del bay['cluster_template_id']
+del bay['create_timeout']
 internal = bay_controller.BayPatchType.internal_attrs()
 return remove_internal(bay, internal)


 def cluster_post_data(**kw):
-cluster = utils.get_test_bay(**kw)
+cluster = utils.get_test_cluster(**kw)
 cluster['create_timeout'] = kw.get('create_timeout', 15)
-cluster['cluster_template_id'] = kw.get('cluster_template_id',
-cluster['baymodel_id'])
-del cluster['bay_create_timeout']
-del cluster['baymodel_id']
 internal = cluster_controller.ClusterPatchType.internal_attrs()
 return remove_internal(cluster, internal)

@@ -112,18 +112,18 @@ class KeystoneClientTest(base.TestCase):
 def test_delete_trust(self, mock_ks):
 mock_ks.return_value.trusts.delete.return_value = None
 ks_client = keystone.KeystoneClientV3(self.ctx)
-bay = mock.MagicMock()
+cluster = mock.MagicMock()
-bay.trust_id = 'atrust123'
+cluster.trust_id = 'atrust123'
-self.assertIsNone(ks_client.delete_trust(self.ctx, bay))
+self.assertIsNone(ks_client.delete_trust(self.ctx, cluster))
 mock_ks.return_value.trusts.delete.assert_called_once_with('atrust123')

 def test_delete_trust_not_found(self, mock_ks):
 mock_delete = mock_ks.return_value.trusts.delete
 mock_delete.side_effect = kc_exception.NotFound()
 ks_client = keystone.KeystoneClientV3(self.ctx)
-bay = mock.MagicMock()
+cluster = mock.MagicMock()
-bay.trust_id = 'atrust123'
+cluster.trust_id = 'atrust123'
-self.assertIsNone(ks_client.delete_trust(self.ctx, bay))
+self.assertIsNone(ks_client.delete_trust(self.ctx, cluster))

 @mock.patch('keystoneauth1.session.Session')
 def test_create_trust_with_all_roles(self, mock_session, mock_ks):

@@ -35,8 +35,8 @@ class TrustManagerTestCase(base.BaseTestCase):
 def test_create_trustee_and_trust(self, mock_generate_password):
 mock_password = "password_mock"
 mock_generate_password.return_value = mock_password
-mock_bay = mock.MagicMock()
+mock_cluster = mock.MagicMock()
-mock_bay.uuid = 'mock_bay_uuid'
+mock_cluster.uuid = 'mock_cluster_uuid'
 mock_keystone = mock.MagicMock()
 mock_trustee = mock.MagicMock()
 mock_trustee.id = 'mock_trustee_id'
@@ -49,75 +49,75 @@ class TrustManagerTestCase(base.BaseTestCase):
 mock_keystone.create_trustee.return_value = mock_trustee
 mock_keystone.create_trust.return_value = mock_trust

-trust_manager.create_trustee_and_trust(self.osc, mock_bay)
+trust_manager.create_trustee_and_trust(self.osc, mock_cluster)

 mock_keystone.create_trustee.assert_called_once_with(
-mock_bay.uuid,
+mock_cluster.uuid,
 mock_password,
 )
 mock_keystone.create_trust.assert_called_once_with(
 mock_trustee.id,
 )
-self.assertEqual(mock_trustee.name, mock_bay.trustee_username)
+self.assertEqual(mock_trustee.name, mock_cluster.trustee_username)
-self.assertEqual(mock_trustee.id, mock_bay.trustee_user_id)
+self.assertEqual(mock_trustee.id, mock_cluster.trustee_user_id)
-self.assertEqual(mock_password, mock_bay.trustee_password)
+self.assertEqual(mock_password, mock_cluster.trustee_password)
-self.assertEqual(mock_trust.id, mock_bay.trust_id)
+self.assertEqual(mock_trust.id, mock_cluster.trust_id)

 @patch('magnum.common.utils.generate_password')
 def test_create_trustee_and_trust_with_error(self, mock_generate_password):
-mock_bay = mock.MagicMock()
+mock_cluster = mock.MagicMock()
 mock_generate_password.side_effect = exception.MagnumException()

-self.assertRaises(exception.TrusteeOrTrustToBayFailed,
+self.assertRaises(exception.TrusteeOrTrustToClusterFailed,
 trust_manager.create_trustee_and_trust,
 self.osc,
-mock_bay)
+mock_cluster)

 def test_delete_trustee_and_trust(self):
-mock_bay = mock.MagicMock()
+mock_cluster = mock.MagicMock()
-mock_bay.trust_id = 'trust_id'
+mock_cluster.trust_id = 'trust_id'
-mock_bay.trustee_user_id = 'trustee_user_id'
+mock_cluster.trustee_user_id = 'trustee_user_id'
 mock_keystone = mock.MagicMock()
 self.osc.keystone.return_value = mock_keystone
 context = mock.MagicMock()

 trust_manager.delete_trustee_and_trust(self.osc, context,
-mock_bay)
+mock_cluster)

 mock_keystone.delete_trust.assert_called_once_with(
-context, mock_bay
+context, mock_cluster
 )
 mock_keystone.delete_trustee.assert_called_once_with(
-mock_bay.trustee_user_id,
+mock_cluster.trustee_user_id,
 )

 def test_delete_trustee_and_trust_without_trust_id(self):
-mock_bay = mock.MagicMock()
+mock_cluster = mock.MagicMock()
-mock_bay.trust_id = None
+mock_cluster.trust_id = None
-mock_bay.trustee_user_id = 'trustee_user_id'
+mock_cluster.trustee_user_id = 'trustee_user_id'
 mock_keystone = mock.MagicMock()
 self.osc.keystone.return_value = mock_keystone
 context = mock.MagicMock()

 trust_manager.delete_trustee_and_trust(self.osc, context,
-mock_bay)
+mock_cluster)

 self.assertEqual(0, mock_keystone.delete_trust.call_count)
 mock_keystone.delete_trustee.assert_called_once_with(
-mock_bay.trustee_user_id,
+mock_cluster.trustee_user_id,
 )

 def test_delete_trustee_and_trust_without_trustee_user_id(self):
-mock_bay = mock.MagicMock()
+mock_cluster = mock.MagicMock()
-mock_bay.trust_id = 'trust_id'
+mock_cluster.trust_id = 'trust_id'
-mock_bay.trustee_user_id = None
+mock_cluster.trustee_user_id = None
 mock_keystone = mock.MagicMock()
 self.osc.keystone.return_value = mock_keystone
 context = mock.MagicMock()

-trust_manager.delete_trustee_and_trust(self.osc, context, mock_bay)
+trust_manager.delete_trustee_and_trust(self.osc, context, mock_cluster)

 mock_keystone.delete_trust.assert_called_once_with(
-context, mock_bay
+context, mock_cluster
 )
 self.assertEqual(0, mock_keystone.delete_trustee.call_count)
@ -24,9 +24,9 @@ from oslo_service import loopingcall
|
|||||||
from pycadf import cadftaxonomy as taxonomy
|
from pycadf import cadftaxonomy as taxonomy
|
||||||
|
|
||||||
from magnum.common import exception
|
from magnum.common import exception
|
||||||
from magnum.conductor.handlers import bay_conductor
|
from magnum.conductor.handlers import cluster_conductor
|
||||||
from magnum import objects
|
from magnum import objects
|
||||||
from magnum.objects.fields import BayStatus as bay_status
|
from magnum.objects.fields import ClusterStatus as cluster_status
|
||||||
from magnum.tests import base
|
from magnum.tests import base
|
||||||
from magnum.tests import fake_notifier
|
from magnum.tests import fake_notifier
|
||||||
from magnum.tests.unit.db import base as db_base
|
from magnum.tests.unit.db import base as db_base
|
||||||
@ -37,92 +37,95 @@ class TestHandler(db_base.DbTestCase):
|
|||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(TestHandler, self).setUp()
|
super(TestHandler, self).setUp()
|
||||||
self.handler = bay_conductor.Handler()
|
self.handler = cluster_conductor.Handler()
|
||||||
cluster_template_dict = utils.get_test_cluster_template()
|
cluster_template_dict = utils.get_test_cluster_template()
|
||||||
self.cluster_template = objects.ClusterTemplate(
|
self.cluster_template = objects.ClusterTemplate(
|
||||||
self.context, **cluster_template_dict)
|
self.context, **cluster_template_dict)
|
||||||
self.cluster_template.create()
|
self.cluster_template.create()
|
||||||
bay_dict = utils.get_test_bay(node_count=1)
|
cluster_dict = utils.get_test_cluster(node_count=1)
|
||||||
self.bay = objects.Bay(self.context, **bay_dict)
|
self.cluster = objects.Cluster(self.context, **cluster_dict)
|
||||||
self.bay.create()
|
self.cluster.create()
|
||||||
|
|
||||||
@patch('magnum.conductor.scale_manager.ScaleManager')
|
@patch('magnum.conductor.scale_manager.ScaleManager')
|
||||||
@patch('magnum.conductor.handlers.bay_conductor.Handler._poll_and_check')
|
@patch(
|
||||||
@patch('magnum.conductor.handlers.bay_conductor._update_stack')
|
'magnum.conductor.handlers.cluster_conductor.Handler._poll_and_check')
|
||||||
|
@patch('magnum.conductor.handlers.cluster_conductor._update_stack')
|
||||||
@patch('magnum.common.clients.OpenStackClients')
|
@patch('magnum.common.clients.OpenStackClients')
|
||||||
def test_update_node_count_success(
|
def test_update_node_count_success(
|
||||||
self, mock_openstack_client_class,
|
self, mock_openstack_client_class,
|
||||||
mock_update_stack, mock_poll_and_check,
|
mock_update_stack, mock_poll_and_check,
|
||||||
mock_scale_manager):
|
mock_scale_manager):
|
||||||
def side_effect(*args, **kwargs):
|
def side_effect(*args, **kwargs):
|
||||||
self.bay.node_count = 2
|
self.cluster.node_count = 2
|
||||||
self.bay.save()
|
self.cluster.save()
|
||||||
mock_poll_and_check.side_effect = side_effect
|
mock_poll_and_check.side_effect = side_effect
|
||||||
mock_heat_stack = mock.MagicMock()
|
mock_heat_stack = mock.MagicMock()
|
||||||
mock_heat_stack.stack_status = bay_status.CREATE_COMPLETE
|
mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE
|
||||||
mock_heat_client = mock.MagicMock()
|
mock_heat_client = mock.MagicMock()
mock_heat_client.stacks.get.return_value = mock_heat_stack
mock_openstack_client = mock_openstack_client_class.return_value
mock_openstack_client.heat.return_value = mock_heat_client

-self.bay.node_count = 2
+self.cluster.node_count = 2
-self.handler.bay_update(self.context, self.bay)
+self.handler.cluster_update(self.context, self.cluster)

notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
self.assertEqual(
-'magnum.bay.update', notifications[0].event_type)
+'magnum.cluster.update', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])

mock_update_stack.assert_called_once_with(
-self.context, mock_openstack_client, self.bay,
+self.context, mock_openstack_client, self.cluster,
mock_scale_manager.return_value, False)
-bay = objects.Bay.get(self.context, self.bay.uuid)
+cluster = objects.Cluster.get(self.context, self.cluster.uuid)
-self.assertEqual(2, bay.node_count)
+self.assertEqual(2, cluster.node_count)

-@patch('magnum.conductor.handlers.bay_conductor.Handler._poll_and_check')
-@patch('magnum.conductor.handlers.bay_conductor._update_stack')
+@patch(
+'magnum.conductor.handlers.cluster_conductor.Handler._poll_and_check')
+@patch('magnum.conductor.handlers.cluster_conductor._update_stack')
@patch('magnum.common.clients.OpenStackClients')
def test_update_node_count_failure(
self, mock_openstack_client_class,
mock_update_stack, mock_poll_and_check):
def side_effect(*args, **kwargs):
-self.bay.node_count = 2
+self.cluster.node_count = 2
-self.bay.save()
+self.cluster.save()
mock_poll_and_check.side_effect = side_effect
mock_heat_stack = mock.MagicMock()
-mock_heat_stack.stack_status = bay_status.CREATE_FAILED
+mock_heat_stack.stack_status = cluster_status.CREATE_FAILED
mock_heat_client = mock.MagicMock()
mock_heat_client.stacks.get.return_value = mock_heat_stack
mock_openstack_client = mock_openstack_client_class.return_value
mock_openstack_client.heat.return_value = mock_heat_client

-self.bay.node_count = 2
+self.cluster.node_count = 2
-self.assertRaises(exception.NotSupported, self.handler.bay_update,
+self.assertRaises(exception.NotSupported, self.handler.cluster_update,
-self.context, self.bay)
+self.context, self.cluster)

notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
self.assertEqual(
-'magnum.bay.update', notifications[0].event_type)
+'magnum.cluster.update', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome'])

-bay = objects.Bay.get(self.context, self.bay.uuid)
+cluster = objects.Cluster.get(self.context, self.cluster.uuid)
-self.assertEqual(1, bay.node_count)
+self.assertEqual(1, cluster.node_count)

@patch('magnum.conductor.scale_manager.ScaleManager')
-@patch('magnum.conductor.handlers.bay_conductor.Handler._poll_and_check')
-@patch('magnum.conductor.handlers.bay_conductor._update_stack')
+@patch(
+'magnum.conductor.handlers.cluster_conductor.Handler._poll_and_check')
+@patch('magnum.conductor.handlers.cluster_conductor._update_stack')
@patch('magnum.common.clients.OpenStackClients')
-def _test_update_bay_status_complete(
+def _test_update_cluster_status_complete(
self, expect_status, mock_openstack_client_class,
mock_update_stack, mock_poll_and_check,
mock_scale_manager):
def side_effect(*args, **kwargs):
-self.bay.node_count = 2
+self.cluster.node_count = 2
-self.bay.save()
+self.cluster.save()
mock_poll_and_check.side_effect = side_effect
mock_heat_stack = mock.MagicMock()
mock_heat_stack.stack_status = expect_status
@@ -131,47 +134,54 @@ class TestHandler(db_base.DbTestCase):
mock_openstack_client = mock_openstack_client_class.return_value
mock_openstack_client.heat.return_value = mock_heat_client

-self.bay.node_count = 2
+self.cluster.node_count = 2
-self.handler.bay_update(self.context, self.bay)
+self.handler.cluster_update(self.context, self.cluster)

notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
self.assertEqual(
-'magnum.bay.update', notifications[0].event_type)
+'magnum.cluster.update', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])

mock_update_stack.assert_called_once_with(
-self.context, mock_openstack_client, self.bay,
+self.context, mock_openstack_client, self.cluster,
mock_scale_manager.return_value, False)
-bay = objects.Bay.get(self.context, self.bay.uuid)
+cluster = objects.Cluster.get(self.context, self.cluster.uuid)
-self.assertEqual(2, bay.node_count)
+self.assertEqual(2, cluster.node_count)

-def test_update_bay_status_update_compelete(self):
+def test_update_cluster_status_update_compelete(self):
-self._test_update_bay_status_complete(bay_status.UPDATE_COMPLETE)
+self._test_update_cluster_status_complete(
+cluster_status.UPDATE_COMPLETE)

-def test_update_bay_status_resume_compelete(self):
+def test_update_cluster_status_resume_compelete(self):
-self._test_update_bay_status_complete(bay_status.RESUME_COMPLETE)
+self._test_update_cluster_status_complete(
+cluster_status.RESUME_COMPLETE)

-def test_update_bay_status_restore_compelete(self):
+def test_update_cluster_status_restore_compelete(self):
-self._test_update_bay_status_complete(bay_status.RESTORE_COMPLETE)
+self._test_update_cluster_status_complete(
+cluster_status.RESTORE_COMPLETE)

-def test_update_bay_status_rollback_compelete(self):
+def test_update_cluster_status_rollback_compelete(self):
-self._test_update_bay_status_complete(bay_status.ROLLBACK_COMPLETE)
+self._test_update_cluster_status_complete(
+cluster_status.ROLLBACK_COMPLETE)

-def test_update_bay_status_snapshot_compelete(self):
+def test_update_cluster_status_snapshot_compelete(self):
-self._test_update_bay_status_complete(bay_status.SNAPSHOT_COMPLETE)
+self._test_update_cluster_status_complete(
+cluster_status.SNAPSHOT_COMPLETE)

-def test_update_bay_status_check_compelete(self):
+def test_update_cluster_status_check_compelete(self):
-self._test_update_bay_status_complete(bay_status.CHECK_COMPLETE)
+self._test_update_cluster_status_complete(
+cluster_status.CHECK_COMPLETE)

-def test_update_bay_status_adopt_compelete(self):
+def test_update_cluster_status_adopt_compelete(self):
-self._test_update_bay_status_complete(bay_status.ADOPT_COMPLETE)
+self._test_update_cluster_status_complete(
+cluster_status.ADOPT_COMPLETE)

-@patch('magnum.conductor.handlers.bay_conductor.HeatPoller')
+@patch('magnum.conductor.handlers.cluster_conductor.HeatPoller')
-@patch('magnum.conductor.handlers.bay_conductor.trust_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
-@patch('magnum.conductor.handlers.bay_conductor.cert_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
-@patch('magnum.conductor.handlers.bay_conductor._create_stack')
+@patch('magnum.conductor.handlers.cluster_conductor._create_stack')
@patch('magnum.common.clients.OpenStackClients')
def test_create(self, mock_openstack_client_class,
mock_create_stack, mock_cm, mock_trust_manager,
@@ -183,39 +193,40 @@ class TestHandler(db_base.DbTestCase):
osc = mock.sentinel.osc
mock_openstack_client_class.return_value = osc

-def create_stack_side_effect(context, osc, bay, timeout):
+def create_stack_side_effect(context, osc, cluster, timeout):
return {'stack': {'id': 'stack-id'}}

mock_create_stack.side_effect = create_stack_side_effect

-# FixMe(eliqiao): bay_create will call bay.create() again, this so bad
-# because we have already called it in setUp since other test case will
-# share the codes in setUp()
-# But in self.handler.bay_create, we update bay.uuid and bay.stack_id
-# so bay.create will create a new recored with baymodel_id None,
-# this is bad because we load BayModel object in Bay object by
-# baymodel_id. Here update self.bay.baymodel_id so bay.obj_get_changes
-# will get notice that baymodel_id is updated and will update it
+# FixMe(eliqiao): cluster_create will call cluster.create()
+# again, this so bad because we have already called it in setUp
+# since other test case will share the codes in setUp()
+# But in self.handler.cluster_create, we update cluster.uuid and
+# cluster.stack_id so cluster.create will create a new recored with
+# clustermodel_id None, this is bad because we load clusterModel
+# object in cluster object by clustermodel_id. Here update
+# self.cluster.clustermodel_id so cluster.obj_get_changes will get
+# notice that clustermodel_id is updated and will update it
# in db.
-self.bay.baymodel_id = self.cluster_template.uuid
+self.cluster.cluster_template_id = self.cluster_template.uuid
-bay = self.handler.bay_create(self.context,
-self.bay, timeout)
+cluster = self.handler.cluster_create(self.context,
+self.cluster, timeout)

notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
self.assertEqual(
-'magnum.bay.create', notifications[0].event_type)
+'magnum.cluster.create', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])

mock_create_stack.assert_called_once_with(self.context,
mock.sentinel.osc,
-self.bay, timeout)
+self.cluster, timeout)
mock_cm.generate_certificates_to_cluster.assert_called_once_with(
-self.bay, context=self.context)
+self.cluster, context=self.context)
-self.assertEqual(bay_status.CREATE_IN_PROGRESS, bay.status)
+self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status)
mock_trust_manager.create_trustee_and_trust.assert_called_once_with(
-osc, self.bay)
+osc, self.cluster)

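The FixMe note above relies on the dirty-field tracking of Magnum's versioned objects: re-assigning cluster_template_id just before cluster_create() makes obj_get_changes() report the field, so the later create()/save() persists a real template id instead of None. A minimal, hand-rolled sketch of that behaviour; MiniCluster and its field are invented for illustration and are not the oslo.versionedobjects implementation:

    # Illustrative stand-in for versioned-object change tracking.
    class MiniCluster(object):
        def __init__(self):
            self._changes = {}

        def __setattr__(self, name, value):
            if name != '_changes':
                # every assignment marks the field as dirty
                self._changes[name] = value
            super(MiniCluster, self).__setattr__(name, value)

        def obj_get_changes(self):
            return dict(self._changes)

    c = MiniCluster()
    c.cluster_template_id = 'xx-xx-xx-xx'
    assert 'cluster_template_id' in c.obj_get_changes()
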
def _test_create_failed(self,
mock_openstack_client_class,
@@ -230,29 +241,29 @@ class TestHandler(db_base.DbTestCase):

self.assertRaises(
expected_exception,
-self.handler.bay_create,
+self.handler.cluster_create,
self.context,
-self.bay, timeout
+self.cluster, timeout
)

gctb = mock_cert_manager.generate_certificates_to_cluster
if is_create_cert_called:
-gctb.assert_called_once_with(self.bay, context=self.context)
+gctb.assert_called_once_with(self.cluster, context=self.context)
else:
gctb.assert_not_called()
ctat = mock_trust_manager.create_trustee_and_trust
if is_create_trust_called:
-ctat.assert_called_once_with(osc, self.bay)
+ctat.assert_called_once_with(osc, self.cluster)
else:
ctat.assert_not_called()

-mock_cert_manager.delete_certificates_from_cluster(self.bay)
+mock_cert_manager.delete_certificates_from_cluster(self.cluster)
mock_trust_manager.delete_trustee_and_trust.assert_called_once_with(
-osc, self.context, self.bay)
+osc, self.context, self.cluster)

-@patch('magnum.conductor.handlers.bay_conductor.trust_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
-@patch('magnum.conductor.handlers.bay_conductor.cert_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
-@patch('magnum.conductor.handlers.bay_conductor._create_stack')
+@patch('magnum.conductor.handlers.cluster_conductor._create_stack')
@patch('magnum.common.clients.OpenStackClients')
def test_create_handles_bad_request(self, mock_openstack_client_class,
mock_create_stack,
@@ -270,16 +281,16 @@ class TestHandler(db_base.DbTestCase):
notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(2, len(notifications))
self.assertEqual(
-'magnum.bay.create', notifications[0].event_type)
+'magnum.cluster.create', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
self.assertEqual(
-'magnum.bay.create', notifications[1].event_type)
+'magnum.cluster.create', notifications[1].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome'])

-@patch('magnum.conductor.handlers.bay_conductor.trust_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
-@patch('magnum.conductor.handlers.bay_conductor.cert_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
@patch('magnum.common.clients.OpenStackClients')
def test_create_with_cert_failed(self, mock_openstack_client_class,
mock_cert_manager,
@@ -297,39 +308,39 @@ class TestHandler(db_base.DbTestCase):
notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
self.assertEqual(
-'magnum.bay.create', notifications[0].event_type)
+'magnum.cluster.create', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome'])

-@patch('magnum.conductor.handlers.bay_conductor.trust_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
-@patch('magnum.conductor.handlers.bay_conductor.cert_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
-@patch('magnum.conductor.handlers.bay_conductor._create_stack')
+@patch('magnum.conductor.handlers.cluster_conductor._create_stack')
@patch('magnum.common.clients.OpenStackClients')
def test_create_with_trust_failed(self, mock_openstack_client_class,
mock_create_stack,
mock_cert_manager,
mock_trust_manager):
-e = exception.TrusteeOrTrustToBayFailed(bay_uuid='uuid')
+e = exception.TrusteeOrTrustToClusterFailed(cluster_uuid='uuid')
mock_trust_manager.create_trustee_and_trust.side_effect = e

self._test_create_failed(
mock_openstack_client_class,
mock_cert_manager,
mock_trust_manager,
-exception.TrusteeOrTrustToBayFailed,
+exception.TrusteeOrTrustToClusterFailed,
False
)

notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(1, len(notifications))
self.assertEqual(
-'magnum.bay.create', notifications[0].event_type)
+'magnum.cluster.create', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[0].payload['outcome'])

-@patch('magnum.conductor.handlers.bay_conductor.trust_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
-@patch('magnum.conductor.handlers.bay_conductor.cert_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
-@patch('magnum.conductor.handlers.bay_conductor._create_stack')
+@patch('magnum.conductor.handlers.cluster_conductor._create_stack')
@patch('magnum.common.clients.OpenStackClients')
def test_create_with_invalid_unicode_name(self,
mock_openstack_client_class,
@@ -351,23 +362,23 @@ class TestHandler(db_base.DbTestCase):
notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(2, len(notifications))
self.assertEqual(
-'magnum.bay.create', notifications[0].event_type)
+'magnum.cluster.create', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
self.assertEqual(
-'magnum.bay.create', notifications[1].event_type)
+'magnum.cluster.create', notifications[1].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome'])

-@patch('magnum.conductor.handlers.bay_conductor.HeatPoller')
+@patch('magnum.conductor.handlers.cluster_conductor.HeatPoller')
@patch('heatclient.common.template_utils'
'.process_multiple_environments_and_files')
@patch('heatclient.common.template_utils.get_template_contents')
-@patch('magnum.conductor.handlers.bay_conductor'
+@patch('magnum.conductor.handlers.cluster_conductor'
'._extract_template_definition')
-@patch('magnum.conductor.handlers.bay_conductor.trust_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
-@patch('magnum.conductor.handlers.bay_conductor.cert_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
-@patch('magnum.conductor.handlers.bay_conductor.short_id')
+@patch('magnum.conductor.handlers.cluster_conductor.short_id')
@patch('magnum.common.clients.OpenStackClients')
def test_create_with_environment(self,
mock_openstack_client_class,
@@ -379,8 +390,8 @@ class TestHandler(db_base.DbTestCase):
mock_process_mult,
mock_heat_poller_class):
timeout = 15
-self.bay.baymodel_id = self.cluster_template.uuid
+self.cluster.cluster_template_id = self.cluster_template.uuid
-bay_name = self.bay.name
+cluster_name = self.cluster.name
mock_short_id.generate_id.return_value = 'short_id'
mock_poller = mock.MagicMock()
mock_poller.poll_and_check.return_value = loopingcall.LoopingCallDone()
@@ -412,9 +423,10 @@ class TestHandler(db_base.DbTestCase):
osc.heat.return_value = mock_hc
mock_openstack_client_class.return_value = osc

-self.handler.bay_create(self.context, self.bay, timeout)
+self.handler.cluster_create(self.context, self.cluster, timeout)

-mock_extract_tmpl_def.assert_called_once_with(self.context, self.bay)
+mock_extract_tmpl_def.assert_called_once_with(self.context,
+self.cluster)
mock_get_template_contents.assert_called_once_with(
'the/template/path.yaml')
mock_process_mult.assert_called_once_with(
@@ -432,54 +444,54 @@ class TestHandler(db_base.DbTestCase):
'content of file:///the/template/env_file_2'
},
parameters={'heat_param_1': 'foo', 'heat_param_2': 'bar'},
-stack_name=('%s-short_id' % bay_name),
+stack_name=('%s-short_id' % cluster_name),
template='some template yaml',
timeout_mins=timeout)

-@patch('magnum.conductor.handlers.bay_conductor.cert_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
@patch('magnum.common.clients.OpenStackClients')
-def test_bay_delete(self, mock_openstack_client_class, cert_manager):
+def test_cluster_delete(self, mock_openstack_client_class, cert_manager):
osc = mock.MagicMock()
mock_openstack_client_class.return_value = osc
osc.heat.side_effect = exc.HTTPNotFound
-self.handler.bay_delete(self.context, self.bay.uuid)
+self.handler.cluster_delete(self.context, self.cluster.uuid)

notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(2, len(notifications))
self.assertEqual(
-'magnum.bay.delete', notifications[0].event_type)
+'magnum.cluster.delete', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
self.assertEqual(
-'magnum.bay.delete', notifications[1].event_type)
+'magnum.cluster.delete', notifications[1].event_type)
self.assertEqual(
taxonomy.OUTCOME_SUCCESS, notifications[1].payload['outcome'])
self.assertEqual(
1, cert_manager.delete_certificates_from_cluster.call_count)
# The cluster has been destroyed
self.assertRaises(exception.ClusterNotFound,
-objects.Bay.get, self.context, self.bay.uuid)
+objects.Cluster.get, self.context, self.cluster.uuid)

-@patch('magnum.conductor.handlers.bay_conductor.cert_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
@patch('magnum.common.clients.OpenStackClients')
-def test_bay_delete_conflict(self, mock_openstack_client_class,
+def test_cluster_delete_conflict(self, mock_openstack_client_class,
cert_manager):
osc = mock.MagicMock()
mock_openstack_client_class.return_value = osc
osc.heat.side_effect = exc.HTTPConflict
self.assertRaises(exception.OperationInProgress,
-self.handler.bay_delete,
+self.handler.cluster_delete,
self.context,
-self.bay.uuid)
+self.cluster.uuid)

notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(2, len(notifications))
self.assertEqual(
-'magnum.bay.delete', notifications[0].event_type)
+'magnum.cluster.delete', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_PENDING, notifications[0].payload['outcome'])
self.assertEqual(
-'magnum.bay.delete', notifications[1].event_type)
+'magnum.cluster.delete', notifications[1].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome'])
self.assertEqual(
@@ -495,7 +507,7 @@ class TestHeatPoller(base.TestCase):
mock_retrieve_cluster_template):
cfg.CONF.cluster_heat.max_attempts = 10

-bay = mock.MagicMock()
+cluster = mock.MagicMock()
cluster_template_dict = utils.get_test_cluster_template(
coe='kubernetes')
mock_heat_stack = mock.MagicMock()
@@ -505,183 +517,183 @@ class TestHeatPoller(base.TestCase):
cluster_template = objects.ClusterTemplate(self.context,
**cluster_template_dict)
mock_retrieve_cluster_template.return_value = cluster_template
-poller = bay_conductor.HeatPoller(mock_openstack_client, bay)
+poller = cluster_conductor.HeatPoller(mock_openstack_client, cluster)
poller.get_version_info = mock.MagicMock()
-return (mock_heat_stack, bay, poller)
+return (mock_heat_stack, cluster, poller)

def test_poll_and_check_send_notification(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()
-mock_heat_stack.stack_status = bay_status.CREATE_COMPLETE
+mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
-mock_heat_stack.stack_status = bay_status.CREATE_FAILED
+mock_heat_stack.stack_status = cluster_status.CREATE_FAILED
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
-mock_heat_stack.stack_status = bay_status.DELETE_COMPLETE
+mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
-mock_heat_stack.stack_status = bay_status.DELETE_FAILED
+mock_heat_stack.stack_status = cluster_status.DELETE_FAILED
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
-mock_heat_stack.stack_status = bay_status.UPDATE_COMPLETE
+mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
-mock_heat_stack.stack_status = bay_status.UPDATE_FAILED
+mock_heat_stack.stack_status = cluster_status.UPDATE_FAILED
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

self.assertEqual(6, poller.attempts)
notifications = fake_notifier.NOTIFICATIONS
self.assertEqual(6, len(notifications))
self.assertEqual(
-'magnum.bay.create', notifications[0].event_type)
+'magnum.cluster.create', notifications[0].event_type)
self.assertEqual(
taxonomy.OUTCOME_SUCCESS, notifications[0].payload['outcome'])
self.assertEqual(
-'magnum.bay.create', notifications[1].event_type)
+'magnum.cluster.create', notifications[1].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[1].payload['outcome'])
self.assertEqual(
-'magnum.bay.delete', notifications[2].event_type)
+'magnum.cluster.delete', notifications[2].event_type)
self.assertEqual(
taxonomy.OUTCOME_SUCCESS, notifications[2].payload['outcome'])
self.assertEqual(
-'magnum.bay.delete', notifications[3].event_type)
+'magnum.cluster.delete', notifications[3].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[3].payload['outcome'])
self.assertEqual(
-'magnum.bay.update', notifications[4].event_type)
+'magnum.cluster.update', notifications[4].event_type)
self.assertEqual(
taxonomy.OUTCOME_SUCCESS, notifications[4].payload['outcome'])
self.assertEqual(
-'magnum.bay.update', notifications[5].event_type)
+'magnum.cluster.update', notifications[5].event_type)
self.assertEqual(
taxonomy.OUTCOME_FAILURE, notifications[5].payload['outcome'])

def test_poll_no_save(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-bay.status = bay_status.CREATE_IN_PROGRESS
+cluster.status = cluster_status.CREATE_IN_PROGRESS
-mock_heat_stack.stack_status = bay_status.CREATE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
poller.poll_and_check()

-self.assertEqual(0, bay.save.call_count)
+self.assertEqual(0, cluster.save.call_count)
self.assertEqual(1, poller.attempts)

def test_poll_save(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-bay.status = bay_status.CREATE_IN_PROGRESS
+cluster.status = cluster_status.CREATE_IN_PROGRESS
-mock_heat_stack.stack_status = bay_status.CREATE_FAILED
+mock_heat_stack.stack_status = cluster_status.CREATE_FAILED
mock_heat_stack.stack_status_reason = 'Create failed'
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

-self.assertEqual(2, bay.save.call_count)
+self.assertEqual(2, cluster.save.call_count)
-self.assertEqual(bay_status.CREATE_FAILED, bay.status)
+self.assertEqual(cluster_status.CREATE_FAILED, cluster.status)
-self.assertEqual('Create failed', bay.status_reason)
+self.assertEqual('Create failed', cluster.status_reason)
self.assertEqual(1, poller.attempts)

def test_poll_done(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.DELETE_COMPLETE
+mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

-mock_heat_stack.stack_status = bay_status.CREATE_FAILED
+mock_heat_stack.stack_status = cluster_status.CREATE_FAILED
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
self.assertEqual(2, poller.attempts)

def test_poll_done_by_update(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.UPDATE_COMPLETE
+mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE
mock_heat_stack.parameters = {'number_of_minions': 2}
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

-self.assertEqual(1, bay.save.call_count)
+self.assertEqual(1, cluster.save.call_count)
-self.assertEqual(bay_status.UPDATE_COMPLETE, bay.status)
+self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status)
-self.assertEqual(2, bay.node_count)
+self.assertEqual(2, cluster.node_count)
self.assertEqual(1, poller.attempts)

def test_poll_done_by_update_failed(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.UPDATE_FAILED
+mock_heat_stack.stack_status = cluster_status.UPDATE_FAILED
mock_heat_stack.parameters = {'number_of_minions': 2}
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

-self.assertEqual(2, bay.save.call_count)
+self.assertEqual(2, cluster.save.call_count)
-self.assertEqual(bay_status.UPDATE_FAILED, bay.status)
+self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status)
-self.assertEqual(2, bay.node_count)
+self.assertEqual(2, cluster.node_count)
self.assertEqual(1, poller.attempts)

def test_poll_done_by_rollback_complete(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.ROLLBACK_COMPLETE
+mock_heat_stack.stack_status = cluster_status.ROLLBACK_COMPLETE
mock_heat_stack.parameters = {'number_of_minions': 1}
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

-self.assertEqual(2, bay.save.call_count)
+self.assertEqual(2, cluster.save.call_count)
-self.assertEqual(bay_status.ROLLBACK_COMPLETE, bay.status)
+self.assertEqual(cluster_status.ROLLBACK_COMPLETE, cluster.status)
-self.assertEqual(1, bay.node_count)
+self.assertEqual(1, cluster.node_count)
self.assertEqual(1, poller.attempts)

def test_poll_done_by_rollback_failed(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.ROLLBACK_FAILED
+mock_heat_stack.stack_status = cluster_status.ROLLBACK_FAILED
mock_heat_stack.parameters = {'number_of_minions': 1}
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

-self.assertEqual(2, bay.save.call_count)
+self.assertEqual(2, cluster.save.call_count)
-self.assertEqual(bay_status.ROLLBACK_FAILED, bay.status)
+self.assertEqual(cluster_status.ROLLBACK_FAILED, cluster.status)
-self.assertEqual(1, bay.node_count)
+self.assertEqual(1, cluster.node_count)
self.assertEqual(1, poller.attempts)

def test_poll_destroy(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.DELETE_FAILED
+mock_heat_stack.stack_status = cluster_status.DELETE_FAILED
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
# Destroy method is not called when stack delete failed
-self.assertEqual(0, bay.destroy.call_count)
+self.assertEqual(0, cluster.destroy.call_count)

-mock_heat_stack.stack_status = bay_status.DELETE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS
poller.poll_and_check()
-self.assertEqual(0, bay.destroy.call_count)
+self.assertEqual(0, cluster.destroy.call_count)
-self.assertEqual(bay_status.DELETE_IN_PROGRESS, bay.status)
+self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status)

-mock_heat_stack.stack_status = bay_status.DELETE_COMPLETE
+mock_heat_stack.stack_status = cluster_status.DELETE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)
-# The bay status should still be DELETE_IN_PROGRESS, because
-# the destroy() method may be failed. If success, this bay record
+# The cluster status should still be DELETE_IN_PROGRESS, because
+# the destroy() method may be failed. If success, this cluster record
# will delete directly, change status is meaningless.
-self.assertEqual(bay_status.DELETE_IN_PROGRESS, bay.status)
+self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status)
-self.assertEqual(1, bay.destroy.call_count)
+self.assertEqual(1, cluster.destroy.call_count)

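The comments in test_poll_destroy spell out the delete semantics: destroy() runs only once Heat reports DELETE_COMPLETE, and the in-memory status is deliberately left at DELETE_IN_PROGRESS because the DB record is about to disappear anyway. A rough sketch of the branch these assertions exercise; it is not lifted from cluster_conductor and the helper name is invented:

    # Hedged sketch of the poller's delete handling (names are illustrative).
    def handle_delete(stack_status, cluster):
        if stack_status == 'DELETE_COMPLETE':
            # Drop the DB record; no point updating the status of a row
            # that is being removed, so it stays DELETE_IN_PROGRESS.
            cluster.destroy()
        elif stack_status in ('DELETE_IN_PROGRESS', 'DELETE_FAILED'):
            # Keep the record and simply mirror the Heat stack status.
            cluster.status = stack_status
            cluster.save()
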
def test_poll_delete_in_progress_timeout_set(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.DELETE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS
mock_heat_stack.timeout_mins = 60
# timeout only affects stack creation so expecting this
# to process normally
poller.poll_and_check()

def test_poll_delete_in_progress_max_attempts_reached(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.DELETE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.DELETE_IN_PROGRESS
poller.attempts = cfg.CONF.cluster_heat.max_attempts
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

def test_poll_create_in_prog_max_att_reached_no_timeout(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.CREATE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
poller.attempts = cfg.CONF.cluster_heat.max_attempts
mock_heat_stack.timeout_mins = None
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

def test_poll_create_in_prog_max_att_reached_timeout_set(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.CREATE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
poller.attempts = cfg.CONF.cluster_heat.max_attempts
mock_heat_stack.timeout_mins = 60
# since the timeout is set the max attempts gets ignored since
@@ -690,83 +702,83 @@ class TestHeatPoller(base.TestCase):
poller.poll_and_check()

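Taken together, the polling tests above pin down the give-up rule: while a stack is still being created, a Heat-side timeout (timeout_mins) takes precedence and cluster_heat.max_attempts is ignored; without a timeout, or for a non-create operation such as a delete, polling stops once max_attempts is exhausted. A hedged sketch of that rule; the helper name is invented, not the conductor's:

    # Illustrative only: when should the HeatPoller keep polling?
    def should_keep_polling(stack_status, timeout_mins, attempts, max_attempts):
        if stack_status == 'CREATE_IN_PROGRESS' and timeout_mins:
            # Heat enforces its own stack timeout, so attempts are not capped.
            return True
        # Otherwise give up after the configured number of attempts.
        return attempts < max_attempts
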
def test_poll_create_in_prog_max_att_reached_timed_out(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.CREATE_FAILED
+mock_heat_stack.stack_status = cluster_status.CREATE_FAILED
poller.attempts = cfg.CONF.cluster_heat.max_attempts
mock_heat_stack.timeout_mins = 60
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

def test_poll_create_in_prog_max_att_not_reached_no_timeout(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.CREATE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
mock_heat_stack.timeout.mins = None
poller.poll_and_check()

def test_poll_create_in_prog_max_att_not_reached_timeout_set(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.CREATE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
mock_heat_stack.timeout_mins = 60
poller.poll_and_check()

def test_poll_create_in_prog_max_att_not_reached_timed_out(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

-mock_heat_stack.stack_status = bay_status.CREATE_FAILED
+mock_heat_stack.stack_status = cluster_status.CREATE_FAILED
mock_heat_stack.timeout_mins = 60
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

def test_poll_node_count(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

mock_heat_stack.parameters = {'number_of_minions': 1}
-mock_heat_stack.stack_status = bay_status.CREATE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
poller.poll_and_check()

-self.assertEqual(1, bay.node_count)
+self.assertEqual(1, cluster.node_count)

def test_poll_node_count_by_update(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

mock_heat_stack.parameters = {'number_of_minions': 2}
-mock_heat_stack.stack_status = bay_status.UPDATE_COMPLETE
+mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

-self.assertEqual(2, bay.node_count)
+self.assertEqual(2, cluster.node_count)

-@patch('magnum.conductor.handlers.bay_conductor.trust_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.trust_manager')
-@patch('magnum.conductor.handlers.bay_conductor.cert_manager')
+@patch('magnum.conductor.handlers.cluster_conductor.cert_manager')
def test_delete_complete(self, cert_manager, trust_manager):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()
poller._delete_complete()
-self.assertEqual(1, bay.destroy.call_count)
+self.assertEqual(1, cluster.destroy.call_count)
self.assertEqual(
1, cert_manager.delete_certificates_from_cluster.call_count)
self.assertEqual(1,
trust_manager.delete_trustee_and_trust.call_count)

def test_create_or_complete(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()
-mock_heat_stack.stack_status = bay_status.CREATE_COMPLETE
+mock_heat_stack.stack_status = cluster_status.CREATE_COMPLETE
mock_heat_stack.stack_status_reason = 'stack complete'
-poller._sync_bay_and_template_status(mock_heat_stack)
+poller._sync_cluster_and_template_status(mock_heat_stack)
-self.assertEqual('stack complete', bay.status_reason)
+self.assertEqual('stack complete', cluster.status_reason)
-self.assertEqual(bay_status.CREATE_COMPLETE, bay.status)
+self.assertEqual(cluster_status.CREATE_COMPLETE, cluster.status)
-self.assertEqual(1, bay.save.call_count)
+self.assertEqual(1, cluster.save.call_count)

-def test_sync_bay_status(self):
+def test_sync_cluster_status(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()
-mock_heat_stack.stack_status = bay_status.CREATE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
mock_heat_stack.stack_status_reason = 'stack incomplete'
-poller._sync_bay_status(mock_heat_stack)
+poller._sync_cluster_status(mock_heat_stack)
-self.assertEqual('stack incomplete', bay.status_reason)
+self.assertEqual('stack incomplete', cluster.status_reason)
-self.assertEqual(bay_status.CREATE_IN_PROGRESS, bay.status)
+self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status)

-@patch('magnum.conductor.handlers.bay_conductor.LOG')
+@patch('magnum.conductor.handlers.cluster_conductor.LOG')
-def test_bay_failed(self, logger):
+def test_cluster_failed(self, logger):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()
-poller._sync_bay_and_template_status(mock_heat_stack)
+poller._sync_cluster_and_template_status(mock_heat_stack)
-poller._bay_failed(mock_heat_stack)
+poller._cluster_failed(mock_heat_stack)
self.assertEqual(1, logger.error.call_count)
@@ -16,14 +16,14 @@ import mock
from mock import patch
from oslo_config import cfg

-from magnum.conductor.handlers import bay_conductor
+from magnum.conductor.handlers import cluster_conductor
from magnum import objects
from magnum.tests import base


-class TestBayConductorWithK8s(base.TestCase):
+class TestClusterConductorWithK8s(base.TestCase):
def setUp(self):
-super(TestBayConductorWithK8s, self).setUp()
+super(TestClusterConductorWithK8s, self).setUp()
self.cluster_template_dict = {
'image_id': 'image_id',
'flavor_id': 'flavor_id',
@@ -51,10 +51,10 @@ class TestBayConductorWithK8s(base.TestCase):
'master_lb_enabled': False,
'floating_ip_enabled': False,
}
-self.bay_dict = {
+self.cluster_dict = {
'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
-'baymodel_id': 'xx-xx-xx-xx',
+'cluster_template_id': 'xx-xx-xx-xx',
-'name': 'bay1',
+'name': 'cluster1',
'stack_id': 'xx-xx-xx-xx',
'api_address': '172.17.2.3',
'node_addresses': ['172.17.2.4'],
@@ -100,8 +100,8 @@ class TestBayConductorWithK8s(base.TestCase):
missing_attr=None):
if missing_attr in self.cluster_template_dict:
self.cluster_template_dict[missing_attr] = None
-elif missing_attr in self.bay_dict:
+elif missing_attr in self.cluster_dict:
-self.bay_dict[missing_attr] = None
+self.cluster_dict[missing_attr] = None
cluster_template = objects.ClusterTemplate(
self.context, **self.cluster_template_dict)
mock_objects_cluster_template_get_by_uuid.return_value = \
@@ -111,12 +111,12 @@ class TestBayConductorWithK8s(base.TestCase):
mock_resp = mock.MagicMock()
mock_resp.text = expected_result
mock_get.return_value = mock_resp
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

mapping = {
'dns_nameserver': 'dns_nameserver',
@@ -137,7 +137,7 @@ class TestBayConductorWithK8s(base.TestCase):
'http_proxy': 'http_proxy',
'https_proxy': 'https_proxy',
'no_proxy': 'no_proxy',
-'bay_uuid': self.bay_dict['uuid'],
+'cluster_uuid': self.cluster_dict['uuid'],
'magnum_url': self.mock_osc.magnum_url.return_value,
'tls_disabled': False,
'insecure_registry': '10.0.0.1:5000',
@@ -164,7 +164,7 @@ class TestBayConductorWithK8s(base.TestCase):
'no_proxy': 'no_proxy',
'tenant_name': 'fake_tenant',
'username': 'fake_user',
-'bay_uuid': self.bay_dict['uuid'],
+'cluster_uuid': self.cluster_dict['uuid'],
'magnum_url': self.mock_osc.magnum_url.return_value,
'region_name': self.mock_osc.cinder_region_name.return_value,
'tls_disabled': False,
@@ -203,7 +203,7 @@ class TestBayConductorWithK8s(base.TestCase):
mock_resp = mock.MagicMock()
mock_resp.text = expected_result
mock_get.return_value = mock_resp
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

cfg.CONF.set_override('swift_region',
'RegionOne',
@@ -211,12 +211,12 @@ class TestBayConductorWithK8s(base.TestCase):

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'auth_url': 'http://192.168.10.10:5000/v3',
-'bay_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
+'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
'discovery_url': 'https://discovery.etcd.io/test',
'dns_nameserver': 'dns_nameserver',
'docker_storage_driver': 'devicemapper',
@@ -275,12 +275,12 @@ class TestBayConductorWithK8s(base.TestCase):
mock_resp = mock.MagicMock()
mock_resp.text = expected_result
mock_get.return_value = mock_resp
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'ssh_key_name': 'keypair_id',
@@ -308,7 +308,7 @@ class TestBayConductorWithK8s(base.TestCase):
'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656',
'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de',
'auth_url': 'http://192.168.10.10:5000/v3',
-'bay_uuid': self.bay_dict['uuid'],
+'cluster_uuid': self.cluster_dict['uuid'],
'magnum_url': self.mock_osc.magnum_url.return_value,
'insecure_registry_url': '10.0.0.1:5000',
'kube_version': 'fake-version',
@@ -325,19 +325,19 @@ class TestBayConductorWithK8s(base.TestCase):
mock_objects_cluster_template_get_by_uuid,
reqget):
self.cluster_template_dict['cluster_distro'] = 'coreos'
-self.bay_dict['discovery_url'] = None
+self.cluster_dict['discovery_url'] = None
mock_req = mock.MagicMock(text='http://tokentest/h1/h2/h3')
reqget.return_value = mock_req
cluster_template = objects.ClusterTemplate(
self.context, **self.cluster_template_dict)
mock_objects_cluster_template_get_by_uuid.return_value = \
cluster_template
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'ssh_key_name': 'keypair_id',
@@ -365,7 +365,7 @@ class TestBayConductorWithK8s(base.TestCase):
'trustee_user_id': '7b489f04-b458-4541-8179-6a48a553e656',
'trust_id': 'bd11efc5-d4e2-4dac-bbce-25e348ddf7de',
|
||||||
'auth_url': 'http://192.168.10.10:5000/v3',
|
'auth_url': 'http://192.168.10.10:5000/v3',
|
||||||
'bay_uuid': self.bay_dict['uuid'],
|
'cluster_uuid': self.cluster_dict['uuid'],
|
||||||
'magnum_url': self.mock_osc.magnum_url.return_value,
|
'magnum_url': self.mock_osc.magnum_url.return_value,
|
||||||
'insecure_registry_url': '10.0.0.1:5000',
|
'insecure_registry_url': '10.0.0.1:5000',
|
||||||
'kube_version': 'fake-version',
|
'kube_version': 'fake-version',
|
||||||
@ -484,9 +484,9 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
self.context, **self.cluster_template_dict)
|
self.context, **self.cluster_template_dict)
|
||||||
mock_objects_cluster_template_get_by_uuid.return_value = \
|
mock_objects_cluster_template_get_by_uuid.return_value = \
|
||||||
cluster_template
|
cluster_template
|
||||||
bay_dict = self.bay_dict
|
cluster_dict = self.cluster_dict
|
||||||
bay_dict['discovery_url'] = None
|
cluster_dict['discovery_url'] = None
|
||||||
bay = objects.Bay(self.context, **bay_dict)
|
cluster = objects.Cluster(self.context, **cluster_dict)
|
||||||
|
|
||||||
cfg.CONF.set_override('etcd_discovery_service_endpoint_format',
|
cfg.CONF.set_override('etcd_discovery_service_endpoint_format',
|
||||||
'http://etcd/test?size=%(size)d',
|
'http://etcd/test?size=%(size)d',
|
||||||
@ -496,8 +496,8 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
|
|
||||||
(template_path,
|
(template_path,
|
||||||
definition,
|
definition,
|
||||||
env_files) = bay_conductor._extract_template_definition(self.context,
|
env_files) = cluster_conductor._extract_template_definition(
|
||||||
bay)
|
self.context, cluster)
|
||||||
|
|
||||||
expected = {
|
expected = {
|
||||||
'ssh_key_name': 'keypair_id',
|
'ssh_key_name': 'keypair_id',
|
||||||
@ -521,7 +521,7 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
'flannel_backend': 'vxlan',
|
'flannel_backend': 'vxlan',
|
||||||
'tenant_name': 'fake_tenant',
|
'tenant_name': 'fake_tenant',
|
||||||
'username': 'fake_user',
|
'username': 'fake_user',
|
||||||
'bay_uuid': self.bay_dict['uuid'],
|
'cluster_uuid': self.cluster_dict['uuid'],
|
||||||
'magnum_url': self.mock_osc.magnum_url.return_value,
|
'magnum_url': self.mock_osc.magnum_url.return_value,
|
||||||
'region_name': self.mock_osc.cinder_region_name.return_value,
|
'region_name': self.mock_osc.cinder_region_name.return_value,
|
||||||
'tls_disabled': False,
|
'tls_disabled': False,
|
||||||
@ -544,7 +544,7 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
|
|
||||||
@patch('magnum.common.short_id.generate_id')
|
@patch('magnum.common.short_id.generate_id')
|
||||||
@patch('heatclient.common.template_utils.get_template_contents')
|
@patch('heatclient.common.template_utils.get_template_contents')
|
||||||
@patch('magnum.conductor.handlers.bay_conductor'
|
@patch('magnum.conductor.handlers.cluster_conductor'
|
||||||
'._extract_template_definition')
|
'._extract_template_definition')
|
||||||
def test_create_stack(self,
|
def test_create_stack(self,
|
||||||
mock_extract_template_definition,
|
mock_extract_template_definition,
|
||||||
@ -554,7 +554,7 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
mock_generate_id.return_value = 'xx-xx-xx-xx'
|
mock_generate_id.return_value = 'xx-xx-xx-xx'
|
||||||
expected_stack_name = 'expected_stack_name-xx-xx-xx-xx'
|
expected_stack_name = 'expected_stack_name-xx-xx-xx-xx'
|
||||||
expected_template_contents = 'template_contents'
|
expected_template_contents = 'template_contents'
|
||||||
dummy_bay_name = 'expected_stack_name'
|
dummy_cluster_name = 'expected_stack_name'
|
||||||
expected_timeout = 15
|
expected_timeout = 15
|
||||||
|
|
||||||
mock_tpl_files = {}
|
mock_tpl_files = {}
|
||||||
@ -565,11 +565,11 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
mock_heat_client = mock.MagicMock()
|
mock_heat_client = mock.MagicMock()
|
||||||
mock_osc = mock.MagicMock()
|
mock_osc = mock.MagicMock()
|
||||||
mock_osc.heat.return_value = mock_heat_client
|
mock_osc.heat.return_value = mock_heat_client
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.name = dummy_bay_name
|
mock_cluster.name = dummy_cluster_name
|
||||||
|
|
||||||
bay_conductor._create_stack(self.context, mock_osc,
|
cluster_conductor._create_stack(self.context, mock_osc,
|
||||||
mock_bay, expected_timeout)
|
mock_cluster, expected_timeout)
|
||||||
|
|
||||||
expected_args = {
|
expected_args = {
|
||||||
'stack_name': expected_stack_name,
|
'stack_name': expected_stack_name,
|
||||||
@ -583,7 +583,7 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
|
|
||||||
@patch('magnum.common.short_id.generate_id')
|
@patch('magnum.common.short_id.generate_id')
|
||||||
@patch('heatclient.common.template_utils.get_template_contents')
|
@patch('heatclient.common.template_utils.get_template_contents')
|
||||||
@patch('magnum.conductor.handlers.bay_conductor'
|
@patch('magnum.conductor.handlers.cluster_conductor'
|
||||||
'._extract_template_definition')
|
'._extract_template_definition')
|
||||||
def test_create_stack_no_timeout_specified(
|
def test_create_stack_no_timeout_specified(
|
||||||
self,
|
self,
|
||||||
@ -594,7 +594,7 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
mock_generate_id.return_value = 'xx-xx-xx-xx'
|
mock_generate_id.return_value = 'xx-xx-xx-xx'
|
||||||
expected_stack_name = 'expected_stack_name-xx-xx-xx-xx'
|
expected_stack_name = 'expected_stack_name-xx-xx-xx-xx'
|
||||||
expected_template_contents = 'template_contents'
|
expected_template_contents = 'template_contents'
|
||||||
dummy_bay_name = 'expected_stack_name'
|
dummy_cluster_name = 'expected_stack_name'
|
||||||
expected_timeout = cfg.CONF.cluster_heat.create_timeout
|
expected_timeout = cfg.CONF.cluster_heat.create_timeout
|
||||||
|
|
||||||
mock_tpl_files = {}
|
mock_tpl_files = {}
|
||||||
@ -605,11 +605,11 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
mock_heat_client = mock.MagicMock()
|
mock_heat_client = mock.MagicMock()
|
||||||
mock_osc = mock.MagicMock()
|
mock_osc = mock.MagicMock()
|
||||||
mock_osc.heat.return_value = mock_heat_client
|
mock_osc.heat.return_value = mock_heat_client
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.name = dummy_bay_name
|
mock_cluster.name = dummy_cluster_name
|
||||||
|
|
||||||
bay_conductor._create_stack(self.context, mock_osc,
|
cluster_conductor._create_stack(self.context, mock_osc,
|
||||||
mock_bay, None)
|
mock_cluster, None)
|
||||||
|
|
||||||
expected_args = {
|
expected_args = {
|
||||||
'stack_name': expected_stack_name,
|
'stack_name': expected_stack_name,
|
||||||
@ -623,7 +623,7 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
|
|
||||||
@patch('magnum.common.short_id.generate_id')
|
@patch('magnum.common.short_id.generate_id')
|
||||||
@patch('heatclient.common.template_utils.get_template_contents')
|
@patch('heatclient.common.template_utils.get_template_contents')
|
||||||
@patch('magnum.conductor.handlers.bay_conductor'
|
@patch('magnum.conductor.handlers.cluster_conductor'
|
||||||
'._extract_template_definition')
|
'._extract_template_definition')
|
||||||
def test_create_stack_timeout_is_zero(
|
def test_create_stack_timeout_is_zero(
|
||||||
self,
|
self,
|
||||||
@ -634,8 +634,8 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
mock_generate_id.return_value = 'xx-xx-xx-xx'
|
mock_generate_id.return_value = 'xx-xx-xx-xx'
|
||||||
expected_stack_name = 'expected_stack_name-xx-xx-xx-xx'
|
expected_stack_name = 'expected_stack_name-xx-xx-xx-xx'
|
||||||
expected_template_contents = 'template_contents'
|
expected_template_contents = 'template_contents'
|
||||||
dummy_bay_name = 'expected_stack_name'
|
dummy_cluster_name = 'expected_stack_name'
|
||||||
bay_timeout = 0
|
cluster_timeout = 0
|
||||||
expected_timeout = cfg.CONF.cluster_heat.create_timeout
|
expected_timeout = cfg.CONF.cluster_heat.create_timeout
|
||||||
|
|
||||||
mock_tpl_files = {}
|
mock_tpl_files = {}
|
||||||
@ -646,11 +646,11 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
mock_heat_client = mock.MagicMock()
|
mock_heat_client = mock.MagicMock()
|
||||||
mock_osc = mock.MagicMock()
|
mock_osc = mock.MagicMock()
|
||||||
mock_osc.heat.return_value = mock_heat_client
|
mock_osc.heat.return_value = mock_heat_client
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.name = dummy_bay_name
|
mock_cluster.name = dummy_cluster_name
|
||||||
|
|
||||||
bay_conductor._create_stack(self.context, mock_osc,
|
cluster_conductor._create_stack(self.context, mock_osc,
|
||||||
mock_bay, bay_timeout)
|
mock_cluster, cluster_timeout)
|
||||||
|
|
||||||
expected_args = {
|
expected_args = {
|
||||||
'stack_name': expected_stack_name,
|
'stack_name': expected_stack_name,
|
||||||
@ -663,7 +663,7 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
mock_heat_client.stacks.create.assert_called_once_with(**expected_args)
|
mock_heat_client.stacks.create.assert_called_once_with(**expected_args)
|
||||||
|
|
||||||
@patch('heatclient.common.template_utils.get_template_contents')
|
@patch('heatclient.common.template_utils.get_template_contents')
|
||||||
@patch('magnum.conductor.handlers.bay_conductor'
|
@patch('magnum.conductor.handlers.cluster_conductor'
|
||||||
'._extract_template_definition')
|
'._extract_template_definition')
|
||||||
def test_update_stack(self,
|
def test_update_stack(self,
|
||||||
mock_extract_template_definition,
|
mock_extract_template_definition,
|
||||||
@ -680,10 +680,10 @@ class TestBayConductorWithK8s(base.TestCase):
|
|||||||
mock_heat_client = mock.MagicMock()
|
mock_heat_client = mock.MagicMock()
|
||||||
mock_osc = mock.MagicMock()
|
mock_osc = mock.MagicMock()
|
||||||
mock_osc.heat.return_value = mock_heat_client
|
mock_osc.heat.return_value = mock_heat_client
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.stack_id = mock_stack_id
|
mock_cluster.stack_id = mock_stack_id
|
||||||
|
|
||||||
bay_conductor._update_stack({}, mock_osc, mock_bay)
|
cluster_conductor._update_stack({}, mock_osc, mock_cluster)
|
||||||
|
|
||||||
expected_args = {
|
expected_args = {
|
||||||
'parameters': {},
|
'parameters': {},
|
@@ -16,15 +16,15 @@ import mock
from mock import patch
from oslo_service import loopingcall

-from magnum.conductor.handlers import bay_conductor
+from magnum.conductor.handlers import cluster_conductor
from magnum import objects
-from magnum.objects.fields import BayStatus as bay_status
+from magnum.objects.fields import ClusterStatus as cluster_status
from magnum.tests import base


-class TestBayConductorWithMesos(base.TestCase):
+class TestClusterConductorWithMesos(base.TestCase):
def setUp(self):
-super(TestBayConductorWithMesos, self).setUp()
+super(TestClusterConductorWithMesos, self).setUp()
self.cluster_template_dict = {
'image_id': 'image_id',
'flavor_id': 'flavor_id',
@@ -48,11 +48,11 @@ class TestBayConductorWithMesos(base.TestCase):
},
'master_lb_enabled': False,
}
-self.bay_dict = {
+self.cluster_dict = {
'id': 1,
'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
-'baymodel_id': 'xx-xx-xx-xx',
-'name': 'bay1',
+'cluster_template_id': 'xx-xx-xx-xx',
+'name': 'cluster1',
'stack_id': 'xx-xx-xx-xx',
'api_address': '172.17.2.3',
'node_addresses': ['172.17.2.4'],
@@ -85,12 +85,12 @@ class TestBayConductorWithMesos(base.TestCase):
self.context, **self.cluster_template_dict)
mock_objects_cluster_template_get_by_uuid.return_value = \
cluster_template
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'ssh_key_name': 'keypair_id',
@@ -104,7 +104,7 @@ class TestBayConductorWithMesos(base.TestCase):
'http_proxy': 'http_proxy',
'https_proxy': 'https_proxy',
'no_proxy': 'no_proxy',
-'cluster_name': 'bay1',
+'cluster_name': 'cluster1',
'trustee_domain_id': self.mock_keystone.trustee_domain_id,
'trustee_username': 'fake_trustee',
'trustee_password': 'fake_trustee_password',
@@ -141,19 +141,19 @@ class TestBayConductorWithMesos(base.TestCase):
self.context, **self.cluster_template_dict)
mock_objects_cluster_template_get_by_uuid.return_value = \
cluster_template
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'ssh_key_name': 'keypair_id',
'external_network': 'external_network_id',
'number_of_slaves': 1,
'number_of_masters': 1,
-'cluster_name': 'bay1',
+'cluster_name': 'cluster1',
'trustee_domain_id': self.mock_keystone.trustee_domain_id,
'trustee_username': 'fake_trustee',
'trustee_password': 'fake_trustee_password',
@@ -184,12 +184,12 @@ class TestBayConductorWithMesos(base.TestCase):
self.context, **self.cluster_template_dict)
mock_objects_cluster_template_get_by_uuid.return_value = \
cluster_template
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'ssh_key_name': 'keypair_id',
@@ -203,7 +203,7 @@ class TestBayConductorWithMesos(base.TestCase):
'http_proxy': 'http_proxy',
'https_proxy': 'https_proxy',
'no_proxy': 'no_proxy',
-'cluster_name': 'bay1',
+'cluster_name': 'cluster1',
'trustee_domain_id': self.mock_keystone.trustee_domain_id,
'trustee_username': 'fake_trustee',
'trustee_password': 'fake_trustee_password',
@@ -231,17 +231,17 @@ class TestBayConductorWithMesos(base.TestCase):
self,
mock_objects_cluster_template_get_by_uuid):
self.cluster_template_dict['master_lb_enabled'] = True
-self.bay_dict['master_count'] = 2
+self.cluster_dict['master_count'] = 2
cluster_template = objects.ClusterTemplate(
self.context, **self.cluster_template_dict)
mock_objects_cluster_template_get_by_uuid.return_value = \
cluster_template
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'ssh_key_name': 'keypair_id',
@@ -255,7 +255,7 @@ class TestBayConductorWithMesos(base.TestCase):
'http_proxy': 'http_proxy',
'https_proxy': 'https_proxy',
'no_proxy': 'no_proxy',
-'cluster_name': 'bay1',
+'cluster_name': 'cluster1',
'trustee_domain_id': self.mock_keystone.trustee_domain_id,
'trustee_username': 'fake_trustee',
'trustee_password': 'fake_trustee_password',
@@ -285,7 +285,7 @@ class TestBayConductorWithMesos(base.TestCase):
mock_retrieve_cluster_template):
cfg.CONF.cluster_heat.max_attempts = 10

-bay = mock.MagicMock()
+cluster = mock.MagicMock()
mock_heat_stack = mock.MagicMock()
mock_heat_client = mock.MagicMock()
mock_heat_client.stacks.get.return_value = mock_heat_stack
@@ -293,24 +293,24 @@ class TestBayConductorWithMesos(base.TestCase):
cluster_template = objects.ClusterTemplate(
self.context, **self.cluster_template_dict)
mock_retrieve_cluster_template.return_value = cluster_template
-poller = bay_conductor.HeatPoller(mock_openstack_client, bay)
+poller = cluster_conductor.HeatPoller(mock_openstack_client, cluster)
poller.get_version_info = mock.MagicMock()
-return (mock_heat_stack, bay, poller)
+return (mock_heat_stack, cluster, poller)

def test_poll_node_count(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

mock_heat_stack.parameters = {'number_of_slaves': 1}
-mock_heat_stack.stack_status = bay_status.CREATE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
poller.poll_and_check()

-self.assertEqual(1, bay.node_count)
+self.assertEqual(1, cluster.node_count)

def test_poll_node_count_by_update(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

mock_heat_stack.parameters = {'number_of_slaves': 2}
-mock_heat_stack.stack_status = bay_status.UPDATE_COMPLETE
+mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

-self.assertEqual(2, bay.node_count)
+self.assertEqual(2, cluster.node_count)
@@ -17,15 +17,15 @@ from mock import patch
from oslo_config import cfg
from oslo_service import loopingcall

-from magnum.conductor.handlers import bay_conductor
+from magnum.conductor.handlers import cluster_conductor
from magnum import objects
-from magnum.objects.fields import BayStatus as bay_status
+from magnum.objects.fields import ClusterStatus as cluster_status
from magnum.tests import base


-class TestBayConductorWithSwarm(base.TestCase):
+class TestClusterConductorWithSwarm(base.TestCase):
def setUp(self):
-super(TestBayConductorWithSwarm, self).setUp()
+super(TestClusterConductorWithSwarm, self).setUp()
self.cluster_template_dict = {
'image_id': 'image_id',
'flavor_id': 'flavor_id',
@@ -51,11 +51,11 @@ class TestBayConductorWithSwarm(base.TestCase):
'master_lb_enabled': False,
'volume_driver': 'rexray'
}
-self.bay_dict = {
+self.cluster_dict = {
'id': 1,
'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
-'baymodel_id': 'xx-xx-xx-xx',
-'name': 'bay1',
+'cluster_template_id': 'xx-xx-xx-xx',
+'name': 'cluster1',
'stack_id': 'xx-xx-xx-xx',
'api_address': '172.17.2.3',
'node_addresses': ['172.17.2.4'],
@@ -94,12 +94,12 @@ class TestBayConductorWithSwarm(base.TestCase):
mock_resp = mock.MagicMock()
mock_resp.text = expected_result
mock_get.return_value = mock_resp
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'ssh_key_name': 'keypair_id',
@@ -116,7 +116,7 @@ class TestBayConductorWithSwarm(base.TestCase):
'http_proxy': 'http_proxy',
'https_proxy': 'https_proxy',
'no_proxy': 'no_proxy',
-'bay_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
+'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
'magnum_url': self.mock_osc.magnum_url.return_value,
'tls_disabled': False,
'registry_enabled': False,
@@ -155,7 +155,7 @@ class TestBayConductorWithSwarm(base.TestCase):
mock_resp = mock.MagicMock()
mock_resp.text = expected_result
mock_get.return_value = mock_resp
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

cfg.CONF.set_override('swift_region',
'RegionOne',
@@ -163,8 +163,8 @@ class TestBayConductorWithSwarm(base.TestCase):

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'ssh_key_name': 'keypair_id',
@@ -180,7 +180,7 @@ class TestBayConductorWithSwarm(base.TestCase):
'http_proxy': 'http_proxy',
'https_proxy': 'https_proxy',
'no_proxy': 'no_proxy',
-'bay_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
+'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
'magnum_url': self.mock_osc.magnum_url.return_value,
'tls_disabled': False,
'registry_enabled': True,
@@ -220,7 +220,7 @@ class TestBayConductorWithSwarm(base.TestCase):
'volume_driver', 'rexray_preempt']
for key in not_required:
self.cluster_template_dict[key] = None
-self.bay_dict['discovery_url'] = 'https://discovery.etcd.io/test'
+self.cluster_dict['discovery_url'] = 'https://discovery.etcd.io/test'

cluster_template = objects.ClusterTemplate(
self.context, **self.cluster_template_dict)
@@ -231,12 +231,12 @@ class TestBayConductorWithSwarm(base.TestCase):
mock_resp = mock.MagicMock()
mock_resp.text = expected_result
mock_get.return_value = mock_resp
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'ssh_key_name': 'keypair_id',
@@ -244,7 +244,7 @@ class TestBayConductorWithSwarm(base.TestCase):
'number_of_masters': 1,
'number_of_nodes': 1,
'discovery_url': 'https://discovery.etcd.io/test',
-'bay_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
+'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
'magnum_url': self.mock_osc.magnum_url.return_value,
'tls_disabled': False,
'registry_enabled': False,
@@ -281,12 +281,12 @@ class TestBayConductorWithSwarm(base.TestCase):
mock_resp = mock.MagicMock()
mock_resp.text = expected_result
mock_get.return_value = mock_resp
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'ssh_key_name': 'keypair_id',
@@ -303,7 +303,7 @@ class TestBayConductorWithSwarm(base.TestCase):
'http_proxy': 'http_proxy',
'https_proxy': 'https_proxy',
'no_proxy': 'no_proxy',
-'bay_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
+'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
'magnum_url': self.mock_osc.magnum_url.return_value,
'tls_disabled': False,
'registry_enabled': False,
@@ -333,7 +333,7 @@ class TestBayConductorWithSwarm(base.TestCase):
mock_objects_cluster_template_get_by_uuid,
mock_get):
self.cluster_template_dict['master_lb_enabled'] = True
-self.bay_dict['master_count'] = 2
+self.cluster_dict['master_count'] = 2
cluster_template = objects.ClusterTemplate(
self.context, **self.cluster_template_dict)
mock_objects_cluster_template_get_by_uuid.return_value = \
@@ -343,12 +343,12 @@ class TestBayConductorWithSwarm(base.TestCase):
mock_resp = mock.MagicMock()
mock_resp.text = expected_result
mock_get.return_value = mock_resp
-bay = objects.Bay(self.context, **self.bay_dict)
+cluster = objects.Cluster(self.context, **self.cluster_dict)

(template_path,
definition,
-env_files) = bay_conductor._extract_template_definition(self.context,
-bay)
+env_files) = cluster_conductor._extract_template_definition(
+self.context, cluster)

expected = {
'ssh_key_name': 'keypair_id',
@@ -365,7 +365,7 @@ class TestBayConductorWithSwarm(base.TestCase):
'http_proxy': 'http_proxy',
'https_proxy': 'https_proxy',
'no_proxy': 'no_proxy',
-'bay_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
+'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
'magnum_url': self.mock_osc.magnum_url.return_value,
'tls_disabled': False,
'registry_enabled': False,
@@ -395,7 +395,7 @@ class TestBayConductorWithSwarm(base.TestCase):
mock_retrieve_cluster_template):
cfg.CONF.cluster_heat.max_attempts = 10

-bay = mock.MagicMock()
+cluster = mock.MagicMock()
mock_heat_stack = mock.MagicMock()
mock_heat_client = mock.MagicMock()
mock_heat_client.stacks.get.return_value = mock_heat_stack
@@ -404,24 +404,24 @@ class TestBayConductorWithSwarm(base.TestCase):
self.context, **self.cluster_template_dict)
mock_retrieve_cluster_template.return_value = \
cluster_template
-poller = bay_conductor.HeatPoller(mock_openstack_client, bay)
+poller = cluster_conductor.HeatPoller(mock_openstack_client, cluster)
poller.get_version_info = mock.MagicMock()
-return (mock_heat_stack, bay, poller)
+return (mock_heat_stack, cluster, poller)

def test_poll_node_count(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

mock_heat_stack.parameters = {'number_of_nodes': 1}
-mock_heat_stack.stack_status = bay_status.CREATE_IN_PROGRESS
+mock_heat_stack.stack_status = cluster_status.CREATE_IN_PROGRESS
poller.poll_and_check()

-self.assertEqual(1, bay.node_count)
+self.assertEqual(1, cluster.node_count)

def test_poll_node_count_by_update(self):
-mock_heat_stack, bay, poller = self.setup_poll_test()
+mock_heat_stack, cluster, poller = self.setup_poll_test()

mock_heat_stack.parameters = {'number_of_nodes': 2}
-mock_heat_stack.stack_status = bay_status.UPDATE_COMPLETE
+mock_heat_stack.stack_status = cluster_status.UPDATE_COMPLETE
self.assertRaises(loopingcall.LoopingCallDone, poller.poll_and_check)

-self.assertEqual(2, bay.node_count)
+self.assertEqual(2, cluster.node_count)
@@ -42,14 +42,14 @@ class MonitorsTestCase(base.TestCase):
def setUp(self):
super(MonitorsTestCase, self).setUp()

-bay = utils.get_test_bay(node_addresses=['1.2.3.4'],
+cluster = utils.get_test_cluster(node_addresses=['1.2.3.4'],
api_address='https://5.6.7.8:2376',
master_addresses=['10.0.0.6'])
-self.bay = objects.Bay(self.context, **bay)
-self.monitor = swarm_monitor.SwarmMonitor(self.context, self.bay)
-self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.bay)
+self.cluster = objects.Cluster(self.context, **cluster)
+self.monitor = swarm_monitor.SwarmMonitor(self.context, self.cluster)
+self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.cluster)
self.mesos_monitor = mesos_monitor.MesosMonitor(self.context,
-self.bay)
+self.cluster)
p = mock.patch('magnum.conductor.swarm_monitor.SwarmMonitor.'
'metrics_spec', new_callable=mock.PropertyMock)
self.mock_metrics_spec = p.start()
@@ -57,31 +57,32 @@ class MonitorsTestCase(base.TestCase):
self.addCleanup(p.stop)

def test_create_monitor_success(self):
-self.bay.cluster_template = obj_utils.get_test_cluster_template(
-self.context, uuid=self.bay.baymodel_id, coe='swarm')
-monitor = monitors.create_monitor(self.context, self.bay)
+self.cluster.cluster_template = obj_utils.get_test_cluster_template(
+self.context, uuid=self.cluster.cluster_template_id, coe='swarm')
+monitor = monitors.create_monitor(self.context, self.cluster)
self.assertIsInstance(monitor, swarm_monitor.SwarmMonitor)

-def test_create_monitor_k8s_bay(self):
-self.bay.cluster_template = obj_utils.get_test_cluster_template(
-self.context, uuid=self.bay.baymodel_id, coe='kubernetes')
-monitor = monitors.create_monitor(self.context, self.bay)
+def test_create_monitor_k8s_cluster(self):
+self.cluster.cluster_template = obj_utils.get_test_cluster_template(
+self.context, uuid=self.cluster.cluster_template_id,
+coe='kubernetes')
+monitor = monitors.create_monitor(self.context, self.cluster)
self.assertIsInstance(monitor, k8s_monitor.K8sMonitor)

-def test_create_monitor_mesos_bay(self):
-self.bay.cluster_template = obj_utils.get_test_cluster_template(
-self.context, uuid=self.bay.baymodel_id, coe='mesos')
-monitor = monitors.create_monitor(self.context, self.bay)
+def test_create_monitor_mesos_cluster(self):
+self.cluster.cluster_template = obj_utils.get_test_cluster_template(
+self.context, uuid=self.cluster.cluster_template_id, coe='mesos')
+monitor = monitors.create_monitor(self.context, self.cluster)
self.assertIsInstance(monitor, mesos_monitor.MesosMonitor)

-@mock.patch('magnum.common.docker_utils.docker_for_bay')
-def test_swarm_monitor_pull_data_success(self, mock_docker_for_bay):
+@mock.patch('magnum.common.docker_utils.docker_for_cluster')
+def test_swarm_monitor_pull_data_success(self, mock_docker_cluster):
mock_docker = mock.MagicMock()
mock_docker.info.return_value = {'DriverStatus': [[
u' \u2514 Reserved Memory', u'0 B / 1 GiB']]}
mock_docker.containers.return_value = [mock.MagicMock()]
mock_docker.inspect_container.return_value = 'test_container'
-mock_docker_for_bay.return_value.__enter__.return_value = mock_docker
+mock_docker_cluster.return_value.__enter__.return_value = mock_docker

self.monitor.pull_data()

@@ -89,15 +90,15 @@ class MonitorsTestCase(base.TestCase):
self.monitor.data['nodes'])
self.assertEqual(['test_container'], self.monitor.data['containers'])

-@mock.patch('magnum.common.docker_utils.docker_for_bay')
-def test_swarm_monitor_pull_data_raise(self, mock_docker_for_bay):
+@mock.patch('magnum.common.docker_utils.docker_for_cluster')
+def test_swarm_monitor_pull_data_raise(self, mock_docker_cluster):
mock_container = mock.MagicMock()
mock_docker = mock.MagicMock()
mock_docker.info.return_value = {'DriverStatus': [[
u' \u2514 Reserved Memory', u'0 B / 1 GiB']]}
mock_docker.containers.return_value = [mock_container]
mock_docker.inspect_container.side_effect = Exception("inspect error")
-mock_docker_for_bay.return_value.__enter__.return_value = mock_docker
+mock_docker_cluster.return_value.__enter__.return_value = mock_docker

self.monitor.pull_data()

@@ -288,7 +289,7 @@ class MonitorsTestCase(base.TestCase):

@mock.patch('magnum.common.urlfetch.get')
def test_mesos_monitor_pull_data_success_no_master(self, mock_url_get):
-self.bay.master_addresses = []
+self.cluster.master_addresses = []
self._test_mesos_monitor_pull_data(mock_url_get, {}, 0, 0, 0, 0)

def test_mesos_monitor_get_metric_names(self):
@@ -27,9 +27,9 @@ class RPCAPITestCase(base.DbTestCase):

def setUp(self):
super(RPCAPITestCase, self).setUp()
-self.fake_bay = dbutils.get_test_bay(driver='fake-driver')
+self.fake_cluster = dbutils.get_test_cluster(driver='fake-driver')
self.fake_certificate = objects.Certificate.from_db_cluster(
-self.fake_bay)
+self.fake_cluster)
self.fake_certificate.csr = 'fake-csr'

def _test_rpcapi(self, method, rpc_method, **kwargs):
@@ -73,29 +73,29 @@ class RPCAPITestCase(base.DbTestCase):
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(expected_arg, arg)

-def test_bay_create(self):
-self._test_rpcapi('bay_create',
+def test_cluster_create(self):
+self._test_rpcapi('cluster_create',
'call',
version='1.0',
-bay=self.fake_bay,
-bay_create_timeout=15)
+cluster=self.fake_cluster,
+create_timeout=15)

-def test_bay_delete(self):
-self._test_rpcapi('bay_delete',
+def test_cluster_delete(self):
+self._test_rpcapi('cluster_delete',
'call',
version='1.0',
-uuid=self.fake_bay['uuid'])
+uuid=self.fake_cluster['uuid'])

-self._test_rpcapi('bay_delete',
+self._test_rpcapi('cluster_delete',
'call',
version='1.1',
-uuid=self.fake_bay['name'])
+uuid=self.fake_cluster['name'])

-def test_bay_update(self):
-self._test_rpcapi('bay_update',
+def test_cluster_update(self):
+self._test_rpcapi('cluster_update',
'call',
version='1.1',
-bay=self.fake_bay['name'])
+cluster=self.fake_cluster['name'])

def test_ping_conductor(self):
self._test_rpcapi('ping_conductor',
@@ -107,11 +107,11 @@ class RPCAPITestCase(base.DbTestCase):
self._test_rpcapi('sign_certificate',
'call',
version='1.0',
-cluster=self.fake_bay,
+cluster=self.fake_cluster,
certificate=self.fake_certificate)

def test_get_ca_certificate(self):
self._test_rpcapi('get_ca_certificate',
'call',
version='1.0',
-cluster=self.fake_bay)
+cluster=self.fake_cluster)
@@ -49,9 +49,9 @@ class TestScaleManager(base.TestCase):
mock_osc.heat.return_value = mock_heat_client

mock_context = mock.MagicMock()
-mock_bay = mock.MagicMock()
+mock_cluster = mock.MagicMock()
scale_mgr = scale_manager.ScaleManager(mock_context, mock_osc,
-mock_bay)
+mock_cluster)

if expected_removal_hosts is None:
self.assertRaises(exception.MagnumException,
@@ -61,9 +61,9 @@ class TestScaleManager(base.TestCase):
self.assertEqual(expected_removal_hosts, removal_hosts)
if num_of_removal > 0:
mock_create_k8s_api.assert_called_once_with(mock_context,
-mock_bay)
+mock_cluster)

-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
@mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
@mock.patch('magnum.conductor.scale_manager.ScaleManager.'
'_get_num_of_removal')
@@ -82,7 +82,7 @@ class TestScaleManager(base.TestCase):
mock_get_by_uuid, is_scale_down, num_of_removal, hosts, pods,
expected_removal_hosts)

-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
@mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
@mock.patch('magnum.conductor.scale_manager.ScaleManager.'
'_get_num_of_removal')
@@ -101,7 +101,7 @@ class TestScaleManager(base.TestCase):
mock_get_by_uuid, is_scale_down, num_of_removal, hosts, pods,
expected_removal_hosts)

-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
@mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
@mock.patch('magnum.conductor.scale_manager.ScaleManager.'
'_get_num_of_removal')
@@ -120,7 +120,7 @@ class TestScaleManager(base.TestCase):
mock_get_by_uuid, is_scale_down, num_of_removal, hosts, pods,
expected_removal_hosts)

-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
@mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
@mock.patch('magnum.conductor.scale_manager.ScaleManager.'
'_get_num_of_removal')
@@ -139,7 +139,7 @@ class TestScaleManager(base.TestCase):
mock_get_by_uuid, is_scale_down, num_of_removal, hosts, pods,
expected_removal_hosts)

-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
@mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
@mock.patch('magnum.conductor.scale_manager.ScaleManager.'
'_get_num_of_removal')
@@ -158,7 +158,7 @@ class TestScaleManager(base.TestCase):
mock_get_by_uuid, is_scale_down, num_of_removal, hosts, pods,
expected_removal_hosts)

-@mock.patch('magnum.objects.Bay.get_by_uuid')
+@mock.patch('magnum.objects.Cluster.get_by_uuid')
@mock.patch('magnum.conductor.scale_manager.ScaleManager._is_scale_down')
@mock.patch('magnum.conductor.scale_manager.ScaleManager.'
'_get_num_of_removal')
@ -22,11 +22,12 @@ from magnum.tests import base
|
|||||||
|
|
||||||
class TestConductorUtils(base.TestCase):
|
class TestConductorUtils(base.TestCase):
|
||||||
|
|
||||||
def _test_retrieve_bay(self, expected_bay_uuid, mock_bay_get_by_uuid):
|
def _test_retrieve_cluster(self, expected_cluster_uuid,
|
||||||
|
mock_cluster_get_by_uuid):
|
||||||
expected_context = 'context'
|
expected_context = 'context'
|
||||||
utils.retrieve_bay(expected_context, expected_bay_uuid)
|
utils.retrieve_cluster(expected_context, expected_cluster_uuid)
|
||||||
mock_bay_get_by_uuid.assert_called_once_with(expected_context,
|
mock_cluster_get_by_uuid.assert_called_once_with(
|
||||||
expected_bay_uuid)
|
expected_context, expected_cluster_uuid)
|
||||||
|
|
||||||
def get_fake_id(self):
|
def get_fake_id(self):
|
||||||
return '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
|
return '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
|
||||||
@ -40,38 +41,39 @@ class TestConductorUtils(base.TestCase):
expected_context = 'context'
expected_context = 'context'
expected_cluster_template_uuid = 'ClusterTemplate_uuid'
expected_cluster_template_uuid = 'ClusterTemplate_uuid'

bay = objects.Bay({})
cluster = objects.Cluster({})
bay.baymodel_id = expected_cluster_template_uuid
cluster.cluster_template_id = expected_cluster_template_uuid

utils.retrieve_cluster_template(expected_context, bay)
utils.retrieve_cluster_template(expected_context, cluster)

mock_cluster_template_get_by_uuid.assert_called_once_with(
mock_cluster_template_get_by_uuid.assert_called_once_with(
expected_context,
expected_context,
expected_cluster_template_uuid)
expected_cluster_template_uuid)

@patch('oslo_utils.uuidutils.is_uuid_like')
@patch('oslo_utils.uuidutils.is_uuid_like')
@patch('magnum.objects.Bay.get_by_name')
@patch('magnum.objects.Cluster.get_by_name')
def test_retrieve_bay_uuid_from_name(self, mock_bay_get_by_name,
def test_retrieve_cluster_uuid_from_name(self, mock_cluster_get_by_name,
mock_uuid_like):
mock_uuid_like):
bay = objects.Bay(uuid='5d12f6fd-a196-4bf0-ae4c-1f639a523a52')
cluster = objects.Cluster(uuid='5d12f6fd-a196-4bf0-ae4c-1f639a523a52')
mock_uuid_like.return_value = False
mock_uuid_like.return_value = False
mock_bay_get_by_name.return_value = bay
mock_cluster_get_by_name.return_value = cluster
bay_uuid = utils.retrieve_bay_uuid('context', 'fake_name')
cluster_uuid = utils.retrieve_cluster_uuid('context', 'fake_name')
self.assertEqual('5d12f6fd-a196-4bf0-ae4c-1f639a523a52', bay_uuid)
self.assertEqual('5d12f6fd-a196-4bf0-ae4c-1f639a523a52', cluster_uuid)

mock_uuid_like.assert_called_once_with('fake_name')
mock_uuid_like.assert_called_once_with('fake_name')
mock_bay_get_by_name.assert_called_once_with('context', 'fake_name')
mock_cluster_get_by_name.assert_called_once_with('context',
'fake_name')

@patch('oslo_utils.uuidutils.is_uuid_like')
@patch('oslo_utils.uuidutils.is_uuid_like')
@patch('magnum.objects.Bay.get_by_name')
@patch('magnum.objects.Cluster.get_by_name')
def test_retrieve_bay_uuid_from_uuid(self, mock_bay_get_by_name,
def test_retrieve_cluster_uuid_from_uuid(self, mock_cluster_get_by_name,
mock_uuid_like):
mock_uuid_like):
bay_uuid = utils.retrieve_bay_uuid(
cluster_uuid = utils.retrieve_cluster_uuid(
'context',
'context',
'5d12f6fd-a196-4bf0-ae4c-1f639a523a52')
'5d12f6fd-a196-4bf0-ae4c-1f639a523a52')
self.assertEqual('5d12f6fd-a196-4bf0-ae4c-1f639a523a52', bay_uuid)
self.assertEqual('5d12f6fd-a196-4bf0-ae4c-1f639a523a52', cluster_uuid)
mock_uuid_like.return_value = True
mock_uuid_like.return_value = True
mock_bay_get_by_name.assert_not_called()
mock_cluster_get_by_name.assert_not_called()

def _get_heat_stacks_get_mock_obj(self, status):
def _get_heat_stacks_get_mock_obj(self, status):
mock_stack = mock.MagicMock()
mock_stack = mock.MagicMock()
@ -84,45 +86,46 @@ class TestConductorUtils(base.TestCase):
mock_osc.heat.return_value = mock_stack
mock_osc.heat.return_value = mock_stack
return mock_osc
return mock_osc

@patch('magnum.conductor.utils.retrieve_bay')
@patch('magnum.conductor.utils.retrieve_cluster')
@patch('magnum.conductor.utils.clients.OpenStackClients')
@patch('magnum.conductor.utils.clients.OpenStackClients')
def test_object_has_stack_invalid_status(self, mock_oscs,
def test_object_has_stack_invalid_status(self, mock_oscs,
mock_retrieve_bay):
mock_retrieve_cluster):

mock_osc = self._get_heat_stacks_get_mock_obj("INVALID_STATUS")
mock_osc = self._get_heat_stacks_get_mock_obj("INVALID_STATUS")
mock_oscs.return_value = mock_osc
mock_oscs.return_value = mock_osc
self.assertTrue(utils.object_has_stack('context', self.get_fake_id()))
self.assertTrue(utils.object_has_stack('context', self.get_fake_id()))
mock_retrieve_bay.assert_called_with('context', self.get_fake_id())
mock_retrieve_cluster.assert_called_with('context', self.get_fake_id())

@patch('magnum.conductor.utils.retrieve_bay')
@patch('magnum.conductor.utils.retrieve_cluster')
@patch('magnum.conductor.utils.clients.OpenStackClients')
@patch('magnum.conductor.utils.clients.OpenStackClients')
def test_object_has_stack_delete_in_progress(self, mock_oscs,
def test_object_has_stack_delete_in_progress(self, mock_oscs,
mock_retrieve_bay):
mock_retrieve_cluster):

mock_osc = self._get_heat_stacks_get_mock_obj("DELETE_IN_PROGRESS")
mock_osc = self._get_heat_stacks_get_mock_obj("DELETE_IN_PROGRESS")
mock_oscs.return_value = mock_osc
mock_oscs.return_value = mock_osc
self.assertFalse(utils.object_has_stack('context', self.get_fake_id()))
self.assertFalse(utils.object_has_stack('context', self.get_fake_id()))
mock_retrieve_bay.assert_called_with('context', self.get_fake_id())
mock_retrieve_cluster.assert_called_with('context', self.get_fake_id())

@patch('magnum.conductor.utils.retrieve_bay')
@patch('magnum.conductor.utils.retrieve_cluster')
@patch('magnum.conductor.utils.clients.OpenStackClients')
@patch('magnum.conductor.utils.clients.OpenStackClients')
def test_object_has_stack_delete_complete_status(self, mock_oscs,
def test_object_has_stack_delete_complete_status(self, mock_oscs,
mock_retrieve_bay):
mock_retrieve_cluster):
mock_osc = self._get_heat_stacks_get_mock_obj("DELETE_COMPLETE")
mock_osc = self._get_heat_stacks_get_mock_obj("DELETE_COMPLETE")
mock_oscs.return_value = mock_osc
mock_oscs.return_value = mock_osc
self.assertFalse(utils.object_has_stack('context', self.get_fake_id()))
self.assertFalse(utils.object_has_stack('context', self.get_fake_id()))
mock_retrieve_bay.assert_called_with('context', self.get_fake_id())
mock_retrieve_cluster.assert_called_with('context', self.get_fake_id())

@patch('magnum.objects.Bay.get_by_uuid')
@patch('magnum.objects.Cluster.get_by_uuid')
def test_retrieve_bay_uuid(self, mock_get_by_uuid):
def test_retrieve_cluster_uuid(self, mock_get_by_uuid):
mock_get_by_uuid.return_value = True
mock_get_by_uuid.return_value = True
utils.retrieve_bay('context', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52')
utils.retrieve_cluster('context',
'5d12f6fd-a196-4bf0-ae4c-1f639a523a52')
self.assertTrue(mock_get_by_uuid.called)
self.assertTrue(mock_get_by_uuid.called)

@patch('magnum.objects.Bay.get_by_name')
@patch('magnum.objects.Cluster.get_by_name')
def test_retrieve_bay_name(self, mock_get_by_name):
def test_retrieve_cluster_name(self, mock_get_by_name):
mock_get_by_name.return_value = mock.MagicMock()
mock_get_by_name.return_value = mock.MagicMock()
utils.retrieve_bay('context', '1')
utils.retrieve_cluster('context', '1')
self.assertTrue(mock_get_by_name.called)
self.assertTrue(mock_get_by_name.called)

@patch('magnum.conductor.utils.resource.Resource')
@patch('magnum.conductor.utils.resource.Resource')
@ -45,25 +45,25 @@ class SqlAlchemyCustomTypesTestCase(base.DbTestCase):
['this is not a dict']})
['this is not a dict']})

def test_JSONEncodedList_default_value(self):
def test_JSONEncodedList_default_value(self):
# Create bay w/o master_addresses
# Create cluster w/o master_addresses
bay1_id = uuidutils.generate_uuid()
cluster1_id = uuidutils.generate_uuid()
self.dbapi.create_bay({'uuid': bay1_id})
self.dbapi.create_cluster({'uuid': cluster1_id})
bay1 = sa_api.model_query(
cluster1 = sa_api.model_query(
models.Bay).filter_by(uuid=bay1_id).one()
models.Cluster).filter_by(uuid=cluster1_id).one()
self.assertEqual([], bay1.master_addresses)
self.assertEqual([], cluster1.master_addresses)

# Create bay with master_addresses
# Create cluster with master_addresses
bay2_id = uuidutils.generate_uuid()
cluster2_id = uuidutils.generate_uuid()
self.dbapi.create_bay({'uuid': bay2_id,
self.dbapi.create_cluster({'uuid': cluster2_id,
'master_addresses': ['mymaster_address1',
'master_addresses': ['mymaster_address1',
'mymaster_address2']})
'mymaster_address2']})
bay2 = sa_api.model_query(
cluster2 = sa_api.model_query(
models.Bay).filter_by(uuid=bay2_id).one()
models.Cluster).filter_by(uuid=cluster2_id).one()
self.assertEqual(['mymaster_address1', 'mymaster_address2'],
self.assertEqual(['mymaster_address1', 'mymaster_address2'],
bay2.master_addresses)
cluster2.master_addresses)

def test_JSONEncodedList_type_check(self):
def test_JSONEncodedList_type_check(self):
self.assertRaises(db_exc.DBError,
self.assertRaises(db_exc.DBError,
self.dbapi.create_bay,
self.dbapi.create_cluster,
{'master_addresses':
{'master_addresses':
{'this is not a list': 'test'}})
{'this is not a list': 'test'}})
@ -1,214 +0,0 @@
|
|||||||
# Copyright 2015 OpenStack Foundation
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""Tests for manipulating Bays via the DB API"""
|
|
||||||
from oslo_utils import uuidutils
|
|
||||||
import six
|
|
||||||
|
|
||||||
from magnum.common import context
|
|
||||||
from magnum.common import exception
|
|
||||||
from magnum.objects.fields import BayStatus as bay_status
|
|
||||||
from magnum.tests.unit.db import base
|
|
||||||
from magnum.tests.unit.db import utils
|
|
||||||
|
|
||||||
|
|
||||||
class DbBayTestCase(base.DbTestCase):
|
|
||||||
|
|
||||||
def test_create_bay(self):
|
|
||||||
utils.create_test_bay()
|
|
||||||
|
|
||||||
def test_create_bay_nullable_baymodel_id(self):
|
|
||||||
utils.create_test_bay(baymodel_id=None)
|
|
||||||
|
|
||||||
def test_create_bay_already_exists(self):
|
|
||||||
utils.create_test_bay()
|
|
||||||
self.assertRaises(exception.ClusterAlreadyExists,
|
|
||||||
utils.create_test_bay)
|
|
||||||
|
|
||||||
def test_get_bay_by_id(self):
|
|
||||||
bay = utils.create_test_bay()
|
|
||||||
res = self.dbapi.get_bay_by_id(self.context, bay.id)
|
|
||||||
self.assertEqual(bay.id, res.id)
|
|
||||||
self.assertEqual(bay.uuid, res.uuid)
|
|
||||||
|
|
||||||
def test_get_bay_by_name(self):
|
|
||||||
bay = utils.create_test_bay()
|
|
||||||
res = self.dbapi.get_bay_by_name(self.context, bay.name)
|
|
||||||
self.assertEqual(bay.name, res.name)
|
|
||||||
self.assertEqual(bay.uuid, res.uuid)
|
|
||||||
|
|
||||||
def test_get_bay_by_uuid(self):
|
|
||||||
bay = utils.create_test_bay()
|
|
||||||
res = self.dbapi.get_bay_by_uuid(self.context, bay.uuid)
|
|
||||||
self.assertEqual(bay.id, res.id)
|
|
||||||
self.assertEqual(bay.uuid, res.uuid)
|
|
||||||
|
|
||||||
def test_get_bay_that_does_not_exist(self):
|
|
||||||
self.assertRaises(exception.ClusterNotFound,
|
|
||||||
self.dbapi.get_bay_by_id,
|
|
||||||
self.context, 999)
|
|
||||||
self.assertRaises(exception.ClusterNotFound,
|
|
||||||
self.dbapi.get_bay_by_uuid,
|
|
||||||
self.context,
|
|
||||||
'12345678-9999-0000-aaaa-123456789012')
|
|
||||||
|
|
||||||
def test_get_bay_list(self):
|
|
||||||
uuids = []
|
|
||||||
for i in range(1, 6):
|
|
||||||
bay = utils.create_test_bay(uuid=uuidutils.generate_uuid())
|
|
||||||
uuids.append(six.text_type(bay['uuid']))
|
|
||||||
res = self.dbapi.get_bay_list(self.context)
|
|
||||||
res_uuids = [r.uuid for r in res]
|
|
||||||
self.assertEqual(sorted(uuids), sorted(res_uuids))
|
|
||||||
|
|
||||||
def test_get_bay_list_sorted(self):
|
|
||||||
uuids = []
|
|
||||||
for _ in range(5):
|
|
||||||
bay = utils.create_test_bay(uuid=uuidutils.generate_uuid())
|
|
||||||
uuids.append(six.text_type(bay.uuid))
|
|
||||||
res = self.dbapi.get_bay_list(self.context, sort_key='uuid')
|
|
||||||
res_uuids = [r.uuid for r in res]
|
|
||||||
self.assertEqual(sorted(uuids), res_uuids)
|
|
||||||
|
|
||||||
self.assertRaises(exception.InvalidParameterValue,
|
|
||||||
self.dbapi.get_bay_list,
|
|
||||||
self.context,
|
|
||||||
sort_key='foo')
|
|
||||||
|
|
||||||
def test_get_bay_list_with_filters(self):
|
|
||||||
ct1 = utils.get_test_cluster_template(id=1,
|
|
||||||
uuid=uuidutils.generate_uuid())
|
|
||||||
ct2 = utils.get_test_cluster_template(id=2,
|
|
||||||
uuid=uuidutils.generate_uuid())
|
|
||||||
self.dbapi.create_cluster_template(ct1)
|
|
||||||
self.dbapi.create_cluster_template(ct2)
|
|
||||||
|
|
||||||
bay1 = utils.create_test_bay(
|
|
||||||
name='bay-one',
|
|
||||||
uuid=uuidutils.generate_uuid(),
|
|
||||||
baymodel_id=ct1['uuid'],
|
|
||||||
status=bay_status.CREATE_IN_PROGRESS)
|
|
||||||
bay2 = utils.create_test_bay(
|
|
||||||
name='bay-two',
|
|
||||||
uuid=uuidutils.generate_uuid(),
|
|
||||||
baymodel_id=ct2['uuid'],
|
|
||||||
node_count=1,
|
|
||||||
master_count=1,
|
|
||||||
status=bay_status.UPDATE_IN_PROGRESS)
|
|
||||||
bay3 = utils.create_test_bay(
|
|
||||||
name='bay-three',
|
|
||||||
node_count=2,
|
|
||||||
master_count=5,
|
|
||||||
status=bay_status.DELETE_IN_PROGRESS)
|
|
||||||
|
|
||||||
res = self.dbapi.get_bay_list(self.context,
|
|
||||||
filters={'baymodel_id': ct1['uuid']})
|
|
||||||
self.assertEqual([bay1.id], [r.id for r in res])
|
|
||||||
|
|
||||||
res = self.dbapi.get_bay_list(self.context,
|
|
||||||
filters={'baymodel_id': ct2['uuid']})
|
|
||||||
self.assertEqual([bay2.id], [r.id for r in res])
|
|
||||||
|
|
||||||
res = self.dbapi.get_bay_list(self.context,
|
|
||||||
filters={'name': 'bay-one'})
|
|
||||||
self.assertEqual([bay1.id], [r.id for r in res])
|
|
||||||
|
|
||||||
res = self.dbapi.get_bay_list(self.context,
|
|
||||||
filters={'name': 'bad-bay'})
|
|
||||||
self.assertEqual([], [r.id for r in res])
|
|
||||||
|
|
||||||
res = self.dbapi.get_bay_list(self.context,
|
|
||||||
filters={'node_count': 3})
|
|
||||||
self.assertEqual([bay1.id], [r.id for r in res])
|
|
||||||
|
|
||||||
res = self.dbapi.get_bay_list(self.context,
|
|
||||||
filters={'node_count': 1})
|
|
||||||
self.assertEqual([bay2.id], [r.id for r in res])
|
|
||||||
|
|
||||||
res = self.dbapi.get_bay_list(self.context,
|
|
||||||
filters={'master_count': 3})
|
|
||||||
self.assertEqual([bay1.id], [r.id for r in res])
|
|
||||||
|
|
||||||
res = self.dbapi.get_bay_list(self.context,
|
|
||||||
filters={'master_count': 1})
|
|
||||||
self.assertEqual([bay2.id], [r.id for r in res])
|
|
||||||
|
|
||||||
filters = {'status': [bay_status.CREATE_IN_PROGRESS,
|
|
||||||
bay_status.DELETE_IN_PROGRESS]}
|
|
||||||
res = self.dbapi.get_bay_list(self.context,
|
|
||||||
filters=filters)
|
|
||||||
self.assertEqual([bay1.id, bay3.id], [r.id for r in res])
|
|
||||||
|
|
||||||
def test_get_bay_list_by_admin_all_tenants(self):
|
|
||||||
uuids = []
|
|
||||||
for i in range(1, 6):
|
|
||||||
bay = utils.create_test_bay(
|
|
||||||
uuid=uuidutils.generate_uuid(),
|
|
||||||
project_id=uuidutils.generate_uuid(),
|
|
||||||
user_id=uuidutils.generate_uuid())
|
|
||||||
uuids.append(six.text_type(bay['uuid']))
|
|
||||||
ctx = context.make_admin_context(all_tenants=True)
|
|
||||||
res = self.dbapi.get_bay_list(ctx)
|
|
||||||
res_uuids = [r.uuid for r in res]
|
|
||||||
self.assertEqual(sorted(uuids), sorted(res_uuids))
|
|
||||||
|
|
||||||
def test_get_bay_list_baymodel_not_exist(self):
|
|
||||||
utils.create_test_bay()
|
|
||||||
self.assertEqual(1, len(self.dbapi.get_bay_list(self.context)))
|
|
||||||
res = self.dbapi.get_bay_list(self.context, filters={
|
|
||||||
'baymodel_id': uuidutils.generate_uuid()})
|
|
||||||
self.assertEqual(0, len(res))
|
|
||||||
|
|
||||||
def test_destroy_bay(self):
|
|
||||||
bay = utils.create_test_bay()
|
|
||||||
self.assertIsNotNone(self.dbapi.get_bay_by_id(self.context,
|
|
||||||
bay.id))
|
|
||||||
self.dbapi.destroy_bay(bay.id)
|
|
||||||
self.assertRaises(exception.ClusterNotFound,
|
|
||||||
self.dbapi.get_bay_by_id,
|
|
||||||
self.context, bay.id)
|
|
||||||
|
|
||||||
def test_destroy_bay_by_uuid(self):
|
|
||||||
bay = utils.create_test_bay()
|
|
||||||
self.assertIsNotNone(self.dbapi.get_bay_by_uuid(self.context,
|
|
||||||
bay.uuid))
|
|
||||||
self.dbapi.destroy_bay(bay.uuid)
|
|
||||||
self.assertRaises(exception.ClusterNotFound,
|
|
||||||
self.dbapi.get_bay_by_uuid, self.context,
|
|
||||||
bay.uuid)
|
|
||||||
|
|
||||||
def test_destroy_bay_that_does_not_exist(self):
|
|
||||||
self.assertRaises(exception.ClusterNotFound,
|
|
||||||
self.dbapi.destroy_bay,
|
|
||||||
'12345678-9999-0000-aaaa-123456789012')
|
|
||||||
|
|
||||||
def test_update_bay(self):
|
|
||||||
bay = utils.create_test_bay()
|
|
||||||
old_nc = bay.node_count
|
|
||||||
new_nc = 5
|
|
||||||
self.assertNotEqual(old_nc, new_nc)
|
|
||||||
res = self.dbapi.update_bay(bay.id, {'node_count': new_nc})
|
|
||||||
self.assertEqual(new_nc, res.node_count)
|
|
||||||
|
|
||||||
def test_update_bay_not_found(self):
|
|
||||||
bay_uuid = uuidutils.generate_uuid()
|
|
||||||
self.assertRaises(exception.ClusterNotFound, self.dbapi.update_bay,
|
|
||||||
bay_uuid, {'node_count': 5})
|
|
||||||
|
|
||||||
def test_update_bay_uuid(self):
|
|
||||||
bay = utils.create_test_bay()
|
|
||||||
self.assertRaises(exception.InvalidParameterValue,
|
|
||||||
self.dbapi.update_bay, bay.id,
|
|
||||||
{'uuid': ''})
|
|
214
magnum/tests/unit/db/test_cluster.py
Normal file
@ -0,0 +1,214 @@
|
|||||||
|
# Copyright 2015 OpenStack Foundation
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
"""Tests for manipulating Clusters via the DB API"""
|
||||||
|
from oslo_utils import uuidutils
|
||||||
|
import six
|
||||||
|
|
||||||
|
from magnum.common import context
|
||||||
|
from magnum.common import exception
|
||||||
|
from magnum.objects.fields import ClusterStatus as cluster_status
|
||||||
|
from magnum.tests.unit.db import base
|
||||||
|
from magnum.tests.unit.db import utils
|
||||||
|
|
||||||
|
|
||||||
|
class DbClusterTestCase(base.DbTestCase):
|
||||||
|
|
||||||
|
def test_create_cluster(self):
|
||||||
|
utils.create_test_cluster()
|
||||||
|
|
||||||
|
def test_create_cluster_nullable_cluster_template_id(self):
|
||||||
|
utils.create_test_cluster(cluster_template_id=None)
|
||||||
|
|
||||||
|
def test_create_cluster_already_exists(self):
|
||||||
|
utils.create_test_cluster()
|
||||||
|
self.assertRaises(exception.ClusterAlreadyExists,
|
||||||
|
utils.create_test_cluster)
|
||||||
|
|
||||||
|
def test_get_cluster_by_id(self):
|
||||||
|
cluster = utils.create_test_cluster()
|
||||||
|
res = self.dbapi.get_cluster_by_id(self.context, cluster.id)
|
||||||
|
self.assertEqual(cluster.id, res.id)
|
||||||
|
self.assertEqual(cluster.uuid, res.uuid)
|
||||||
|
|
||||||
|
def test_get_cluster_by_name(self):
|
||||||
|
cluster = utils.create_test_cluster()
|
||||||
|
res = self.dbapi.get_cluster_by_name(self.context, cluster.name)
|
||||||
|
self.assertEqual(cluster.name, res.name)
|
||||||
|
self.assertEqual(cluster.uuid, res.uuid)
|
||||||
|
|
||||||
|
def test_get_cluster_by_uuid(self):
|
||||||
|
cluster = utils.create_test_cluster()
|
||||||
|
res = self.dbapi.get_cluster_by_uuid(self.context, cluster.uuid)
|
||||||
|
self.assertEqual(cluster.id, res.id)
|
||||||
|
self.assertEqual(cluster.uuid, res.uuid)
|
||||||
|
|
||||||
|
def test_get_cluster_that_does_not_exist(self):
|
||||||
|
self.assertRaises(exception.ClusterNotFound,
|
||||||
|
self.dbapi.get_cluster_by_id,
|
||||||
|
self.context, 999)
|
||||||
|
self.assertRaises(exception.ClusterNotFound,
|
||||||
|
self.dbapi.get_cluster_by_uuid,
|
||||||
|
self.context,
|
||||||
|
'12345678-9999-0000-aaaa-123456789012')
|
||||||
|
|
||||||
|
def test_get_cluster_list(self):
|
||||||
|
uuids = []
|
||||||
|
for i in range(1, 6):
|
||||||
|
cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid())
|
||||||
|
uuids.append(six.text_type(cluster['uuid']))
|
||||||
|
res = self.dbapi.get_cluster_list(self.context)
|
||||||
|
res_uuids = [r.uuid for r in res]
|
||||||
|
self.assertEqual(sorted(uuids), sorted(res_uuids))
|
||||||
|
|
||||||
|
def test_get_cluster_list_sorted(self):
|
||||||
|
uuids = []
|
||||||
|
for _ in range(5):
|
||||||
|
cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid())
|
||||||
|
uuids.append(six.text_type(cluster.uuid))
|
||||||
|
res = self.dbapi.get_cluster_list(self.context, sort_key='uuid')
|
||||||
|
res_uuids = [r.uuid for r in res]
|
||||||
|
self.assertEqual(sorted(uuids), res_uuids)
|
||||||
|
|
||||||
|
self.assertRaises(exception.InvalidParameterValue,
|
||||||
|
self.dbapi.get_cluster_list,
|
||||||
|
self.context,
|
||||||
|
sort_key='foo')
|
||||||
|
|
||||||
|
def test_get_cluster_list_with_filters(self):
|
||||||
|
ct1 = utils.get_test_cluster_template(id=1,
|
||||||
|
uuid=uuidutils.generate_uuid())
|
||||||
|
ct2 = utils.get_test_cluster_template(id=2,
|
||||||
|
uuid=uuidutils.generate_uuid())
|
||||||
|
self.dbapi.create_cluster_template(ct1)
|
||||||
|
self.dbapi.create_cluster_template(ct2)
|
||||||
|
|
||||||
|
cluster1 = utils.create_test_cluster(
|
||||||
|
name='cluster-one',
|
||||||
|
uuid=uuidutils.generate_uuid(),
|
||||||
|
cluster_template_id=ct1['uuid'],
|
||||||
|
status=cluster_status.CREATE_IN_PROGRESS)
|
||||||
|
cluster2 = utils.create_test_cluster(
|
||||||
|
name='cluster-two',
|
||||||
|
uuid=uuidutils.generate_uuid(),
|
||||||
|
cluster_template_id=ct2['uuid'],
|
||||||
|
node_count=1,
|
||||||
|
master_count=1,
|
||||||
|
status=cluster_status.UPDATE_IN_PROGRESS)
|
||||||
|
cluster3 = utils.create_test_cluster(
|
||||||
|
name='cluster-three',
|
||||||
|
node_count=2,
|
||||||
|
master_count=5,
|
||||||
|
status=cluster_status.DELETE_IN_PROGRESS)
|
||||||
|
|
||||||
|
res = self.dbapi.get_cluster_list(
|
||||||
|
self.context, filters={'cluster_template_id': ct1['uuid']})
|
||||||
|
self.assertEqual([cluster1.id], [r.id for r in res])
|
||||||
|
|
||||||
|
res = self.dbapi.get_cluster_list(
|
||||||
|
self.context, filters={'cluster_template_id': ct2['uuid']})
|
||||||
|
self.assertEqual([cluster2.id], [r.id for r in res])
|
||||||
|
|
||||||
|
res = self.dbapi.get_cluster_list(self.context,
|
||||||
|
filters={'name': 'cluster-one'})
|
||||||
|
self.assertEqual([cluster1.id], [r.id for r in res])
|
||||||
|
|
||||||
|
res = self.dbapi.get_cluster_list(self.context,
|
||||||
|
filters={'name': 'bad-cluster'})
|
||||||
|
self.assertEqual([], [r.id for r in res])
|
||||||
|
|
||||||
|
res = self.dbapi.get_cluster_list(self.context,
|
||||||
|
filters={'node_count': 3})
|
||||||
|
self.assertEqual([cluster1.id], [r.id for r in res])
|
||||||
|
|
||||||
|
res = self.dbapi.get_cluster_list(self.context,
|
||||||
|
filters={'node_count': 1})
|
||||||
|
self.assertEqual([cluster2.id], [r.id for r in res])
|
||||||
|
|
||||||
|
res = self.dbapi.get_cluster_list(self.context,
|
||||||
|
filters={'master_count': 3})
|
||||||
|
self.assertEqual([cluster1.id], [r.id for r in res])
|
||||||
|
|
||||||
|
res = self.dbapi.get_cluster_list(self.context,
|
||||||
|
filters={'master_count': 1})
|
||||||
|
self.assertEqual([cluster2.id], [r.id for r in res])
|
||||||
|
|
||||||
|
filters = {'status': [cluster_status.CREATE_IN_PROGRESS,
|
||||||
|
cluster_status.DELETE_IN_PROGRESS]}
|
||||||
|
res = self.dbapi.get_cluster_list(self.context,
|
||||||
|
filters=filters)
|
||||||
|
self.assertEqual([cluster1.id, cluster3.id], [r.id for r in res])
|
||||||
|
|
||||||
|
def test_get_cluster_list_by_admin_all_tenants(self):
|
||||||
|
uuids = []
|
||||||
|
for i in range(1, 6):
|
||||||
|
cluster = utils.create_test_cluster(
|
||||||
|
uuid=uuidutils.generate_uuid(),
|
||||||
|
project_id=uuidutils.generate_uuid(),
|
||||||
|
user_id=uuidutils.generate_uuid())
|
||||||
|
uuids.append(six.text_type(cluster['uuid']))
|
||||||
|
ctx = context.make_admin_context(all_tenants=True)
|
||||||
|
res = self.dbapi.get_cluster_list(ctx)
|
||||||
|
res_uuids = [r.uuid for r in res]
|
||||||
|
self.assertEqual(sorted(uuids), sorted(res_uuids))
|
||||||
|
|
||||||
|
def test_get_cluster_list_cluster_template_not_exist(self):
|
||||||
|
utils.create_test_cluster()
|
||||||
|
self.assertEqual(1, len(self.dbapi.get_cluster_list(self.context)))
|
||||||
|
res = self.dbapi.get_cluster_list(self.context, filters={
|
||||||
|
'cluster_template_id': uuidutils.generate_uuid()})
|
||||||
|
self.assertEqual(0, len(res))
|
||||||
|
|
||||||
|
def test_destroy_cluster(self):
|
||||||
|
cluster = utils.create_test_cluster()
|
||||||
|
self.assertIsNotNone(self.dbapi.get_cluster_by_id(self.context,
|
||||||
|
cluster.id))
|
||||||
|
self.dbapi.destroy_cluster(cluster.id)
|
||||||
|
self.assertRaises(exception.ClusterNotFound,
|
||||||
|
self.dbapi.get_cluster_by_id,
|
||||||
|
self.context, cluster.id)
|
||||||
|
|
||||||
|
def test_destroy_cluster_by_uuid(self):
|
||||||
|
cluster = utils.create_test_cluster()
|
||||||
|
self.assertIsNotNone(self.dbapi.get_cluster_by_uuid(self.context,
|
||||||
|
cluster.uuid))
|
||||||
|
self.dbapi.destroy_cluster(cluster.uuid)
|
||||||
|
self.assertRaises(exception.ClusterNotFound,
|
||||||
|
self.dbapi.get_cluster_by_uuid, self.context,
|
||||||
|
cluster.uuid)
|
||||||
|
|
||||||
|
def test_destroy_cluster_that_does_not_exist(self):
|
||||||
|
self.assertRaises(exception.ClusterNotFound,
|
||||||
|
self.dbapi.destroy_cluster,
|
||||||
|
'12345678-9999-0000-aaaa-123456789012')
|
||||||
|
|
||||||
|
def test_update_cluster(self):
|
||||||
|
cluster = utils.create_test_cluster()
|
||||||
|
old_nc = cluster.node_count
|
||||||
|
new_nc = 5
|
||||||
|
self.assertNotEqual(old_nc, new_nc)
|
||||||
|
res = self.dbapi.update_cluster(cluster.id, {'node_count': new_nc})
|
||||||
|
self.assertEqual(new_nc, res.node_count)
|
||||||
|
|
||||||
|
def test_update_cluster_not_found(self):
|
||||||
|
cluster_uuid = uuidutils.generate_uuid()
|
||||||
|
self.assertRaises(exception.ClusterNotFound, self.dbapi.update_cluster,
|
||||||
|
cluster_uuid, {'node_count': 5})
|
||||||
|
|
||||||
|
def test_update_cluster_uuid(self):
|
||||||
|
cluster = utils.create_test_cluster()
|
||||||
|
self.assertRaises(exception.InvalidParameterValue,
|
||||||
|
self.dbapi.update_cluster, cluster.id,
|
||||||
|
{'uuid': ''})
|
@ -178,10 +178,10 @@ class DbClusterTemplateTestCase(base.DbTestCase):
self.assertRaises(exception.ClusterTemplateNotFound,
self.assertRaises(exception.ClusterTemplateNotFound,
self.dbapi.destroy_cluster_template, 666)
self.dbapi.destroy_cluster_template, 666)

def test_destroy_cluster_template_that_referenced_by_bays(self):
def test_destroy_cluster_template_that_referenced_by_clusters(self):
ct = utils.create_test_cluster_template()
ct = utils.create_test_cluster_template()
bay = utils.create_test_bay(cluster_template_id=ct['uuid'])
cluster = utils.create_test_cluster(cluster_template_id=ct['uuid'])
self.assertEqual(ct['uuid'], bay.baymodel_id)
self.assertEqual(ct['uuid'], cluster.cluster_template_id)
self.assertRaises(exception.ClusterTemplateReferenced,
self.assertRaises(exception.ClusterTemplateReferenced,
self.dbapi.destroy_cluster_template, ct['id'])
self.dbapi.destroy_cluster_template, ct['id'])
@ -24,7 +24,7 @@ def get_test_cluster_template(**kw):
'project_id': kw.get('project_id', 'fake_project'),
'project_id': kw.get('project_id', 'fake_project'),
'user_id': kw.get('user_id', 'fake_user'),
'user_id': kw.get('user_id', 'fake_user'),
'uuid': kw.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'),
'uuid': kw.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'),
'name': kw.get('name', 'baymodel1'),
'name': kw.get('name', 'clustermodel1'),
'image_id': kw.get('image_id', 'ubuntu'),
'image_id': kw.get('image_id', 'ubuntu'),
'flavor_id': kw.get('flavor_id', 'm1.small'),
'flavor_id': kw.get('flavor_id', 'm1.small'),
'master_flavor_id': kw.get('master_flavor_id', 'm1.small'),
'master_flavor_id': kw.get('master_flavor_id', 'm1.small'),
@ -73,22 +73,22 @@ def create_test_cluster_template(**kw):
return dbapi.create_cluster_template(cluster_template)
return dbapi.create_cluster_template(cluster_template)


def get_test_bay(**kw):
def get_test_cluster(**kw):
attrs = {
attrs = {
'id': kw.get('id', 42),
'id': kw.get('id', 42),
'uuid': kw.get('uuid', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
'uuid': kw.get('uuid', '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'),
'name': kw.get('name', 'bay1'),
'name': kw.get('name', 'cluster1'),
'discovery_url': kw.get('discovery_url', None),
'discovery_url': kw.get('discovery_url', None),
'ca_cert_ref': kw.get('ca_cert_ref', None),
'ca_cert_ref': kw.get('ca_cert_ref', None),
'magnum_cert_ref': kw.get('magnum_cert_ref', None),
'magnum_cert_ref': kw.get('magnum_cert_ref', None),
'project_id': kw.get('project_id', 'fake_project'),
'project_id': kw.get('project_id', 'fake_project'),
'user_id': kw.get('user_id', 'fake_user'),
'user_id': kw.get('user_id', 'fake_user'),
'baymodel_id': kw.get('baymodel_id',
'cluster_template_id': kw.get('cluster_template_id',
'e74c40e0-d825-11e2-a28f-0800200c9a66'),
'e74c40e0-d825-11e2-a28f-0800200c9a66'),
'stack_id': kw.get('stack_id', '047c6319-7abd-4bd9-a033-8c6af0173cd0'),
'stack_id': kw.get('stack_id', '047c6319-7abd-4bd9-a033-8c6af0173cd0'),
'status': kw.get('status', 'CREATE_IN_PROGRESS'),
'status': kw.get('status', 'CREATE_IN_PROGRESS'),
'status_reason': kw.get('status_reason', 'Completed successfully'),
'status_reason': kw.get('status_reason', 'Completed successfully'),
'bay_create_timeout': kw.get('bay_create_timeout', 60),
'create_timeout': kw.get('create_timeout', 60),
'api_address': kw.get('api_address', '172.17.2.3'),
'api_address': kw.get('api_address', '172.17.2.3'),
'node_addresses': kw.get('node_addresses', ['172.17.2.4']),
'node_addresses': kw.get('node_addresses', ['172.17.2.4']),
'node_count': kw.get('node_count', 3),
'node_count': kw.get('node_count', 3),
@ -107,19 +107,19 @@ def get_test_bay(**kw):
return attrs
return attrs


def create_test_bay(**kw):
def create_test_cluster(**kw):
"""Create test bay entry in DB and return Bay DB object.
"""Create test cluster entry in DB and return Cluster DB object.

Function to be used to create test Bay objects in the database.
Function to be used to create test Cluster objects in the database.
:param kw: kwargs with overriding values for bay's attributes.
:param kw: kwargs with overriding values for cluster's attributes.
:returns: Test Bay DB object.
:returns: Test Cluster DB object.
"""
"""
bay = get_test_bay(**kw)
cluster = get_test_cluster(**kw)
# Let DB generate ID if it isn't specified explicitly
# Let DB generate ID if it isn't specified explicitly
if 'id' not in kw:
if 'id' not in kw:
del bay['id']
del cluster['id']
dbapi = db_api.get_instance()
dbapi = db_api.get_instance()
return dbapi.create_bay(bay)
return dbapi.create_cluster(cluster)


def get_test_x509keypair(**kw):
def get_test_x509keypair(**kw):
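For reference, a minimal usage sketch of the renamed test helper (not part of the patch; the test class and method names below are illustrative only):

# Illustrative sketch: exercises create_test_cluster() with an overridden attribute,
# assuming the base.DbTestCase and utils helpers shown in this diff.
from magnum.tests.unit.db import base
from magnum.tests.unit.db import utils


class ExampleClusterHelperTestCase(base.DbTestCase):

    def test_create_cluster_with_overrides(self):
        # create_test_cluster() merges kwargs over get_test_cluster() defaults
        # and persists the row through dbapi.create_cluster().
        cluster = utils.create_test_cluster(node_count=5)
        self.assertEqual(5, cluster.node_count)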
@ -104,7 +104,7 @@ class TemplateDefinitionTestCase(base.TestCase):
mesos_tdef.UbuntuMesosTemplateDefinition)
mesos_tdef.UbuntuMesosTemplateDefinition)

def test_get_definition_not_supported(self):
def test_get_definition_not_supported(self):
self.assertRaises(exception.BayTypeNotSupported,
self.assertRaises(exception.ClusterTypeNotSupported,
cmn_tdef.TemplateDefinition.get_template_definition,
cmn_tdef.TemplateDefinition.get_template_definition,
'vm', 'not_supported', 'kubernetes')
'vm', 'not_supported', 'kubernetes')
@ -112,18 +112,18 @@ class TemplateDefinitionTestCase(base.TestCase):
cfg.CONF.set_override('enabled_definitions',
cfg.CONF.set_override('enabled_definitions',
['magnum_vm_atomic_k8s'],
['magnum_vm_atomic_k8s'],
group='cluster')
group='cluster')
self.assertRaises(exception.BayTypeNotEnabled,
self.assertRaises(exception.ClusterTypeNotEnabled,
cmn_tdef.TemplateDefinition.get_template_definition,
cmn_tdef.TemplateDefinition.get_template_definition,
'vm', 'coreos', 'kubernetes')
'vm', 'coreos', 'kubernetes')

def test_required_param_not_set(self):
def test_required_param_not_set(self):
param = cmn_tdef.ParameterMapping('test', cluster_template_attr='test',
param = cmn_tdef.ParameterMapping('test', cluster_template_attr='test',
required=True)
required=True)
mock_baymodel = mock.MagicMock()
mock_cluster_template = mock.MagicMock()
mock_baymodel.test = None
mock_cluster_template.test = None

self.assertRaises(exception.RequiredParameterNotProvided,
self.assertRaises(exception.RequiredParameterNotProvided,
param.set_param, {}, mock_baymodel, None)
param.set_param, {}, mock_cluster_template, None)

def test_output_mapping(self):
def test_output_mapping(self):
heat_outputs = [
heat_outputs = [
@ -191,7 +191,7 @@ class BaseTemplateDefinitionTestCase(base.TestCase):
|
|||||||
floating_ip_enabled=True,
|
floating_ip_enabled=True,
|
||||||
public_ip_output_key='kube_masters',
|
public_ip_output_key='kube_masters',
|
||||||
private_ip_output_key='kube_masters_private',
|
private_ip_output_key='kube_masters_private',
|
||||||
bay_attr='master_addresses',
|
cluster_attr='master_addresses',
|
||||||
):
|
):
|
||||||
|
|
||||||
definition = self.get_definition()
|
definition = self.get_definition()
|
||||||
@ -211,13 +211,14 @@ class BaseTemplateDefinitionTestCase(base.TestCase):
|
|||||||
]
|
]
|
||||||
mock_stack = mock.MagicMock()
|
mock_stack = mock.MagicMock()
|
||||||
mock_stack.to_dict.return_value = {'outputs': outputs}
|
mock_stack.to_dict.return_value = {'outputs': outputs}
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_baymodel = mock.MagicMock()
|
mock_cluster_template = mock.MagicMock()
|
||||||
mock_baymodel.floating_ip_enabled = floating_ip_enabled
|
mock_cluster_template.floating_ip_enabled = floating_ip_enabled
|
||||||
|
|
||||||
definition.update_outputs(mock_stack, mock_baymodel, mock_bay)
|
definition.update_outputs(mock_stack, mock_cluster_template,
|
||||||
|
mock_cluster)
|
||||||
|
|
||||||
self.assertEqual(expected_address, getattr(mock_bay, bay_attr))
|
self.assertEqual(expected_address, getattr(mock_cluster, cluster_attr))
|
||||||
|
|
||||||
|
|
||||||
class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
||||||
@ -240,12 +241,12 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
mock_get_discovery_url, mock_osc_class):
|
mock_get_discovery_url, mock_osc_class):
|
||||||
mock_context = mock.MagicMock()
|
mock_context = mock.MagicMock()
|
||||||
mock_context.auth_token = 'AUTH_TOKEN'
|
mock_context.auth_token = 'AUTH_TOKEN'
|
||||||
mock_baymodel = mock.MagicMock()
|
mock_cluster_template = mock.MagicMock()
|
||||||
mock_baymodel.tls_disabled = False
|
mock_cluster_template.tls_disabled = False
|
||||||
mock_baymodel.registry_enabled = False
|
mock_cluster_template.registry_enabled = False
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
|
mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
|
||||||
del mock_bay.stack_id
|
del mock_cluster.stack_id
|
||||||
mock_scale_manager = mock.MagicMock()
|
mock_scale_manager = mock.MagicMock()
|
||||||
mock_osc = mock.MagicMock()
|
mock_osc = mock.MagicMock()
|
||||||
mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
|
mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
|
||||||
@ -260,13 +261,14 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
mock_context.user_name = 'fake_user'
|
mock_context.user_name = 'fake_user'
|
||||||
mock_context.tenant = 'fake_tenant'
|
mock_context.tenant = 'fake_tenant'
|
||||||
|
|
||||||
flannel_cidr = mock_baymodel.labels.get('flannel_network_cidr')
|
flannel_cidr = mock_cluster_template.labels.get('flannel_network_cidr')
|
||||||
flannel_subnet = mock_baymodel.labels.get('flannel_network_subnetlen')
|
flannel_subnet = mock_cluster_template.labels.get(
|
||||||
flannel_backend = mock_baymodel.labels.get('flannel_backend')
|
'flannel_network_subnetlen')
|
||||||
|
flannel_backend = mock_cluster_template.labels.get('flannel_backend')
|
||||||
|
|
||||||
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
||||||
|
|
||||||
k8s_def.get_params(mock_context, mock_baymodel, mock_bay,
|
k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster,
|
||||||
scale_manager=mock_scale_manager)
|
scale_manager=mock_scale_manager)
|
||||||
|
|
||||||
expected_kwargs = {'extra_params': {
|
expected_kwargs = {'extra_params': {
|
||||||
@ -279,8 +281,10 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
'tenant_name': 'fake_tenant',
|
'tenant_name': 'fake_tenant',
|
||||||
'magnum_url': mock_osc.magnum_url.return_value,
|
'magnum_url': mock_osc.magnum_url.return_value,
|
||||||
'region_name': mock_osc.cinder_region_name.return_value}}
|
'region_name': mock_osc.cinder_region_name.return_value}}
|
||||||
mock_get_params.assert_called_once_with(mock_context, mock_baymodel,
|
mock_get_params.assert_called_once_with(mock_context,
|
||||||
mock_bay, **expected_kwargs)
|
mock_cluster_template,
|
||||||
|
mock_cluster,
|
||||||
|
**expected_kwargs)
|
||||||
|
|
||||||
@mock.patch('magnum.common.clients.OpenStackClients')
|
@mock.patch('magnum.common.clients.OpenStackClients')
|
||||||
@mock.patch('magnum.drivers.common.template_def'
|
@mock.patch('magnum.drivers.common.template_def'
|
||||||
@ -293,12 +297,12 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
mock_get_discovery_url, mock_osc_class):
|
mock_get_discovery_url, mock_osc_class):
|
||||||
mock_context = mock.MagicMock()
|
mock_context = mock.MagicMock()
|
||||||
mock_context.auth_token = 'AUTH_TOKEN'
|
mock_context.auth_token = 'AUTH_TOKEN'
|
||||||
mock_baymodel = mock.MagicMock()
|
mock_cluster_template = mock.MagicMock()
|
||||||
mock_baymodel.tls_disabled = True
|
mock_cluster_template.tls_disabled = True
|
||||||
mock_baymodel.registry_enabled = False
|
mock_cluster_template.registry_enabled = False
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
|
mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
|
||||||
del mock_bay.stack_id
|
del mock_cluster.stack_id
|
||||||
mock_scale_manager = mock.MagicMock()
|
mock_scale_manager = mock.MagicMock()
|
||||||
mock_osc = mock.MagicMock()
|
mock_osc = mock.MagicMock()
|
||||||
mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
|
mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
|
||||||
@ -313,13 +317,14 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
mock_context.user_name = 'fake_user'
|
mock_context.user_name = 'fake_user'
|
||||||
mock_context.tenant = 'fake_tenant'
|
mock_context.tenant = 'fake_tenant'
|
||||||
|
|
||||||
flannel_cidr = mock_baymodel.labels.get('flannel_network_cidr')
|
flannel_cidr = mock_cluster_template.labels.get('flannel_network_cidr')
|
||||||
flannel_subnet = mock_baymodel.labels.get('flannel_network_subnetlen')
|
flannel_subnet = mock_cluster_template.labels.get(
|
||||||
flannel_backend = mock_baymodel.labels.get('flannel_backend')
|
'flannel_network_subnetlen')
|
||||||
|
flannel_backend = mock_cluster_template.labels.get('flannel_backend')
|
||||||
|
|
||||||
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
||||||
|
|
||||||
k8s_def.get_params(mock_context, mock_baymodel, mock_bay,
|
k8s_def.get_params(mock_context, mock_cluster_template, mock_cluster,
|
||||||
scale_manager=mock_scale_manager)
|
scale_manager=mock_scale_manager)
|
||||||
|
|
||||||
expected_kwargs = {'extra_params': {
|
expected_kwargs = {'extra_params': {
|
||||||
@ -334,8 +339,10 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
'region_name': mock_osc.cinder_region_name.return_value,
|
'region_name': mock_osc.cinder_region_name.return_value,
|
||||||
'loadbalancing_protocol': 'HTTP',
|
'loadbalancing_protocol': 'HTTP',
|
||||||
'kubernetes_port': 8080}}
|
'kubernetes_port': 8080}}
|
||||||
mock_get_params.assert_called_once_with(mock_context, mock_baymodel,
|
mock_get_params.assert_called_once_with(mock_context,
|
||||||
mock_bay, **expected_kwargs)
|
mock_cluster_template,
|
||||||
|
mock_cluster,
|
||||||
|
**expected_kwargs)
|
||||||
|
|
||||||
@mock.patch('requests.get')
|
@mock.patch('requests.get')
|
||||||
def test_k8s_validate_discovery_url(self, mock_get):
|
def test_k8s_validate_discovery_url(self, mock_get):
|
||||||
@ -364,7 +371,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
mock_get.return_value = mock_resp
|
mock_get.return_value = mock_resp
|
||||||
|
|
||||||
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
||||||
self.assertRaises(exception.InvalidBayDiscoveryURL,
|
self.assertRaises(exception.InvalidClusterDiscoveryURL,
|
||||||
k8s_def.validate_discovery_url,
|
k8s_def.validate_discovery_url,
|
||||||
'http://etcd/test', 1)
|
'http://etcd/test', 1)
|
||||||
|
|
||||||
@ -390,15 +397,15 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
mock_resp = mock.MagicMock()
|
mock_resp = mock.MagicMock()
|
||||||
mock_resp.text = expected_discovery_url
|
mock_resp.text = expected_discovery_url
|
||||||
mock_get.return_value = mock_resp
|
mock_get.return_value = mock_resp
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.master_count = 10
|
mock_cluster.master_count = 10
|
||||||
mock_bay.discovery_url = None
|
mock_cluster.discovery_url = None
|
||||||
|
|
||||||
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
||||||
discovery_url = k8s_def.get_discovery_url(mock_bay)
|
discovery_url = k8s_def.get_discovery_url(mock_cluster)
|
||||||
|
|
||||||
mock_get.assert_called_once_with('http://etcd/test?size=10')
|
mock_get.assert_called_once_with('http://etcd/test?size=10')
|
||||||
self.assertEqual(expected_discovery_url, mock_bay.discovery_url)
|
self.assertEqual(expected_discovery_url, mock_cluster.discovery_url)
|
||||||
self.assertEqual(expected_discovery_url, discovery_url)
|
self.assertEqual(expected_discovery_url, discovery_url)
|
||||||
|
|
||||||
@mock.patch('requests.get')
|
@mock.patch('requests.get')
|
||||||
@ -407,19 +414,19 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
'http://etcd/test?size=%(size)d',
|
'http://etcd/test?size=%(size)d',
|
||||||
group='cluster')
|
group='cluster')
|
||||||
mock_get.side_effect = req_exceptions.RequestException()
|
mock_get.side_effect = req_exceptions.RequestException()
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.master_count = 10
|
mock_cluster.master_count = 10
|
||||||
mock_bay.discovery_url = None
|
mock_cluster.discovery_url = None
|
||||||
|
|
||||||
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
||||||
|
|
||||||
self.assertRaises(exception.GetDiscoveryUrlFailed,
|
self.assertRaises(exception.GetDiscoveryUrlFailed,
|
||||||
k8s_def.get_discovery_url, mock_bay)
|
k8s_def.get_discovery_url, mock_cluster)
|
||||||
|
|
||||||
def test_k8s_get_heat_param(self):
|
def test_k8s_get_heat_param(self):
|
||||||
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
||||||
|
|
||||||
heat_param = k8s_def.get_heat_param(bay_attr='node_count')
|
heat_param = k8s_def.get_heat_param(cluster_attr='node_count')
|
||||||
self.assertEqual('number_of_minions', heat_param)
|
self.assertEqual('number_of_minions', heat_param)
|
||||||
|
|
||||||
@mock.patch('requests.get')
|
@mock.patch('requests.get')
|
||||||
@ -428,13 +435,13 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
mock_resp.text = ''
|
mock_resp.text = ''
|
||||||
mock_get.return_value = mock_resp
|
mock_get.return_value = mock_resp
|
||||||
|
|
||||||
fake_bay = mock.MagicMock()
|
fake_cluster = mock.MagicMock()
|
||||||
fake_bay.discovery_url = None
|
fake_cluster.discovery_url = None
|
||||||
|
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
exception.InvalidDiscoveryURL,
|
exception.InvalidDiscoveryURL,
|
||||||
k8sa_tdef.AtomicK8sTemplateDefinition().get_discovery_url,
|
k8sa_tdef.AtomicK8sTemplateDefinition().get_discovery_url,
|
||||||
fake_bay)
|
fake_cluster)
|
||||||
|
|
||||||
def _test_update_outputs_api_address(self, coe, params, tls=True):
|
def _test_update_outputs_api_address(self, coe, params, tls=True):
|
||||||
|
|
||||||
@ -451,13 +458,14 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
]
|
]
|
||||||
mock_stack = mock.MagicMock()
|
mock_stack = mock.MagicMock()
|
||||||
mock_stack.to_dict.return_value = {'outputs': outputs}
|
mock_stack.to_dict.return_value = {'outputs': outputs}
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_baymodel = mock.MagicMock()
|
mock_cluster_template = mock.MagicMock()
|
||||||
mock_baymodel.tls_disabled = tls
|
mock_cluster_template.tls_disabled = tls
|
||||||
|
|
||||||
definition.update_outputs(mock_stack, mock_baymodel, mock_bay)
|
definition.update_outputs(mock_stack, mock_cluster_template,
|
||||||
|
mock_cluster)
|
||||||
|
|
||||||
self.assertEqual(expected_api_address, mock_bay.api_address)
|
self.assertEqual(expected_api_address, mock_cluster.api_address)
|
||||||
|
|
||||||
def test_update_k8s_outputs_api_address(self):
|
def test_update_k8s_outputs_api_address(self):
|
||||||
address = 'updated_address'
|
address = 'updated_address'
|
||||||
@ -481,7 +489,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
}
|
}
|
||||||
self._test_update_outputs_api_address('swarm', params)
|
self._test_update_outputs_api_address('swarm', params)
|
||||||
|
|
||||||
def test_update_k8s_outputs_if_baymodel_is_secure(self):
|
def test_update_k8s_outputs_if_cluster_template_is_secure(self):
|
||||||
address = 'updated_address'
|
address = 'updated_address'
|
||||||
protocol = 'https'
|
protocol = 'https'
|
||||||
port = '6443'
|
port = '6443'
|
||||||
@ -492,7 +500,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
}
|
}
|
||||||
self._test_update_outputs_api_address('kubernetes', params, tls=False)
|
self._test_update_outputs_api_address('kubernetes', params, tls=False)
|
||||||
|
|
||||||
def test_update_swarm_outputs_if_baymodel_is_secure(self):
|
def test_update_swarm_outputs_if_cluster_template_is_secure(self):
|
||||||
address = 'updated_address'
|
address = 'updated_address'
|
||||||
protocol = 'tcp'
|
protocol = 'tcp'
|
||||||
port = '2376'
|
port = '2376'
|
||||||
@ -517,14 +525,15 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
]
|
]
|
||||||
mock_stack = mock.MagicMock()
|
mock_stack = mock.MagicMock()
|
||||||
mock_stack.to_dict.return_value = {'outputs': outputs}
|
mock_stack.to_dict.return_value = {'outputs': outputs}
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.api_address = 'none_api_address'
|
mock_cluster.api_address = 'none_api_address'
|
||||||
mock_baymodel = mock.MagicMock()
|
mock_cluster_template = mock.MagicMock()
|
||||||
mock_baymodel.tls_disabled = tls
|
mock_cluster_template.tls_disabled = tls
|
||||||
|
|
||||||
definition.update_outputs(mock_stack, mock_baymodel, mock_bay)
|
definition.update_outputs(mock_stack, mock_cluster_template,
|
||||||
|
mock_cluster)
|
||||||
|
|
||||||
self.assertEqual('none_api_address', mock_bay.api_address)
|
self.assertEqual('none_api_address', mock_cluster.api_address)
|
||||||
|
|
||||||
def test_update_k8s_outputs_none_api_address(self):
|
def test_update_k8s_outputs_none_api_address(self):
|
||||||
protocol = 'http'
|
protocol = 'http'
|
||||||
@ -550,14 +559,14 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
self._test_update_outputs_server_addrtess(
|
self._test_update_outputs_server_addrtess(
|
||||||
public_ip_output_key='kube_masters',
|
public_ip_output_key='kube_masters',
|
||||||
private_ip_output_key='kube_masters_private',
|
private_ip_output_key='kube_masters_private',
|
||||||
bay_attr='master_addresses',
|
cluster_attr='master_addresses',
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_update_outputs_node_address(self):
|
def test_update_outputs_node_address(self):
|
||||||
self._test_update_outputs_server_addrtess(
|
self._test_update_outputs_server_addrtess(
|
||||||
public_ip_output_key='kube_minions',
|
public_ip_output_key='kube_minions',
|
||||||
private_ip_output_key='kube_minions_private',
|
private_ip_output_key='kube_minions_private',
|
||||||
bay_attr='node_addresses',
|
cluster_attr='node_addresses',
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_update_outputs_master_address_fip_disabled(self):
|
def test_update_outputs_master_address_fip_disabled(self):
|
||||||
@ -565,7 +574,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
floating_ip_enabled=False,
|
floating_ip_enabled=False,
|
||||||
public_ip_output_key='kube_masters',
|
public_ip_output_key='kube_masters',
|
||||||
private_ip_output_key='kube_masters_private',
|
private_ip_output_key='kube_masters_private',
|
||||||
bay_attr='master_addresses',
|
cluster_attr='master_addresses',
|
||||||
)
|
)
|
||||||
|
|
||||||
def test_update_outputs_node_address_fip_disabled(self):
|
def test_update_outputs_node_address_fip_disabled(self):
|
||||||
@ -573,7 +582,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
|
|||||||
floating_ip_enabled=False,
|
floating_ip_enabled=False,
|
||||||
public_ip_output_key='kube_minions',
|
public_ip_output_key='kube_minions',
|
||||||
private_ip_output_key='kube_minions_private',
|
private_ip_output_key='kube_minions_private',
|
||||||
bay_attr='node_addresses',
|
cluster_attr='node_addresses',
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -586,11 +595,12 @@ class FedoraK8sIronicTemplateDefinitionTestCase(base.TestCase):
|
|||||||
'kubernetes'
|
'kubernetes'
|
||||||
)
|
)
|
||||||
|
|
||||||
def assert_neutron_find(self, mock_neutron_v20_find, osc, baymodel):
|
def assert_neutron_find(self, mock_neutron_v20_find,
|
||||||
|
osc, cluster_template):
|
||||||
mock_neutron_v20_find.assert_called_once_with(
|
mock_neutron_v20_find.assert_called_once_with(
|
||||||
osc.neutron(),
|
osc.neutron(),
|
||||||
'subnet',
|
'subnet',
|
||||||
baymodel.fixed_subnet
|
cluster_template.fixed_subnet
|
||||||
)
|
)
|
||||||
|
|
||||||
def assert_raises_from_get_fixed_network_id(
|
def assert_raises_from_get_fixed_network_id(
|
||||||
@ -601,14 +611,14 @@ class FedoraK8sIronicTemplateDefinitionTestCase(base.TestCase):
|
|||||||
):
|
):
|
||||||
definition = self.get_definition()
|
definition = self.get_definition()
|
||||||
osc = mock.MagicMock()
|
osc = mock.MagicMock()
|
||||||
baymodel = mock.MagicMock()
|
cluster_template = mock.MagicMock()
|
||||||
mock_neutron_v20_find.side_effect = exeption_from_neutron_client
|
mock_neutron_v20_find.side_effect = exeption_from_neutron_client
|
||||||
|
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
expected_exception_class,
|
expected_exception_class,
|
||||||
definition.get_fixed_network_id,
|
definition.get_fixed_network_id,
|
||||||
osc,
|
osc,
|
||||||
baymodel
|
cluster_template
|
||||||
)
|
)
|
||||||
|
|
||||||
@mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
|
@mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
|
||||||
@ -616,7 +626,7 @@ class FedoraK8sIronicTemplateDefinitionTestCase(base.TestCase):
|
|||||||
expected_network_id = 'expected_network_id'
|
expected_network_id = 'expected_network_id'
|
||||||
|
|
||||||
osc = mock.MagicMock()
|
osc = mock.MagicMock()
|
||||||
baymodel = mock.MagicMock()
|
cluster_template = mock.MagicMock()
|
||||||
definition = self.get_definition()
|
definition = self.get_definition()
|
||||||
mock_neutron_v20_find.return_value = {
|
mock_neutron_v20_find.return_value = {
|
||||||
'ip_version': 4,
|
'ip_version': 4,
|
||||||
@ -625,15 +635,15 @@ class FedoraK8sIronicTemplateDefinitionTestCase(base.TestCase):
|
|||||||
|
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
expected_network_id,
|
expected_network_id,
|
||||||
definition.get_fixed_network_id(osc, baymodel)
|
definition.get_fixed_network_id(osc, cluster_template)
|
||||||
)
|
)
|
||||||
self.assert_neutron_find(mock_neutron_v20_find, osc, baymodel)
|
self.assert_neutron_find(mock_neutron_v20_find, osc, cluster_template)
|
||||||
|
|
||||||
@mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
|
@mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
|
||||||
def test_get_fixed_network_id_with_invalid_ip_ver(self,
|
def test_get_fixed_network_id_with_invalid_ip_ver(self,
|
||||||
mock_neutron_v20_find):
|
mock_neutron_v20_find):
|
||||||
osc = mock.MagicMock()
|
osc = mock.MagicMock()
|
||||||
baymodel = mock.MagicMock()
|
cluster_template = mock.MagicMock()
|
||||||
definition = self.get_definition()
|
definition = self.get_definition()
|
||||||
mock_neutron_v20_find.return_value = {
|
mock_neutron_v20_find.return_value = {
|
||||||
'ip_version': 6,
|
'ip_version': 6,
|
||||||
@ -644,7 +654,7 @@ class FedoraK8sIronicTemplateDefinitionTestCase(base.TestCase):
|
|||||||
exception.InvalidSubnet,
|
exception.InvalidSubnet,
|
||||||
definition.get_fixed_network_id,
|
definition.get_fixed_network_id,
|
||||||
osc,
|
osc,
|
||||||
baymodel
|
cluster_template
|
||||||
)
|
)
|
||||||
|
|
||||||
@mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
|
@mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id')
|
||||||
@ -697,12 +707,12 @@ class AtomicSwarmTemplateDefinitionTestCase(base.TestCase):
|
|||||||
mock_get_discovery_url, mock_osc_class):
|
mock_get_discovery_url, mock_osc_class):
|
||||||
mock_context = mock.MagicMock()
|
mock_context = mock.MagicMock()
|
||||||
mock_context.auth_token = 'AUTH_TOKEN'
|
mock_context.auth_token = 'AUTH_TOKEN'
|
||||||
mock_baymodel = mock.MagicMock()
|
mock_cluster_template = mock.MagicMock()
|
||||||
mock_baymodel.tls_disabled = False
|
mock_cluster_template.tls_disabled = False
|
||||||
mock_baymodel.registry_enabled = False
|
mock_cluster_template.registry_enabled = False
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
|
mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
|
||||||
del mock_bay.stack_id
|
del mock_cluster.stack_id
|
||||||
mock_osc = mock.MagicMock()
|
mock_osc = mock.MagicMock()
|
||||||
mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
|
mock_osc.magnum_url.return_value = 'http://127.0.0.1:9511/v1'
|
||||||
mock_osc_class.return_value = mock_osc
|
mock_osc_class.return_value = mock_osc
|
||||||
@ -713,14 +723,15 @@ class AtomicSwarmTemplateDefinitionTestCase(base.TestCase):
|
|||||||
mock_context.user_name = 'fake_user'
|
mock_context.user_name = 'fake_user'
|
||||||
mock_context.tenant = 'fake_tenant'
|
mock_context.tenant = 'fake_tenant'
|
||||||
|
|
||||||
flannel_cidr = mock_baymodel.labels.get('flannel_network_cidr')
|
flannel_cidr = mock_cluster_template.labels.get('flannel_network_cidr')
|
||||||
flannel_subnet = mock_baymodel.labels.get('flannel_network_subnetlen')
|
flannel_subnet = mock_cluster_template.labels.get(
|
||||||
flannel_backend = mock_baymodel.labels.get('flannel_backend')
|
'flannel_network_subnetlen')
|
||||||
rexray_preempt = mock_baymodel.labels.get('rexray_preempt')
|
flannel_backend = mock_cluster_template.labels.get('flannel_backend')
|
||||||
|
rexray_preempt = mock_cluster_template.labels.get('rexray_preempt')
|
||||||
|
|
||||||
swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition()
|
swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition()
|
||||||
|
|
||||||
swarm_def.get_params(mock_context, mock_baymodel, mock_bay)
|
swarm_def.get_params(mock_context, mock_cluster_template, mock_cluster)
|
||||||
|
|
||||||
expected_kwargs = {'extra_params': {
|
expected_kwargs = {'extra_params': {
|
||||||
'discovery_url': 'fake_discovery_url',
|
'discovery_url': 'fake_discovery_url',
|
||||||
@ -730,8 +741,10 @@ class AtomicSwarmTemplateDefinitionTestCase(base.TestCase):
|
|||||||
'flannel_network_subnetlen': flannel_subnet,
|
'flannel_network_subnetlen': flannel_subnet,
|
||||||
'auth_url': 'http://192.168.10.10:5000/v3',
|
'auth_url': 'http://192.168.10.10:5000/v3',
|
||||||
'rexray_preempt': rexray_preempt}}
|
'rexray_preempt': rexray_preempt}}
|
||||||
mock_get_params.assert_called_once_with(mock_context, mock_baymodel,
|
mock_get_params.assert_called_once_with(mock_context,
|
||||||
mock_bay, **expected_kwargs)
|
mock_cluster_template,
|
||||||
|
mock_cluster,
|
||||||
|
**expected_kwargs)
|
||||||
|
|
||||||
@mock.patch('requests.get')
|
@mock.patch('requests.get')
|
||||||
def test_swarm_validate_discovery_url(self, mock_get):
|
def test_swarm_validate_discovery_url(self, mock_get):
|
||||||
@ -760,7 +773,7 @@ class AtomicSwarmTemplateDefinitionTestCase(base.TestCase):
|
|||||||
mock_get.return_value = mock_resp
|
mock_get.return_value = mock_resp
|
||||||
|
|
||||||
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
|
||||||
self.assertRaises(exception.InvalidBayDiscoveryURL,
|
self.assertRaises(exception.InvalidClusterDiscoveryURL,
|
||||||
k8s_def.validate_discovery_url,
|
k8s_def.validate_discovery_url,
|
||||||
'http://etcd/test', 1)
|
'http://etcd/test', 1)
|
||||||
|
|
||||||
@ -786,14 +799,14 @@ class AtomicSwarmTemplateDefinitionTestCase(base.TestCase):
|
|||||||
mock_resp = mock.MagicMock()
|
mock_resp = mock.MagicMock()
|
||||||
mock_resp.text = expected_discovery_url
|
mock_resp.text = expected_discovery_url
|
||||||
mock_get.return_value = mock_resp
|
mock_get.return_value = mock_resp
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.discovery_url = None
|
mock_cluster.discovery_url = None
|
||||||
|
|
||||||
swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition()
|
swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition()
|
||||||
discovery_url = swarm_def.get_discovery_url(mock_bay)
|
discovery_url = swarm_def.get_discovery_url(mock_cluster)
|
||||||
|
|
||||||
mock_get.assert_called_once_with('http://etcd/test?size=1')
|
mock_get.assert_called_once_with('http://etcd/test?size=1')
|
||||||
self.assertEqual(mock_bay.discovery_url, expected_discovery_url)
|
self.assertEqual(mock_cluster.discovery_url, expected_discovery_url)
|
||||||
self.assertEqual(discovery_url, expected_discovery_url)
|
self.assertEqual(discovery_url, expected_discovery_url)
|
||||||
|
|
||||||
@mock.patch('requests.get')
|
@mock.patch('requests.get')
|
||||||
@ -802,18 +815,18 @@ class AtomicSwarmTemplateDefinitionTestCase(base.TestCase):
|
|||||||
mock_resp.text = ''
|
mock_resp.text = ''
|
||||||
mock_get.return_value = mock_resp
|
mock_get.return_value = mock_resp
|
||||||
|
|
||||||
fake_bay = mock.MagicMock()
|
fake_cluster = mock.MagicMock()
|
||||||
fake_bay.discovery_url = None
|
fake_cluster.discovery_url = None
|
||||||
|
|
||||||
self.assertRaises(
|
self.assertRaises(
|
||||||
exception.InvalidDiscoveryURL,
|
exception.InvalidDiscoveryURL,
|
||||||
k8sa_tdef.AtomicK8sTemplateDefinition().get_discovery_url,
|
k8sa_tdef.AtomicK8sTemplateDefinition().get_discovery_url,
|
||||||
fake_bay)
|
fake_cluster)
|
||||||
|
|
||||||
def test_swarm_get_heat_param(self):
|
def test_swarm_get_heat_param(self):
|
||||||
swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition()
|
swarm_def = swarm_tdef.AtomicSwarmTemplateDefinition()
|
||||||
|
|
||||||
heat_param = swarm_def.get_heat_param(bay_attr='node_count')
|
heat_param = swarm_def.get_heat_param(cluster_attr='node_count')
|
||||||
self.assertEqual('number_of_nodes', heat_param)
|
self.assertEqual('number_of_nodes', heat_param)
|
||||||
|
|
||||||
def test_update_outputs(self):
|
def test_update_outputs(self):
|
||||||
@ -841,13 +854,14 @@ class AtomicSwarmTemplateDefinitionTestCase(base.TestCase):
|
|||||||
]
|
]
|
||||||
mock_stack = mock.MagicMock()
|
mock_stack = mock.MagicMock()
|
||||||
mock_stack.to_dict.return_value = {'outputs': outputs}
|
mock_stack.to_dict.return_value = {'outputs': outputs}
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_baymodel = mock.MagicMock()
|
mock_cluster_template = mock.MagicMock()
|
||||||
|
|
||||||
swarm_def.update_outputs(mock_stack, mock_baymodel, mock_bay)
|
swarm_def.update_outputs(mock_stack, mock_cluster_template,
|
||||||
|
mock_cluster)
|
||||||
expected_api_address = "tcp://%s:2376" % expected_api_address
|
expected_api_address = "tcp://%s:2376" % expected_api_address
|
||||||
self.assertEqual(expected_api_address, mock_bay.api_address)
|
self.assertEqual(expected_api_address, mock_cluster.api_address)
|
||||||
self.assertEqual(expected_node_addresses, mock_bay.node_addresses)
|
self.assertEqual(expected_node_addresses, mock_cluster.node_addresses)
|
||||||
|
|
||||||
|
|
||||||
class UbuntuMesosTemplateDefinitionTestCase(base.TestCase):
|
class UbuntuMesosTemplateDefinitionTestCase(base.TestCase):
|
||||||
@ -864,19 +878,20 @@ class UbuntuMesosTemplateDefinitionTestCase(base.TestCase):
|
|||||||
mock_context.user_name = 'mesos_user'
|
mock_context.user_name = 'mesos_user'
|
||||||
mock_context.tenant = 'admin'
|
mock_context.tenant = 'admin'
|
||||||
mock_context.domain_name = 'domainname'
|
mock_context.domain_name = 'domainname'
|
||||||
mock_baymodel = mock.MagicMock()
|
mock_cluster_template = mock.MagicMock()
|
||||||
mock_baymodel.tls_disabled = False
|
mock_cluster_template.tls_disabled = False
|
||||||
rexray_preempt = mock_baymodel.labels.get('rexray_preempt')
|
rexray_preempt = mock_cluster_template.labels.get('rexray_preempt')
|
||||||
mesos_slave_isolation = mock_baymodel.labels.get(
|
mesos_slave_isolation = mock_cluster_template.labels.get(
|
||||||
'mesos_slave_isolation')
|
'mesos_slave_isolation')
|
||||||
mesos_slave_work_dir = mock_baymodel.labels.get('mesos_slave_work_dir')
|
mesos_slave_work_dir = mock_cluster_template.labels.get(
|
||||||
mesos_slave_image_providers = mock_baymodel.labels.get(
|
'mesos_slave_work_dir')
|
||||||
|
mesos_slave_image_providers = mock_cluster_template.labels.get(
|
||||||
'image_providers')
|
'image_providers')
|
||||||
mesos_slave_executor_env_variables = mock_baymodel.labels.get(
|
mesos_slave_executor_env_variables = mock_cluster_template.labels.get(
|
||||||
'mesos_slave_executor_env_variables')
|
'mesos_slave_executor_env_variables')
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_bay.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
|
mock_cluster.uuid = '5d12f6fd-a196-4bf0-ae4c-1f639a523a52'
|
||||||
del mock_bay.stack_id
|
del mock_cluster.stack_id
|
||||||
mock_osc = mock.MagicMock()
|
mock_osc = mock.MagicMock()
|
||||||
mock_osc.cinder_region_name.return_value = 'RegionOne'
|
mock_osc.cinder_region_name.return_value = 'RegionOne'
|
||||||
mock_osc_class.return_value = mock_osc
|
mock_osc_class.return_value = mock_osc
|
||||||
@ -887,7 +902,7 @@ class UbuntuMesosTemplateDefinitionTestCase(base.TestCase):
|
|||||||
|
|
||||||
mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition()
|
mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition()
|
||||||
|
|
||||||
mesos_def.get_params(mock_context, mock_baymodel, mock_bay,
|
mesos_def.get_params(mock_context, mock_cluster_template, mock_cluster,
|
||||||
scale_manager=mock_scale_manager)
|
scale_manager=mock_scale_manager)
|
||||||
|
|
||||||
expected_kwargs = {'extra_params': {
|
expected_kwargs = {'extra_params': {
|
||||||
@ -903,16 +918,18 @@ class UbuntuMesosTemplateDefinitionTestCase(base.TestCase):
|
|||||||
mesos_slave_executor_env_variables,
|
mesos_slave_executor_env_variables,
|
||||||
'mesos_slave_image_providers': mesos_slave_image_providers,
|
'mesos_slave_image_providers': mesos_slave_image_providers,
|
||||||
'slaves_to_remove': removal_nodes}}
|
'slaves_to_remove': removal_nodes}}
|
||||||
mock_get_params.assert_called_once_with(mock_context, mock_baymodel,
|
mock_get_params.assert_called_once_with(mock_context,
|
||||||
mock_bay, **expected_kwargs)
|
mock_cluster_template,
|
||||||
|
mock_cluster,
|
||||||
|
**expected_kwargs)
|
||||||
|
|
||||||
def test_mesos_get_heat_param(self):
|
def test_mesos_get_heat_param(self):
|
||||||
mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition()
|
mesos_def = mesos_tdef.UbuntuMesosTemplateDefinition()
|
||||||
|
|
||||||
heat_param = mesos_def.get_heat_param(bay_attr='node_count')
|
heat_param = mesos_def.get_heat_param(cluster_attr='node_count')
|
||||||
self.assertEqual('number_of_slaves', heat_param)
|
self.assertEqual('number_of_slaves', heat_param)
|
||||||
|
|
||||||
heat_param = mesos_def.get_heat_param(bay_attr='master_count')
|
heat_param = mesos_def.get_heat_param(cluster_attr='master_count')
|
||||||
self.assertEqual('number_of_masters', heat_param)
|
self.assertEqual('number_of_masters', heat_param)
|
||||||
|
|
||||||
def test_update_outputs(self):
|
def test_update_outputs(self):
|
||||||
@ -941,11 +958,13 @@ class UbuntuMesosTemplateDefinitionTestCase(base.TestCase):
|
|||||||
]
|
]
|
||||||
mock_stack = mock.MagicMock()
|
mock_stack = mock.MagicMock()
|
||||||
mock_stack.to_dict.return_value = {'outputs': outputs}
|
mock_stack.to_dict.return_value = {'outputs': outputs}
|
||||||
mock_bay = mock.MagicMock()
|
mock_cluster = mock.MagicMock()
|
||||||
mock_baymodel = mock.MagicMock()
|
mock_cluster_template = mock.MagicMock()
|
||||||
|
|
||||||
mesos_def.update_outputs(mock_stack, mock_baymodel, mock_bay)
|
mesos_def.update_outputs(mock_stack, mock_cluster_template,
|
||||||
|
mock_cluster)
|
||||||
|
|
||||||
self.assertEqual(expected_api_address, mock_bay.api_address)
|
self.assertEqual(expected_api_address, mock_cluster.api_address)
|
||||||
self.assertEqual(expected_node_addresses, mock_bay.node_addresses)
|
self.assertEqual(expected_node_addresses, mock_cluster.node_addresses)
|
||||||
self.assertEqual(expected_master_addresses, mock_bay.master_addresses)
|
self.assertEqual(expected_master_addresses,
|
||||||
|
mock_cluster.master_addresses)
|
||||||
|
magnum/tests/unit/objects/test_bay.py (deleted file)
@ -1,192 +0,0 @@
|
|||||||
# Copyright 2015 OpenStack Foundation
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo_utils import uuidutils
|
|
||||||
from testtools.matchers import HasLength
|
|
||||||
|
|
||||||
from magnum.common import exception
|
|
||||||
from magnum import objects
|
|
||||||
from magnum.tests.unit.db import base
|
|
||||||
from magnum.tests.unit.db import utils
|
|
||||||
|
|
||||||
|
|
||||||
class TestBayObject(base.DbTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestBayObject, self).setUp()
|
|
||||||
self.fake_bay = utils.get_test_bay()
|
|
||||||
self.fake_bay['trust_id'] = 'trust_id'
|
|
||||||
self.fake_bay['trustee_username'] = 'trustee_user'
|
|
||||||
self.fake_bay['trustee_user_id'] = 'trustee_user_id'
|
|
||||||
self.fake_bay['trustee_password'] = 'password'
|
|
||||||
self.fake_bay['coe_version'] = 'fake-coe-version'
|
|
||||||
self.fake_bay['container_version'] = 'fake-container-version'
|
|
||||||
cluster_template_id = self.fake_bay['baymodel_id']
|
|
||||||
self.fake_cluster_template = objects.ClusterTemplate(
|
|
||||||
uuid=cluster_template_id)
|
|
||||||
|
|
||||||
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
|
||||||
def test_get_by_id(self, mock_cluster_template_get):
|
|
||||||
bay_id = self.fake_bay['id']
|
|
||||||
with mock.patch.object(self.dbapi, 'get_bay_by_id',
|
|
||||||
autospec=True) as mock_get_bay:
|
|
||||||
mock_cluster_template_get.return_value = self.fake_cluster_template
|
|
||||||
mock_get_bay.return_value = self.fake_bay
|
|
||||||
bay = objects.Bay.get(self.context, bay_id)
|
|
||||||
mock_get_bay.assert_called_once_with(self.context, bay_id)
|
|
||||||
self.assertEqual(self.context, bay._context)
|
|
||||||
self.assertEqual(bay.baymodel_id, bay.cluster_template.uuid)
|
|
||||||
|
|
||||||
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
|
||||||
def test_get_by_uuid(self, mock_cluster_template_get):
|
|
||||||
uuid = self.fake_bay['uuid']
|
|
||||||
with mock.patch.object(self.dbapi, 'get_bay_by_uuid',
|
|
||||||
autospec=True) as mock_get_bay:
|
|
||||||
mock_cluster_template_get.return_value = self.fake_cluster_template
|
|
||||||
mock_get_bay.return_value = self.fake_bay
|
|
||||||
bay = objects.Bay.get(self.context, uuid)
|
|
||||||
mock_get_bay.assert_called_once_with(self.context, uuid)
|
|
||||||
self.assertEqual(self.context, bay._context)
|
|
||||||
self.assertEqual(bay.baymodel_id, bay.cluster_template.uuid)
|
|
||||||
|
|
||||||
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
|
||||||
def test_get_by_name(self, mock_cluster_template_get):
|
|
||||||
name = self.fake_bay['name']
|
|
||||||
with mock.patch.object(self.dbapi, 'get_bay_by_name',
|
|
||||||
autospec=True) as mock_get_bay:
|
|
||||||
mock_cluster_template_get.return_value = self.fake_cluster_template
|
|
||||||
mock_get_bay.return_value = self.fake_bay
|
|
||||||
bay = objects.Bay.get_by_name(self.context, name)
|
|
||||||
mock_get_bay.assert_called_once_with(self.context, name)
|
|
||||||
self.assertEqual(self.context, bay._context)
|
|
||||||
self.assertEqual(bay.baymodel_id, bay.cluster_template.uuid)
|
|
||||||
|
|
||||||
def test_get_bad_id_and_uuid(self):
|
|
||||||
self.assertRaises(exception.InvalidIdentity,
|
|
||||||
objects.Bay.get, self.context, 'not-a-uuid')
|
|
||||||
|
|
||||||
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
|
||||||
def test_list(self, mock_cluster_template_get):
|
|
||||||
with mock.patch.object(self.dbapi, 'get_bay_list',
|
|
||||||
autospec=True) as mock_get_list:
|
|
||||||
mock_get_list.return_value = [self.fake_bay]
|
|
||||||
mock_cluster_template_get.return_value = self.fake_cluster_template
|
|
||||||
bays = objects.Bay.list(self.context)
|
|
||||||
self.assertEqual(1, mock_get_list.call_count)
|
|
||||||
self.assertThat(bays, HasLength(1))
|
|
||||||
self.assertIsInstance(bays[0], objects.Bay)
|
|
||||||
self.assertEqual(self.context, bays[0]._context)
|
|
||||||
self.assertEqual(bays[0].baymodel_id,
|
|
||||||
bays[0].cluster_template.uuid)
|
|
||||||
|
|
||||||
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
|
||||||
def test_list_all(self, mock_cluster_template_get):
|
|
||||||
with mock.patch.object(self.dbapi, 'get_bay_list',
|
|
||||||
autospec=True) as mock_get_list:
|
|
||||||
mock_get_list.return_value = [self.fake_bay]
|
|
||||||
mock_cluster_template_get.return_value = self.fake_cluster_template
|
|
||||||
self.context.all_tenants = True
|
|
||||||
bays = objects.Bay.list(self.context)
|
|
||||||
mock_get_list.assert_called_once_with(
|
|
||||||
self.context, limit=None, marker=None, filters=None,
|
|
||||||
sort_dir=None, sort_key=None)
|
|
||||||
self.assertEqual(1, mock_get_list.call_count)
|
|
||||||
self.assertThat(bays, HasLength(1))
|
|
||||||
self.assertIsInstance(bays[0], objects.Bay)
|
|
||||||
self.assertEqual(self.context, bays[0]._context)
|
|
||||||
|
|
||||||
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
|
||||||
def test_list_with_filters(self, mock_cluster_template_get):
|
|
||||||
with mock.patch.object(self.dbapi, 'get_bay_list',
|
|
||||||
autospec=True) as mock_get_list:
|
|
||||||
mock_get_list.return_value = [self.fake_bay]
|
|
||||||
mock_cluster_template_get.return_value = self.fake_cluster_template
|
|
||||||
filters = {'name': 'bay1'}
|
|
||||||
bays = objects.Bay.list(self.context, filters=filters)
|
|
||||||
|
|
||||||
mock_get_list.assert_called_once_with(self.context, sort_key=None,
|
|
||||||
sort_dir=None,
|
|
||||||
filters=filters, limit=None,
|
|
||||||
marker=None)
|
|
||||||
self.assertEqual(1, mock_get_list.call_count)
|
|
||||||
self.assertThat(bays, HasLength(1))
|
|
||||||
self.assertIsInstance(bays[0], objects.Bay)
|
|
||||||
self.assertEqual(self.context, bays[0]._context)
|
|
||||||
|
|
||||||
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
|
||||||
def test_create(self, mock_cluster_template_get):
|
|
||||||
with mock.patch.object(self.dbapi, 'create_bay',
|
|
||||||
autospec=True) as mock_create_bay:
|
|
||||||
mock_cluster_template_get.return_value = self.fake_cluster_template
|
|
||||||
mock_create_bay.return_value = self.fake_bay
|
|
||||||
bay = objects.Bay(self.context, **self.fake_bay)
|
|
||||||
bay.create()
|
|
||||||
mock_create_bay.assert_called_once_with(self.fake_bay)
|
|
||||||
self.assertEqual(self.context, bay._context)
|
|
||||||
|
|
||||||
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
|
||||||
def test_destroy(self, mock_cluster_template_get):
|
|
||||||
uuid = self.fake_bay['uuid']
|
|
||||||
with mock.patch.object(self.dbapi, 'get_bay_by_uuid',
|
|
||||||
autospec=True) as mock_get_bay:
|
|
||||||
mock_get_bay.return_value = self.fake_bay
|
|
||||||
mock_cluster_template_get.return_value = self.fake_cluster_template
|
|
||||||
with mock.patch.object(self.dbapi, 'destroy_bay',
|
|
||||||
autospec=True) as mock_destroy_bay:
|
|
||||||
bay = objects.Bay.get_by_uuid(self.context, uuid)
|
|
||||||
bay.destroy()
|
|
||||||
mock_get_bay.assert_called_once_with(self.context, uuid)
|
|
||||||
mock_destroy_bay.assert_called_once_with(uuid)
|
|
||||||
self.assertEqual(self.context, bay._context)
|
|
||||||
|
|
||||||
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
|
||||||
def test_save(self, mock_cluster_template_get):
|
|
||||||
uuid = self.fake_bay['uuid']
|
|
||||||
with mock.patch.object(self.dbapi, 'get_bay_by_uuid',
|
|
||||||
autospec=True) as mock_get_bay:
|
|
||||||
mock_cluster_template_get.return_value = self.fake_cluster_template
|
|
||||||
mock_get_bay.return_value = self.fake_bay
|
|
||||||
with mock.patch.object(self.dbapi, 'update_bay',
|
|
||||||
autospec=True) as mock_update_bay:
|
|
||||||
bay = objects.Bay.get_by_uuid(self.context, uuid)
|
|
||||||
bay.node_count = 10
|
|
||||||
bay.master_count = 5
|
|
||||||
bay.save()
|
|
||||||
|
|
||||||
mock_get_bay.assert_called_once_with(self.context, uuid)
|
|
||||||
mock_update_bay.assert_called_once_with(
|
|
||||||
uuid, {'node_count': 10, 'master_count': 5,
|
|
||||||
'cluster_template': self.fake_cluster_template})
|
|
||||||
self.assertEqual(self.context, bay._context)
|
|
||||||
|
|
||||||
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
|
||||||
def test_refresh(self, mock_cluster_template_get):
|
|
||||||
uuid = self.fake_bay['uuid']
|
|
||||||
new_uuid = uuidutils.generate_uuid()
|
|
||||||
returns = [dict(self.fake_bay, uuid=uuid),
|
|
||||||
dict(self.fake_bay, uuid=new_uuid)]
|
|
||||||
expected = [mock.call(self.context, uuid),
|
|
||||||
mock.call(self.context, uuid)]
|
|
||||||
with mock.patch.object(self.dbapi, 'get_bay_by_uuid',
|
|
||||||
side_effect=returns,
|
|
||||||
autospec=True) as mock_get_bay:
|
|
||||||
mock_cluster_template_get.return_value = self.fake_cluster_template
|
|
||||||
bay = objects.Bay.get_by_uuid(self.context, uuid)
|
|
||||||
self.assertEqual(uuid, bay.uuid)
|
|
||||||
bay.refresh()
|
|
||||||
self.assertEqual(new_uuid, bay.uuid)
|
|
||||||
self.assertEqual(expected, mock_get_bay.call_args_list)
|
|
||||||
self.assertEqual(self.context, bay._context)
|
|
magnum/tests/unit/objects/test_cluster.py (new file, 195 lines)
@ -0,0 +1,195 @@
|
|||||||
|
# Copyright 2015 OpenStack Foundation
|
||||||
|
# All Rights Reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import mock
|
||||||
|
from oslo_utils import uuidutils
|
||||||
|
from testtools.matchers import HasLength
|
||||||
|
|
||||||
|
from magnum.common import exception
|
||||||
|
from magnum import objects
|
||||||
|
from magnum.tests.unit.db import base
|
||||||
|
from magnum.tests.unit.db import utils
|
||||||
|
|
||||||
|
|
||||||
|
class TestClusterObject(base.DbTestCase):
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
super(TestClusterObject, self).setUp()
|
||||||
|
self.fake_cluster = utils.get_test_cluster()
|
||||||
|
self.fake_cluster['trust_id'] = 'trust_id'
|
||||||
|
self.fake_cluster['trustee_username'] = 'trustee_user'
|
||||||
|
self.fake_cluster['trustee_user_id'] = 'trustee_user_id'
|
||||||
|
self.fake_cluster['trustee_password'] = 'password'
|
||||||
|
self.fake_cluster['coe_version'] = 'fake-coe-version'
|
||||||
|
self.fake_cluster['container_version'] = 'fake-container-version'
|
||||||
|
cluster_template_id = self.fake_cluster['cluster_template_id']
|
||||||
|
self.fake_cluster_template = objects.ClusterTemplate(
|
||||||
|
uuid=cluster_template_id)
|
||||||
|
|
||||||
|
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
||||||
|
def test_get_by_id(self, mock_cluster_template_get):
|
||||||
|
cluster_id = self.fake_cluster['id']
|
||||||
|
with mock.patch.object(self.dbapi, 'get_cluster_by_id',
|
||||||
|
autospec=True) as mock_get_cluster:
|
||||||
|
mock_cluster_template_get.return_value = self.fake_cluster_template
|
||||||
|
mock_get_cluster.return_value = self.fake_cluster
|
||||||
|
cluster = objects.Cluster.get(self.context, cluster_id)
|
||||||
|
mock_get_cluster.assert_called_once_with(self.context, cluster_id)
|
||||||
|
self.assertEqual(self.context, cluster._context)
|
||||||
|
self.assertEqual(cluster.cluster_template_id,
|
||||||
|
cluster.cluster_template.uuid)
|
||||||
|
|
||||||
|
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
||||||
|
def test_get_by_uuid(self, mock_cluster_template_get):
|
||||||
|
uuid = self.fake_cluster['uuid']
|
||||||
|
with mock.patch.object(self.dbapi, 'get_cluster_by_uuid',
|
||||||
|
autospec=True) as mock_get_cluster:
|
||||||
|
mock_cluster_template_get.return_value = self.fake_cluster_template
|
||||||
|
mock_get_cluster.return_value = self.fake_cluster
|
||||||
|
cluster = objects.Cluster.get(self.context, uuid)
|
||||||
|
mock_get_cluster.assert_called_once_with(self.context, uuid)
|
||||||
|
self.assertEqual(self.context, cluster._context)
|
||||||
|
self.assertEqual(cluster.cluster_template_id,
|
||||||
|
cluster.cluster_template.uuid)
|
||||||
|
|
||||||
|
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
||||||
|
def test_get_by_name(self, mock_cluster_template_get):
|
||||||
|
name = self.fake_cluster['name']
|
||||||
|
with mock.patch.object(self.dbapi, 'get_cluster_by_name',
|
||||||
|
autospec=True) as mock_get_cluster:
|
||||||
|
mock_cluster_template_get.return_value = self.fake_cluster_template
|
||||||
|
mock_get_cluster.return_value = self.fake_cluster
|
||||||
|
cluster = objects.Cluster.get_by_name(self.context, name)
|
||||||
|
mock_get_cluster.assert_called_once_with(self.context, name)
|
||||||
|
self.assertEqual(self.context, cluster._context)
|
||||||
|
self.assertEqual(cluster.cluster_template_id,
|
||||||
|
cluster.cluster_template.uuid)
|
||||||
|
|
||||||
|
def test_get_bad_id_and_uuid(self):
|
||||||
|
self.assertRaises(exception.InvalidIdentity,
|
||||||
|
objects.Cluster.get, self.context, 'not-a-uuid')
|
||||||
|
|
||||||
|
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
||||||
|
def test_list(self, mock_cluster_template_get):
|
||||||
|
with mock.patch.object(self.dbapi, 'get_cluster_list',
|
||||||
|
autospec=True) as mock_get_list:
|
||||||
|
mock_get_list.return_value = [self.fake_cluster]
|
||||||
|
mock_cluster_template_get.return_value = self.fake_cluster_template
|
||||||
|
clusters = objects.Cluster.list(self.context)
|
||||||
|
self.assertEqual(1, mock_get_list.call_count)
|
||||||
|
self.assertThat(clusters, HasLength(1))
|
||||||
|
self.assertIsInstance(clusters[0], objects.Cluster)
|
||||||
|
self.assertEqual(self.context, clusters[0]._context)
|
||||||
|
self.assertEqual(clusters[0].cluster_template_id,
|
||||||
|
clusters[0].cluster_template.uuid)
|
||||||
|
|
||||||
|
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
||||||
|
def test_list_all(self, mock_cluster_template_get):
|
||||||
|
with mock.patch.object(self.dbapi, 'get_cluster_list',
|
||||||
|
autospec=True) as mock_get_list:
|
||||||
|
mock_get_list.return_value = [self.fake_cluster]
|
||||||
|
mock_cluster_template_get.return_value = self.fake_cluster_template
|
||||||
|
self.context.all_tenants = True
|
||||||
|
clusters = objects.Cluster.list(self.context)
|
||||||
|
mock_get_list.assert_called_once_with(
|
||||||
|
self.context, limit=None, marker=None, filters=None,
|
||||||
|
sort_dir=None, sort_key=None)
|
||||||
|
self.assertEqual(1, mock_get_list.call_count)
|
||||||
|
self.assertThat(clusters, HasLength(1))
|
||||||
|
self.assertIsInstance(clusters[0], objects.Cluster)
|
||||||
|
self.assertEqual(self.context, clusters[0]._context)
|
||||||
|
|
||||||
|
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
||||||
|
def test_list_with_filters(self, mock_cluster_template_get):
|
||||||
|
with mock.patch.object(self.dbapi, 'get_cluster_list',
|
||||||
|
autospec=True) as mock_get_list:
|
||||||
|
mock_get_list.return_value = [self.fake_cluster]
|
||||||
|
mock_cluster_template_get.return_value = self.fake_cluster_template
|
||||||
|
filters = {'name': 'cluster1'}
|
||||||
|
clusters = objects.Cluster.list(self.context, filters=filters)
|
||||||
|
|
||||||
|
mock_get_list.assert_called_once_with(self.context, sort_key=None,
|
||||||
|
sort_dir=None,
|
||||||
|
filters=filters, limit=None,
|
||||||
|
marker=None)
|
||||||
|
self.assertEqual(1, mock_get_list.call_count)
|
||||||
|
self.assertThat(clusters, HasLength(1))
|
||||||
|
self.assertIsInstance(clusters[0], objects.Cluster)
|
||||||
|
self.assertEqual(self.context, clusters[0]._context)
|
||||||
|
|
||||||
|
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
||||||
|
def test_create(self, mock_cluster_template_get):
|
||||||
|
with mock.patch.object(self.dbapi, 'create_cluster',
|
||||||
|
autospec=True) as mock_create_cluster:
|
||||||
|
mock_cluster_template_get.return_value = self.fake_cluster_template
|
||||||
|
mock_create_cluster.return_value = self.fake_cluster
|
||||||
|
cluster = objects.Cluster(self.context, **self.fake_cluster)
|
||||||
|
cluster.create()
|
||||||
|
mock_create_cluster.assert_called_once_with(self.fake_cluster)
|
||||||
|
self.assertEqual(self.context, cluster._context)
|
||||||
|
|
||||||
|
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
||||||
|
def test_destroy(self, mock_cluster_template_get):
|
||||||
|
uuid = self.fake_cluster['uuid']
|
||||||
|
with mock.patch.object(self.dbapi, 'get_cluster_by_uuid',
|
||||||
|
autospec=True) as mock_get_cluster:
|
||||||
|
mock_get_cluster.return_value = self.fake_cluster
|
||||||
|
mock_cluster_template_get.return_value = self.fake_cluster_template
|
||||||
|
with mock.patch.object(self.dbapi, 'destroy_cluster',
|
||||||
|
autospec=True) as mock_destroy_cluster:
|
||||||
|
cluster = objects.Cluster.get_by_uuid(self.context, uuid)
|
||||||
|
cluster.destroy()
|
||||||
|
mock_get_cluster.assert_called_once_with(self.context, uuid)
|
||||||
|
mock_destroy_cluster.assert_called_once_with(uuid)
|
||||||
|
self.assertEqual(self.context, cluster._context)
|
||||||
|
|
||||||
|
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
||||||
|
def test_save(self, mock_cluster_template_get):
|
||||||
|
uuid = self.fake_cluster['uuid']
|
||||||
|
with mock.patch.object(self.dbapi, 'get_cluster_by_uuid',
|
||||||
|
autospec=True) as mock_get_cluster:
|
||||||
|
mock_cluster_template_get.return_value = self.fake_cluster_template
|
||||||
|
mock_get_cluster.return_value = self.fake_cluster
|
||||||
|
with mock.patch.object(self.dbapi, 'update_cluster',
|
||||||
|
autospec=True) as mock_update_cluster:
|
||||||
|
cluster = objects.Cluster.get_by_uuid(self.context, uuid)
|
||||||
|
cluster.node_count = 10
|
||||||
|
cluster.master_count = 5
|
||||||
|
cluster.save()
|
||||||
|
|
||||||
|
mock_get_cluster.assert_called_once_with(self.context, uuid)
|
||||||
|
mock_update_cluster.assert_called_once_with(
|
||||||
|
uuid, {'node_count': 10, 'master_count': 5,
|
||||||
|
'cluster_template': self.fake_cluster_template})
|
||||||
|
self.assertEqual(self.context, cluster._context)
|
||||||
|
|
||||||
|
@mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
|
||||||
|
def test_refresh(self, mock_cluster_template_get):
|
||||||
|
uuid = self.fake_cluster['uuid']
|
||||||
|
new_uuid = uuidutils.generate_uuid()
|
||||||
|
returns = [dict(self.fake_cluster, uuid=uuid),
|
||||||
|
dict(self.fake_cluster, uuid=new_uuid)]
|
||||||
|
expected = [mock.call(self.context, uuid),
|
||||||
|
mock.call(self.context, uuid)]
|
||||||
|
with mock.patch.object(self.dbapi, 'get_cluster_by_uuid',
|
||||||
|
side_effect=returns,
|
||||||
|
autospec=True) as mock_get_cluster:
|
||||||
|
mock_cluster_template_get.return_value = self.fake_cluster_template
|
||||||
|
cluster = objects.Cluster.get_by_uuid(self.context, uuid)
|
||||||
|
self.assertEqual(uuid, cluster.uuid)
|
||||||
|
cluster.refresh()
|
||||||
|
self.assertEqual(new_uuid, cluster.uuid)
|
||||||
|
self.assertEqual(expected, mock_get_cluster.call_args_list)
|
||||||
|
self.assertEqual(self.context, cluster._context)
|
@ -17,10 +17,10 @@ from oslo_versionedobjects.tests import test_fields
|
|||||||
from magnum.objects import fields
|
from magnum.objects import fields
|
||||||
|
|
||||||
|
|
||||||
class TestBayStatus(test_fields.TestField):
|
class TestClusterStatus(test_fields.TestField):
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(TestBayStatus, self).setUp()
|
super(TestClusterStatus, self).setUp()
|
||||||
self.field = fields.BayStatusField()
|
self.field = fields.ClusterStatusField()
|
||||||
self.coerce_good_values = [('CREATE_IN_PROGRESS',
|
self.coerce_good_values = [('CREATE_IN_PROGRESS',
|
||||||
'CREATE_IN_PROGRESS'),
|
'CREATE_IN_PROGRESS'),
|
||||||
('CREATE_FAILED', 'CREATE_FAILED'),
|
('CREATE_FAILED', 'CREATE_FAILED'),
|
||||||
@ -71,10 +71,10 @@ class TestContainerStatus(test_fields.TestField):
|
|||||||
self.assertRaises(ValueError, self.field.stringify, 'DELETED')
|
self.assertRaises(ValueError, self.field.stringify, 'DELETED')
|
||||||
|
|
||||||
|
|
||||||
class TestBayType(test_fields.TestField):
|
class TestClusterType(test_fields.TestField):
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
super(TestBayType, self).setUp()
|
super(TestClusterType, self).setUp()
|
||||||
self.field = fields.BayTypeField()
|
self.field = fields.ClusterTypeField()
|
||||||
self.coerce_good_values = [('kubernetes', 'kubernetes'),
|
self.coerce_good_values = [('kubernetes', 'kubernetes'),
|
||||||
('swarm', 'swarm'),
|
('swarm', 'swarm'),
|
||||||
('mesos', 'mesos'), ]
|
('mesos', 'mesos'), ]
|
||||||
|
@ -362,8 +362,8 @@ class TestObject(test_base.TestCase, _TestObject):
|
|||||||
# For more information on object version testing, read
|
# For more information on object version testing, read
|
||||||
# http://docs.openstack.org/developer/magnum/objects.html
|
# http://docs.openstack.org/developer/magnum/objects.html
|
||||||
object_data = {
|
object_data = {
|
||||||
'Bay': '1.8-a6109e08d32dc59d3ad100697e06d8da',
|
'Cluster': '1.9-f9838e23eef5f1a7d9606c1ccce21800',
|
||||||
'ClusterTemplate': '1.16-29dfb88bff54a412b05f9a651f4758a6',
|
'ClusterTemplate': '1.17-65a95ef932dd08800a83871eb3cf312b',
|
||||||
'Certificate': '1.1-1924dc077daa844f0f9076332ef96815',
|
'Certificate': '1.1-1924dc077daa844f0f9076332ef96815',
|
||||||
'MyObj': '1.0-b43567e512438205e32f4e95ca616697',
|
'MyObj': '1.0-b43567e512438205e32f4e95ca616697',
|
||||||
'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd',
|
'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd',
|
||||||
|
@ -60,51 +60,33 @@ def create_test_cluster_template(context, **kw):
|
|||||||
return cluster_template
|
return cluster_template
|
||||||
|
|
||||||
|
|
||||||
def get_test_bay(context, **kw):
|
def get_test_cluster(context, **kw):
|
||||||
"""Return a Bay object with appropriate attributes.
|
"""Return a Cluster object with appropriate attributes.
|
||||||
|
|
||||||
NOTE: The object leaves the attributes marked as changed, such
|
NOTE: The object leaves the attributes marked as changed, such
|
||||||
that a create() could be used to commit it to the DB.
|
that a create() could be used to commit it to the DB.
|
||||||
"""
|
"""
|
||||||
db_bay = db_utils.get_test_bay(**kw)
|
db_cluster = db_utils.get_test_cluster(**kw)
|
||||||
# Let DB generate ID if it isn't specified explicitly
|
# Let DB generate ID if it isn't specified explicitly
|
||||||
if 'id' not in kw:
|
if 'id' not in kw:
|
||||||
del db_bay['id']
|
del db_cluster['id']
|
||||||
bay = objects.Bay(context)
|
cluster = objects.Cluster(context)
|
||||||
for key in db_bay:
|
for key in db_cluster:
|
||||||
setattr(bay, key, db_bay[key])
|
setattr(cluster, key, db_cluster[key])
|
||||||
return bay
|
return cluster
|
||||||
|
|
||||||
|
|
||||||
def create_test_bay(context, **kw):
|
|
||||||
"""Create and return a test bay object.
|
|
||||||
|
|
||||||
Create a bay in the DB and return a Bay object with appropriate
|
|
||||||
attributes.
|
|
||||||
"""
|
|
||||||
bay = get_test_bay(context, **kw)
|
|
||||||
create_test_cluster_template(context, uuid=bay['baymodel_id'],
|
|
||||||
coe=kw.get('coe', 'swarm'))
|
|
||||||
bay.create()
|
|
||||||
return bay
|
|
||||||
|
|
||||||
|
|
||||||
def get_test_cluster(context, **kw):
|
|
||||||
"""Return a Cluster object with appropriate attributes.
|
|
||||||
|
|
||||||
NOTE: Object model is the same for Cluster and
|
|
||||||
Bay
|
|
||||||
"""
|
|
||||||
return get_test_bay(context, **kw)
|
|
||||||
|
|
||||||
|
|
||||||
def create_test_cluster(context, **kw):
|
def create_test_cluster(context, **kw):
|
||||||
"""Create and return a test cluster object.
|
"""Create and return a test Cluster object.
|
||||||
|
|
||||||
NOTE: Object model is the same for Cluster and
|
Create a Cluster in the DB and return a Cluster object with appropriate
|
||||||
Bay
|
attributes.
|
||||||
"""
|
"""
|
||||||
return create_test_bay(context, **kw)
|
cluster = get_test_cluster(context, **kw)
|
||||||
|
create_test_cluster_template(context, uuid=cluster['cluster_template_id'],
|
||||||
|
coe=kw.get('coe', 'swarm'))
|
||||||
|
cluster.create()
|
||||||
|
return cluster
|
||||||
|
|
||||||
|
|
||||||
def get_test_x509keypair(context, **kw):
|
def get_test_x509keypair(context, **kw):
|
||||||
|
@ -19,7 +19,7 @@ from magnum.common import context
|
|||||||
from magnum.common.rpc_service import CONF
|
from magnum.common.rpc_service import CONF
|
||||||
from magnum.db.sqlalchemy import api as dbapi
|
from magnum.db.sqlalchemy import api as dbapi
|
||||||
from magnum import objects
|
from magnum import objects
|
||||||
from magnum.objects.fields import BayStatus as bay_status
|
from magnum.objects.fields import ClusterStatus as cluster_status
|
||||||
from magnum.service import periodic
|
from magnum.service import periodic
|
||||||
from magnum.tests import base
|
from magnum.tests import base
|
||||||
from magnum.tests.unit.db import utils
|
from magnum.tests.unit.db import utils
|
||||||
@ -38,7 +38,7 @@ class PeriodicTestCase(base.TestCase):
|
|||||||
|
|
||||||
ctx = context.make_admin_context()
|
ctx = context.make_admin_context()
|
||||||
|
|
||||||
# Can be identical for all bays.
|
# Can be identical for all clusters.
|
||||||
trust_attrs = {
|
trust_attrs = {
|
||||||
'trustee_username': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
|
'trustee_username': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
|
||||||
'trustee_password': 'ain7einaebooVaig6d',
|
'trustee_password': 'ain7einaebooVaig6d',
|
||||||
@ -46,39 +46,42 @@ class PeriodicTestCase(base.TestCase):
|
|||||||
}
|
}
|
||||||
|
|
||||||
trust_attrs.update({'id': 1, 'stack_id': '11',
|
trust_attrs.update({'id': 1, 'stack_id': '11',
|
||||||
'status': bay_status.CREATE_IN_PROGRESS})
|
'status': cluster_status.CREATE_IN_PROGRESS})
|
||||||
bay1 = utils.get_test_bay(**trust_attrs)
|
cluster1 = utils.get_test_cluster(**trust_attrs)
|
||||||
trust_attrs.update({'id': 2, 'stack_id': '22',
|
trust_attrs.update({'id': 2, 'stack_id': '22',
|
||||||
'status': bay_status.DELETE_IN_PROGRESS})
|
'status': cluster_status.DELETE_IN_PROGRESS})
|
||||||
bay2 = utils.get_test_bay(**trust_attrs)
|
cluster2 = utils.get_test_cluster(**trust_attrs)
|
||||||
trust_attrs.update({'id': 3, 'stack_id': '33',
|
trust_attrs.update({'id': 3, 'stack_id': '33',
|
||||||
'status': bay_status.UPDATE_IN_PROGRESS})
|
'status': cluster_status.UPDATE_IN_PROGRESS})
|
||||||
bay3 = utils.get_test_bay(**trust_attrs)
|
cluster3 = utils.get_test_cluster(**trust_attrs)
|
||||||
trust_attrs.update({'id': 4, 'stack_id': '44',
|
trust_attrs.update({'id': 4, 'stack_id': '44',
|
||||||
'status': bay_status.CREATE_COMPLETE})
|
'status': cluster_status.CREATE_COMPLETE})
|
||||||
bay4 = utils.get_test_bay(**trust_attrs)
|
cluster4 = utils.get_test_cluster(**trust_attrs)
|
||||||
trust_attrs.update({'id': 5, 'stack_id': '55',
|
trust_attrs.update({'id': 5, 'stack_id': '55',
|
||||||
'status': bay_status.ROLLBACK_IN_PROGRESS})
|
'status': cluster_status.ROLLBACK_IN_PROGRESS})
|
||||||
bay5 = utils.get_test_bay(**trust_attrs)
|
cluster5 = utils.get_test_cluster(**trust_attrs)
|
||||||
|
|
||||||
self.bay1 = objects.Bay(ctx, **bay1)
|
self.cluster1 = objects.Cluster(ctx, **cluster1)
|
||||||
self.bay2 = objects.Bay(ctx, **bay2)
|
self.cluster2 = objects.Cluster(ctx, **cluster2)
|
||||||
self.bay3 = objects.Bay(ctx, **bay3)
|
self.cluster3 = objects.Cluster(ctx, **cluster3)
|
||||||
self.bay4 = objects.Bay(ctx, **bay4)
|
self.cluster4 = objects.Cluster(ctx, **cluster4)
|
||||||
self.bay5 = objects.Bay(ctx, **bay5)
|
self.cluster5 = objects.Cluster(ctx, **cluster5)
|
||||||
|
|
||||||
@mock.patch.object(objects.Bay, 'list')
|
@mock.patch.object(objects.Cluster, 'list')
|
||||||
@mock.patch('magnum.common.clients.OpenStackClients')
|
@mock.patch('magnum.common.clients.OpenStackClients')
|
||||||
@mock.patch.object(dbapi.Connection, 'destroy_bay')
|
@mock.patch.object(dbapi.Connection, 'destroy_cluster')
|
||||||
@mock.patch.object(dbapi.Connection, 'update_bay')
|
@mock.patch.object(dbapi.Connection, 'update_cluster')
|
||||||
def test_sync_bay_status_changes(self, mock_db_update, mock_db_destroy,
|
def test_sync_cluster_status_changes(self, mock_db_update, mock_db_destroy,
|
||||||
mock_oscc, mock_bay_list):
|
mock_oscc, mock_cluster_list):
|
||||||
mock_heat_client = mock.MagicMock()
|
mock_heat_client = mock.MagicMock()
|
||||||
stack1 = fake_stack(id='11', stack_status=bay_status.CREATE_COMPLETE,
|
stack1 = fake_stack(
|
||||||
|
id='11', stack_status=cluster_status.CREATE_COMPLETE,
|
||||||
stack_status_reason='fake_reason_11')
|
stack_status_reason='fake_reason_11')
|
||||||
stack3 = fake_stack(id='33', stack_status=bay_status.UPDATE_COMPLETE,
|
stack3 = fake_stack(
|
||||||
|
id='33', stack_status=cluster_status.UPDATE_COMPLETE,
|
||||||
stack_status_reason='fake_reason_33')
|
stack_status_reason='fake_reason_33')
|
||||||
stack5 = fake_stack(id='55', stack_status=bay_status.ROLLBACK_COMPLETE,
|
stack5 = fake_stack(
|
||||||
|
id='55', stack_status=cluster_status.ROLLBACK_COMPLETE,
|
||||||
stack_status_reason='fake_reason_55')
|
stack_status_reason='fake_reason_55')
|
||||||
mock_heat_client.stacks.list.return_value = [stack1, stack3, stack5]
|
mock_heat_client.stacks.list.return_value = [stack1, stack3, stack5]
|
||||||
get_stacks = {'11': stack1, '33': stack3, '55': stack5}
|
get_stacks = {'11': stack1, '33': stack3, '55': stack5}
|
||||||
@ -91,35 +94,36 @@ class PeriodicTestCase(base.TestCase):
|
|||||||
mock_heat_client.stacks.get.side_effect = stack_get_sideefect
|
mock_heat_client.stacks.get.side_effect = stack_get_sideefect
|
||||||
mock_osc = mock_oscc.return_value
|
mock_osc = mock_oscc.return_value
|
||||||
mock_osc.heat.return_value = mock_heat_client
|
mock_osc.heat.return_value = mock_heat_client
|
||||||
mock_bay_list.return_value = [self.bay1, self.bay2, self.bay3,
|
mock_cluster_list.return_value = [self.cluster1, self.cluster2,
|
||||||
self.bay5]
|
self.cluster3, self.cluster5]
|
||||||
|
|
||||||
mock_keystone_client = mock.MagicMock()
|
mock_keystone_client = mock.MagicMock()
|
||||||
mock_keystone_client.client.project_id = "fake_project"
|
mock_keystone_client.client.project_id = "fake_project"
|
||||||
mock_osc.keystone.return_value = mock_keystone_client
|
mock_osc.keystone.return_value = mock_keystone_client
|
||||||
|
|
||||||
periodic.MagnumPeriodicTasks(CONF).sync_bay_status(None)
|
periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None)
|
||||||
|
|
||||||
self.assertEqual(bay_status.CREATE_COMPLETE, self.bay1.status)
|
self.assertEqual(cluster_status.CREATE_COMPLETE, self.cluster1.status)
|
||||||
self.assertEqual('fake_reason_11', self.bay1.status_reason)
|
self.assertEqual('fake_reason_11', self.cluster1.status_reason)
|
||||||
mock_db_destroy.assert_called_once_with(self.bay2.uuid)
|
mock_db_destroy.assert_called_once_with(self.cluster2.uuid)
|
||||||
self.assertEqual(bay_status.UPDATE_COMPLETE, self.bay3.status)
|
self.assertEqual(cluster_status.UPDATE_COMPLETE, self.cluster3.status)
|
||||||
self.assertEqual('fake_reason_33', self.bay3.status_reason)
|
self.assertEqual('fake_reason_33', self.cluster3.status_reason)
|
||||||
self.assertEqual(bay_status.ROLLBACK_COMPLETE, self.bay5.status)
|
self.assertEqual(cluster_status.ROLLBACK_COMPLETE,
|
||||||
self.assertEqual('fake_reason_55', self.bay5.status_reason)
|
self.cluster5.status)
|
||||||
|
self.assertEqual('fake_reason_55', self.cluster5.status_reason)
|
||||||
|
|
||||||
@mock.patch.object(objects.Bay, 'list')
|
@mock.patch.object(objects.Cluster, 'list')
|
||||||
@mock.patch('magnum.common.clients.OpenStackClients')
|
@mock.patch('magnum.common.clients.OpenStackClients')
|
||||||
def test_sync_auth_fail(self, mock_oscc, mock_bay_list):
|
def test_sync_auth_fail(self, mock_oscc, mock_cluster_list):
|
||||||
"""Tests handling for unexpected exceptions in _get_bay_stacks()
|
"""Tests handling for unexpected exceptions in _get_cluster_stacks()
|
||||||
|
|
||||||
It does this by raising an HTTPUnauthorized exception in the Heat client.
|
It does this by raising an HTTPUnauthorized exception in the Heat client.
|
||||||
The affected stack thus missing from the stack list should not lead to
|
The affected stack thus missing from the stack list should not lead to
|
||||||
bay state changing in this case. Likewise, subsequent bays should still
|
cluster state changing in this case. Likewise, subsequent clusters
|
||||||
change state, despite the affected bay being skipped.
|
should still change state, despite the affected cluster being skipped.
|
||||||
"""
|
"""
|
||||||
stack1 = fake_stack(id='11',
|
stack1 = fake_stack(id='11',
|
||||||
stack_status=bay_status.CREATE_COMPLETE)
|
stack_status=cluster_status.CREATE_COMPLETE)
|
||||||
|
|
||||||
mock_heat_client = mock.MagicMock()
|
mock_heat_client = mock.MagicMock()
|
||||||
|
|
||||||
@ -130,23 +134,25 @@ class PeriodicTestCase(base.TestCase):
|
|||||||
mock_heat_client.stacks.list.return_value = [stack1]
|
mock_heat_client.stacks.list.return_value = [stack1]
|
||||||
mock_osc = mock_oscc.return_value
|
mock_osc = mock_oscc.return_value
|
||||||
mock_osc.heat.return_value = mock_heat_client
|
mock_osc.heat.return_value = mock_heat_client
|
||||||
mock_bay_list.return_value = [self.bay1]
|
mock_cluster_list.return_value = [self.cluster1]
|
||||||
periodic.MagnumPeriodicTasks(CONF).sync_bay_status(None)
|
periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None)
|
||||||
|
|
||||||
self.assertEqual(bay_status.CREATE_IN_PROGRESS, self.bay1.status)
|
self.assertEqual(cluster_status.CREATE_IN_PROGRESS,
|
||||||
|
self.cluster1.status)
|
||||||
|
|
||||||
@mock.patch.object(objects.Bay, 'list')
|
@mock.patch.object(objects.Cluster, 'list')
|
||||||
@mock.patch('magnum.common.clients.OpenStackClients')
|
@mock.patch('magnum.common.clients.OpenStackClients')
|
||||||
def test_sync_bay_status_not_changes(self, mock_oscc, mock_bay_list):
|
def test_sync_cluster_status_not_changes(self, mock_oscc,
|
||||||
|
mock_cluster_list):
|
||||||
mock_heat_client = mock.MagicMock()
|
mock_heat_client = mock.MagicMock()
|
||||||
stack1 = fake_stack(id='11',
|
stack1 = fake_stack(id='11',
|
||||||
stack_status=bay_status.CREATE_IN_PROGRESS)
|
stack_status=cluster_status.CREATE_IN_PROGRESS)
|
||||||
stack2 = fake_stack(id='22',
|
stack2 = fake_stack(id='22',
|
||||||
stack_status=bay_status.DELETE_IN_PROGRESS)
|
stack_status=cluster_status.DELETE_IN_PROGRESS)
|
||||||
stack3 = fake_stack(id='33',
|
stack3 = fake_stack(id='33',
|
||||||
stack_status=bay_status.UPDATE_IN_PROGRESS)
|
stack_status=cluster_status.UPDATE_IN_PROGRESS)
|
||||||
stack5 = fake_stack(id='55',
|
stack5 = fake_stack(id='55',
|
||||||
stack_status=bay_status.ROLLBACK_IN_PROGRESS)
|
stack_status=cluster_status.ROLLBACK_IN_PROGRESS)
|
||||||
get_stacks = {'11': stack1, '22': stack2, '33': stack3, '55': stack5}
|
get_stacks = {'11': stack1, '22': stack2, '33': stack3, '55': stack5}
|
||||||
|
|
||||||
def stack_get_sideefect(arg):
|
def stack_get_sideefect(arg):
|
||||||
@ -159,63 +165,69 @@ class PeriodicTestCase(base.TestCase):
|
|||||||
stack5]
|
stack5]
|
||||||
mock_osc = mock_oscc.return_value
|
mock_osc = mock_oscc.return_value
|
||||||
mock_osc.heat.return_value = mock_heat_client
|
mock_osc.heat.return_value = mock_heat_client
|
||||||
mock_bay_list.return_value = [self.bay1, self.bay2, self.bay3,
|
mock_cluster_list.return_value = [self.cluster1, self.cluster2,
|
||||||
self.bay5]
|
self.cluster3, self.cluster5]
|
||||||
periodic.MagnumPeriodicTasks(CONF).sync_bay_status(None)
|
periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None)
|
||||||
|
|
||||||
self.assertEqual(bay_status.CREATE_IN_PROGRESS, self.bay1.status)
|
self.assertEqual(cluster_status.CREATE_IN_PROGRESS,
|
||||||
self.assertEqual(bay_status.DELETE_IN_PROGRESS, self.bay2.status)
|
self.cluster1.status)
|
||||||
self.assertEqual(bay_status.UPDATE_IN_PROGRESS, self.bay3.status)
|
self.assertEqual(cluster_status.DELETE_IN_PROGRESS,
|
||||||
self.assertEqual(bay_status.ROLLBACK_IN_PROGRESS, self.bay5.status)
|
self.cluster2.status)
|
||||||
|
self.assertEqual(cluster_status.UPDATE_IN_PROGRESS,
|
||||||
|
self.cluster3.status)
|
||||||
|
self.assertEqual(cluster_status.ROLLBACK_IN_PROGRESS,
|
||||||
|
self.cluster5.status)
|
||||||
|
|
||||||
@mock.patch.object(objects.Bay, 'list')
|
@mock.patch.object(objects.Cluster, 'list')
|
||||||
@mock.patch('magnum.common.clients.OpenStackClients')
|
@mock.patch('magnum.common.clients.OpenStackClients')
|
||||||
@mock.patch.object(dbapi.Connection, 'destroy_bay')
|
@mock.patch.object(dbapi.Connection, 'destroy_cluster')
|
||||||
@mock.patch.object(dbapi.Connection, 'update_bay')
|
@mock.patch.object(dbapi.Connection, 'update_cluster')
|
||||||
def test_sync_bay_status_heat_not_found(self, mock_db_update,
|
def test_sync_cluster_status_heat_not_found(self, mock_db_update,
|
||||||
mock_db_destroy, mock_oscc,
|
mock_db_destroy, mock_oscc,
|
||||||
mock_bay_list):
|
mock_cluster_list):
|
||||||
mock_heat_client = mock.MagicMock()
|
mock_heat_client = mock.MagicMock()
|
||||||
mock_heat_client.stacks.list.return_value = []
|
mock_heat_client.stacks.list.return_value = []
|
||||||
mock_osc = mock_oscc.return_value
|
mock_osc = mock_oscc.return_value
|
||||||
mock_osc.heat.return_value = mock_heat_client
|
mock_osc.heat.return_value = mock_heat_client
|
||||||
mock_bay_list.return_value = [self.bay1, self.bay2, self.bay3]
|
mock_cluster_list.return_value = [self.cluster1, self.cluster2,
|
||||||
|
self.cluster3]
|
||||||
|
|
||||||
mock_keystone_client = mock.MagicMock()
|
mock_keystone_client = mock.MagicMock()
|
||||||
mock_keystone_client.client.project_id = "fake_project"
|
mock_keystone_client.client.project_id = "fake_project"
|
||||||
mock_osc.keystone.return_value = mock_keystone_client
|
mock_osc.keystone.return_value = mock_keystone_client
|
||||||
|
|
||||||
periodic.MagnumPeriodicTasks(CONF).sync_bay_status(None)
|
periodic.MagnumPeriodicTasks(CONF).sync_cluster_status(None)
|
||||||
|
|
||||||
self.assertEqual(bay_status.CREATE_FAILED, self.bay1.status)
|
self.assertEqual(cluster_status.CREATE_FAILED, self.cluster1.status)
|
||||||
self.assertEqual('Stack with id 11 not found in Heat.',
|
self.assertEqual('Stack with id 11 not found in Heat.',
|
||||||
self.bay1.status_reason)
|
self.cluster1.status_reason)
|
||||||
mock_db_destroy.assert_called_once_with(self.bay2.uuid)
|
mock_db_destroy.assert_called_once_with(self.cluster2.uuid)
|
||||||
self.assertEqual(bay_status.UPDATE_FAILED, self.bay3.status)
|
self.assertEqual(cluster_status.UPDATE_FAILED, self.cluster3.status)
|
||||||
self.assertEqual('Stack with id 33 not found in Heat.',
|
self.assertEqual('Stack with id 33 not found in Heat.',
|
||||||
self.bay3.status_reason)
|
self.cluster3.status_reason)
|
||||||
|
|
||||||
@mock.patch('magnum.conductor.monitors.create_monitor')
|
@mock.patch('magnum.conductor.monitors.create_monitor')
|
||||||
@mock.patch('magnum.objects.Bay.list')
|
@mock.patch('magnum.objects.Cluster.list')
|
||||||
@mock.patch('magnum.common.rpc.get_notifier')
|
@mock.patch('magnum.common.rpc.get_notifier')
|
||||||
@mock.patch('magnum.common.context.make_admin_context')
|
@mock.patch('magnum.common.context.make_admin_context')
|
||||||
def test_send_bay_metrics(self, mock_make_admin_context, mock_get_notifier,
|
def test_send_cluster_metrics(self, mock_make_admin_context,
|
||||||
mock_bay_list, mock_create_monitor):
|
mock_get_notifier, mock_cluster_list,
|
||||||
|
mock_create_monitor):
|
||||||
"""Test if RPC notifier receives the expected message"""
|
"""Test if RPC notifier receives the expected message"""
|
||||||
mock_make_admin_context.return_value = self.context
|
mock_make_admin_context.return_value = self.context
|
||||||
notifier = mock.MagicMock()
|
notifier = mock.MagicMock()
|
||||||
mock_get_notifier.return_value = notifier
|
mock_get_notifier.return_value = notifier
|
||||||
mock_bay_list.return_value = [self.bay1, self.bay2, self.bay3,
|
mock_cluster_list.return_value = [self.cluster1, self.cluster2,
|
||||||
self.bay4]
|
self.cluster3, self.cluster4]
|
||||||
monitor = mock.MagicMock()
|
monitor = mock.MagicMock()
|
||||||
monitor.get_metric_names.return_value = ['metric1', 'metric2']
|
monitor.get_metric_names.return_value = ['metric1', 'metric2']
|
||||||
monitor.compute_metric_value.return_value = 30
|
monitor.compute_metric_value.return_value = 30
|
||||||
monitor.get_metric_unit.return_value = '%'
|
monitor.get_metric_unit.return_value = '%'
|
||||||
mock_create_monitor.return_value = monitor
|
mock_create_monitor.return_value = monitor
|
||||||
|
|
||||||
periodic.MagnumPeriodicTasks(CONF)._send_bay_metrics(self.context)
|
periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context)
|
||||||
|
|
||||||
expected_event_type = 'magnum.bay.metrics.update'
|
expected_event_type = 'magnum.cluster.metrics.update'
|
||||||
expected_metrics = [
|
expected_metrics = [
|
||||||
{
|
{
|
||||||
'name': 'metric1',
|
'name': 'metric1',
|
||||||
@ -229,9 +241,9 @@ class PeriodicTestCase(base.TestCase):
|
|||||||
},
|
},
|
||||||
]
|
]
|
||||||
expected_msg = {
|
expected_msg = {
|
||||||
'user_id': self.bay4.user_id,
|
'user_id': self.cluster4.user_id,
|
||||||
'project_id': self.bay4.project_id,
|
'project_id': self.cluster4.project_id,
|
||||||
'resource_id': self.bay4.uuid,
|
'resource_id': self.cluster4.uuid,
|
||||||
'metrics': expected_metrics
|
'metrics': expected_metrics
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -240,29 +252,29 @@ class PeriodicTestCase(base.TestCase):
|
|||||||
self.context, expected_event_type, expected_msg)
|
self.context, expected_event_type, expected_msg)
|
||||||
|
|
||||||
@mock.patch('magnum.conductor.monitors.create_monitor')
|
@mock.patch('magnum.conductor.monitors.create_monitor')
|
||||||
@mock.patch('magnum.objects.Bay.list')
|
@mock.patch('magnum.objects.Cluster.list')
|
||||||
@mock.patch('magnum.common.rpc.get_notifier')
|
@mock.patch('magnum.common.rpc.get_notifier')
|
||||||
@mock.patch('magnum.common.context.make_admin_context')
|
@mock.patch('magnum.common.context.make_admin_context')
|
||||||
def test_send_bay_metrics_compute_metric_raise(
|
def test_send_cluster_metrics_compute_metric_raise(
|
||||||
self, mock_make_admin_context, mock_get_notifier, mock_bay_list,
|
self, mock_make_admin_context, mock_get_notifier,
|
||||||
mock_create_monitor):
|
mock_cluster_list, mock_create_monitor):
|
||||||
mock_make_admin_context.return_value = self.context
|
mock_make_admin_context.return_value = self.context
|
||||||
notifier = mock.MagicMock()
|
notifier = mock.MagicMock()
|
||||||
mock_get_notifier.return_value = notifier
|
mock_get_notifier.return_value = notifier
|
||||||
mock_bay_list.return_value = [self.bay4]
|
mock_cluster_list.return_value = [self.cluster4]
|
||||||
monitor = mock.MagicMock()
|
monitor = mock.MagicMock()
|
||||||
monitor.get_metric_names.return_value = ['metric1', 'metric2']
|
monitor.get_metric_names.return_value = ['metric1', 'metric2']
|
||||||
monitor.compute_metric_value.side_effect = Exception(
|
monitor.compute_metric_value.side_effect = Exception(
|
||||||
"error on computing metric")
|
"error on computing metric")
|
||||||
mock_create_monitor.return_value = monitor
|
mock_create_monitor.return_value = monitor
|
||||||
|
|
||||||
periodic.MagnumPeriodicTasks(CONF)._send_bay_metrics(self.context)
|
periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context)
|
||||||
|
|
||||||
expected_event_type = 'magnum.bay.metrics.update'
|
expected_event_type = 'magnum.cluster.metrics.update'
|
||||||
expected_msg = {
|
expected_msg = {
|
||||||
'user_id': self.bay4.user_id,
|
'user_id': self.cluster4.user_id,
|
||||||
'project_id': self.bay4.project_id,
|
'project_id': self.cluster4.project_id,
|
||||||
'resource_id': self.bay4.uuid,
|
'resource_id': self.cluster4.uuid,
|
||||||
'metrics': []
|
'metrics': []
|
||||||
}
|
}
|
||||||
self.assertEqual(1, mock_create_monitor.call_count)
|
self.assertEqual(1, mock_create_monitor.call_count)
|
||||||
@ -270,39 +282,39 @@ class PeriodicTestCase(base.TestCase):
|
|||||||
self.context, expected_event_type, expected_msg)
|
self.context, expected_event_type, expected_msg)
|
||||||
|
|
||||||
@mock.patch('magnum.conductor.monitors.create_monitor')
|
@mock.patch('magnum.conductor.monitors.create_monitor')
|
||||||
@mock.patch('magnum.objects.Bay.list')
|
@mock.patch('magnum.objects.Cluster.list')
|
||||||
@mock.patch('magnum.common.rpc.get_notifier')
|
@mock.patch('magnum.common.rpc.get_notifier')
|
||||||
@mock.patch('magnum.common.context.make_admin_context')
|
@mock.patch('magnum.common.context.make_admin_context')
|
||||||
def test_send_bay_metrics_pull_data_raise(
|
def test_send_cluster_metrics_pull_data_raise(
|
||||||
self, mock_make_admin_context, mock_get_notifier, mock_bay_list,
|
self, mock_make_admin_context, mock_get_notifier,
|
||||||
mock_create_monitor):
|
mock_cluster_list, mock_create_monitor):
|
||||||
mock_make_admin_context.return_value = self.context
|
mock_make_admin_context.return_value = self.context
|
||||||
notifier = mock.MagicMock()
|
notifier = mock.MagicMock()
|
||||||
mock_get_notifier.return_value = notifier
|
mock_get_notifier.return_value = notifier
|
||||||
mock_bay_list.return_value = [self.bay4]
|
mock_cluster_list.return_value = [self.cluster4]
|
||||||
monitor = mock.MagicMock()
|
monitor = mock.MagicMock()
|
||||||
monitor.pull_data.side_effect = Exception("error on pulling data")
|
monitor.pull_data.side_effect = Exception("error on pulling data")
|
||||||
mock_create_monitor.return_value = monitor
|
mock_create_monitor.return_value = monitor
|
||||||
|
|
||||||
periodic.MagnumPeriodicTasks(CONF)._send_bay_metrics(self.context)
|
periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context)
|
||||||
|
|
||||||
self.assertEqual(1, mock_create_monitor.call_count)
|
self.assertEqual(1, mock_create_monitor.call_count)
|
||||||
self.assertEqual(0, notifier.info.call_count)
|
self.assertEqual(0, notifier.info.call_count)
|
||||||
|
|
||||||
@mock.patch('magnum.conductor.monitors.create_monitor')
|
@mock.patch('magnum.conductor.monitors.create_monitor')
|
||||||
@mock.patch('magnum.objects.Bay.list')
|
@mock.patch('magnum.objects.Cluster.list')
|
||||||
@mock.patch('magnum.common.rpc.get_notifier')
|
@mock.patch('magnum.common.rpc.get_notifier')
|
||||||
@mock.patch('magnum.common.context.make_admin_context')
|
@mock.patch('magnum.common.context.make_admin_context')
|
||||||
def test_send_bay_metrics_monitor_none(
|
def test_send_cluster_metrics_monitor_none(
|
||||||
self, mock_make_admin_context, mock_get_notifier, mock_bay_list,
|
self, mock_make_admin_context, mock_get_notifier,
|
||||||
mock_create_monitor):
|
mock_cluster_list, mock_create_monitor):
|
||||||
mock_make_admin_context.return_value = self.context
|
mock_make_admin_context.return_value = self.context
|
||||||
notifier = mock.MagicMock()
|
notifier = mock.MagicMock()
|
||||||
mock_get_notifier.return_value = notifier
|
mock_get_notifier.return_value = notifier
|
||||||
mock_bay_list.return_value = [self.bay4]
|
mock_cluster_list.return_value = [self.cluster4]
|
||||||
mock_create_monitor.return_value = None
|
mock_create_monitor.return_value = None
|
||||||
|
|
||||||
periodic.MagnumPeriodicTasks(CONF)._send_bay_metrics(self.context)
|
periodic.MagnumPeriodicTasks(CONF)._send_cluster_metrics(self.context)
|
||||||
|
|
||||||
self.assertEqual(1, mock_create_monitor.call_count)
|
self.assertEqual(1, mock_create_monitor.call_count)
|
||||||
self.assertEqual(0, notifier.info.call_count)
|
self.assertEqual(0, notifier.info.call_count)
|
||||||
|