Rename Bay DB, Object, and internal usage to Cluster
This is patch 3 of 3 to change the internal usage of the terms Bay and BayModel. This patch updates Bay to Cluster in the DB and Object layers, as well as all of their usages. No functionality should be changed by this patch, only naming and DB updates.

Change-Id: Ife04b0f944ded03ca932d70e09e6766d09cf5d9f
Implements: blueprint rename-bay-to-cluster
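The pattern this patch applies everywhere below: the renamed objects.Cluster becomes the single internal representation, and the legacy Bay API classes translate the old field names at the API boundary. A minimal sketch of the idea, with hypothetical names rather than Magnum's actual classes:

    # Minimal sketch of the rename-with-compatibility pattern, assuming a
    # legacy shim over a renamed internal object. Names are illustrative.
    OLD_TO_NEW = {
        'baymodel_id': 'cluster_template_id',
        'bay_create_timeout': 'create_timeout',
        'bay_faults': 'faults',
    }

    class LegacyBay(object):
        """Accept either spelling; keep both attributes readable."""

        def __init__(self, **kwargs):
            for old, new in OLD_TO_NEW.items():
                # Prefer the legacy spelling if the caller supplied it.
                value = kwargs.get(old, kwargs.get(new))
                setattr(self, old, value)
                setattr(self, new, value)

    bay = LegacyBay(baymodel_id='abc123')
    assert bay.cluster_template_id == bay.baymodel_id == 'abc123'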
parent 68463dd005
commit 729c2d0ab4
@@ -143,12 +143,12 @@ class Controller(rest.RestController):
     Example:

         @base.Controller.api_version("1.1", "1.2")
         @expose.expose(Cluster, types.uuid_or_name)
-        def get_one(self, bay_ident):
+        def get_one(self, cluster_ident):
         {...code for versions 1.1 to 1.2...}

         @base.Controller.api_version("1.3")
         @expose.expose(Cluster, types.uuid_or_name)
-        def get_one(self, bay_ident):
+        def get_one(self, cluster_ident):
         {...code for versions 1.3 to latest}

     @min_ver: string representing minimum version
@@ -28,7 +28,7 @@ from magnum.api.controllers.v1 import collection
 from magnum.api.controllers.v1 import types
 from magnum.api import expose
 from magnum.api import utils as api_utils
-from magnum.api.validation import validate_bay_properties
+from magnum.api.validation import validate_cluster_properties
 from magnum.common import clients
 from magnum.common import exception
 from magnum.common import name_generator
@@ -66,7 +66,7 @@ class Bay(base.APIBase):
                 self._baymodel_id = baymodel.uuid
             except exception.ClusterTemplateNotFound as e:
                 # Change error code because 404 (NotFound) is inappropriate
-                # response for a POST request to create a Bay
+                # response for a POST request to create a Cluster
                 e.code = 400  # BadRequest
                 raise
         elif value == wtypes.Unset:
@@ -99,7 +99,7 @@ class Bay(base.APIBase):
     stack_id = wsme.wsattr(wtypes.text, readonly=True)
     """Stack id of the heat stack"""

-    status = wtypes.Enum(str, *fields.BayStatus.ALL)
+    status = wtypes.Enum(str, *fields.ClusterStatus.ALL)
     """Status of the bay from the heat stack"""

     status_reason = wtypes.text
@@ -131,13 +131,43 @@ class Bay(base.APIBase):
         super(Bay, self).__init__()

         self.fields = []
-        for field in objects.Bay.fields:
+        for field in objects.Cluster.fields:
             # Skip fields we do not expose.
             if not hasattr(self, field):
                 continue
             self.fields.append(field)
             setattr(self, field, kwargs.get(field, wtypes.Unset))

+        # Set the renamed attributes for bay backwards compatibility
+        self.fields.append('baymodel_id')
+        if 'baymodel_id' in kwargs.keys():
+            setattr(self, 'cluster_template_id',
+                    kwargs.get('baymodel_id', None))
+            setattr(self, 'baymodel_id',
+                    kwargs.get('baymodel_id', None))
+        else:
+            setattr(self, 'baymodel_id', kwargs.get('cluster_template_id',
+                                                    None))
+
+        self.fields.append('bay_create_timeout')
+        if 'bay_create_timeout' in kwargs.keys():
+            setattr(self, 'create_timeout',
+                    kwargs.get('bay_create_timeout', wtypes.Unset))
+            setattr(self, 'bay_create_timeout',
+                    kwargs.get('bay_create_timeout', wtypes.Unset))
+        else:
+            setattr(self, 'bay_create_timeout', kwargs.get('create_timeout',
+                                                           wtypes.Unset))
+
+        self.fields.append('bay_faults')
+        if 'bay_faults' in kwargs.keys():
+            setattr(self, 'faults',
+                    kwargs.get('bay_faults', wtypes.Unset))
+            setattr(self, 'bay_faults',
+                    kwargs.get('bay_faults', wtypes.Unset))
+        else:
+            setattr(self, 'bay_faults', kwargs.get('faults', wtypes.Unset))
+
     @staticmethod
     def _convert_with_links(bay, url, expand=True):
         if not expand:
@@ -167,7 +197,7 @@ class Bay(base.APIBase):
                      master_count=1,
                      bay_create_timeout=15,
                      stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63',
-                     status=fields.BayStatus.CREATE_COMPLETE,
+                     status=fields.ClusterStatus.CREATE_COMPLETE,
                      status_reason="CREATE completed successfully",
                      api_address='172.24.4.3',
                      node_addresses=['172.24.4.4', '172.24.4.5'],
@@ -177,6 +207,24 @@ class Bay(base.APIBase):
                      container_version=None)
         return cls._convert_with_links(sample, 'http://localhost:9511', expand)

+    def as_dict(self):
+        """Render this object as a dict of its fields."""
+
+        # Override this for old bay values
+        d = super(Bay, self).as_dict()
+
+        d['cluster_template_id'] = d['baymodel_id']
+        del d['baymodel_id']
+
+        d['create_timeout'] = d['bay_create_timeout']
+        del d['bay_create_timeout']
+
+        if 'bay_faults' in d.keys():
+            d['faults'] = d['bay_faults']
+            del d['bay_faults']
+
+        return d
+

 class BayPatchType(types.JsonPatchType):
     _api_base = Bay
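The as_dict() override above is what keeps the renamed internals working: the legacy attribute names are translated to the cluster field names before the dict is handed on. A rough equivalent of the key shuffling, with illustrative values:

    # Rough equivalent of the rename performed by Bay.as_dict() above
    # (values are illustrative).
    d = {'baymodel_id': 'abc123', 'bay_create_timeout': 15, 'bay_faults': {}}
    d['cluster_template_id'] = d.pop('baymodel_id')
    d['create_timeout'] = d.pop('bay_create_timeout')
    if 'bay_faults' in d:
        d['faults'] = d.pop('bay_faults')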
@@ -239,12 +287,12 @@ class BaysController(base.Controller):

         marker_obj = None
         if marker:
-            marker_obj = objects.Bay.get_by_uuid(pecan.request.context,
-                                                 marker)
+            marker_obj = objects.Cluster.get_by_uuid(pecan.request.context,
+                                                     marker)

-        bays = objects.Bay.list(pecan.request.context, limit,
-                                marker_obj, sort_key=sort_key,
-                                sort_dir=sort_dir)
+        bays = objects.Cluster.list(pecan.request.context, limit,
+                                    marker_obj, sort_key=sort_key,
+                                    sort_dir=sort_dir)

         return BayCollection.convert_with_links(bays, limit,
                                                 url=resource_url,
@@ -323,13 +371,13 @@ class BaysController(base.Controller):
         :param bay_ident: UUID of a bay or logical name of the bay.
         """
         context = pecan.request.context
-        bay = api_utils.get_resource('Bay', bay_ident)
+        bay = api_utils.get_resource('Cluster', bay_ident)
         policy.enforce(context, 'bay:get', bay,
                        action='bay:get')

         bay = Bay.convert_with_links(bay)

-        if bay.status in fields.BayStatus.STATUS_FAILED:
+        if bay.status in fields.ClusterStatus.STATUS_FAILED:
             bay.bay_faults = self._collect_fault_info(context, bay)

         return bay
@@ -342,8 +390,8 @@ class BaysController(base.Controller):
         :param bay: a bay within the request body.
         """
         new_bay = self._post(bay)
-        res_bay = pecan.request.rpcapi.bay_create(new_bay,
-                                                  bay.bay_create_timeout)
+        res_bay = pecan.request.rpcapi.cluster_create(new_bay,
+                                                      bay.bay_create_timeout)

         # Set the HTTP Location Header
         pecan.response.location = link.build_url('bays', res_bay.uuid)
@@ -357,7 +405,8 @@ class BaysController(base.Controller):
         :param bay: a bay within the request body.
         """
         new_bay = self._post(bay)
-        pecan.request.rpcapi.bay_create_async(new_bay, bay.bay_create_timeout)
+        pecan.request.rpcapi.cluster_create_async(new_bay,
+                                                  bay.bay_create_timeout)
         return BayID(new_bay.uuid)

     def _post(self, bay):
@@ -378,7 +427,7 @@ class BaysController(base.Controller):
         bay_dict['coe_version'] = None
         bay_dict['container_version'] = None

-        new_bay = objects.Bay(context, **bay_dict)
+        new_bay = objects.Cluster(context, **bay_dict)
         new_bay.uuid = uuid.uuid4()
         return new_bay

@@ -392,7 +441,7 @@ class BaysController(base.Controller):
         :param patch: a json PATCH document to apply to this bay.
         """
         bay = self._patch(bay_ident, patch)
-        res_bay = pecan.request.rpcapi.bay_update(bay)
+        res_bay = pecan.request.rpcapi.cluster_update(bay)
         return Bay.convert_with_links(res_bay)

     @base.Controller.api_version("1.2", "1.2")  # noqa
@@ -406,7 +455,7 @@ class BaysController(base.Controller):
         :param patch: a json PATCH document to apply to this bay.
         """
         bay = self._patch(bay_ident, patch)
-        pecan.request.rpcapi.bay_update_async(bay)
+        pecan.request.rpcapi.cluster_update_async(bay)
         return BayID(bay.uuid)

     @base.Controller.api_version("1.3")  # noqa
@@ -421,12 +470,12 @@ class BaysController(base.Controller):
         :param patch: a json PATCH document to apply to this bay.
         """
         bay = self._patch(bay_ident, patch)
-        pecan.request.rpcapi.bay_update_async(bay, rollback=rollback)
+        pecan.request.rpcapi.cluster_update_async(bay, rollback=rollback)
         return BayID(bay.uuid)

     def _patch(self, bay_ident, patch):
         context = pecan.request.context
-        bay = api_utils.get_resource('Bay', bay_ident)
+        bay = api_utils.get_resource('Cluster', bay_ident)
         policy.enforce(context, 'bay:update', bay,
                        action='bay:update')
         try:
@@ -436,7 +485,7 @@ class BaysController(base.Controller):
             raise exception.PatchError(patch=patch, reason=e)

         # Update only the fields that have changed
-        for field in objects.Bay.fields:
+        for field in objects.Cluster.fields:
             try:
                 patch_val = getattr(new_bay, field)
             except AttributeError:
@@ -449,7 +498,7 @@ class BaysController(base.Controller):

         delta = bay.obj_what_changed()

-        validate_bay_properties(delta)
+        validate_cluster_properties(delta)
         return bay

     @base.Controller.api_version("1.1", "1.1")
@@ -461,7 +510,7 @@ class BaysController(base.Controller):
         """
         bay = self._delete(bay_ident)

-        pecan.request.rpcapi.bay_delete(bay.uuid)
+        pecan.request.rpcapi.cluster_delete(bay.uuid)

     @base.Controller.api_version("1.2")  # noqa
     @expose.expose(None, types.uuid_or_name, status_code=204)
@@ -472,11 +521,11 @@ class BaysController(base.Controller):
         """
         bay = self._delete(bay_ident)

-        pecan.request.rpcapi.bay_delete_async(bay.uuid)
+        pecan.request.rpcapi.cluster_delete_async(bay.uuid)

     def _delete(self, bay_ident):
         context = pecan.request.context
-        bay = api_utils.get_resource('Bay', bay_ident)
+        bay = api_utils.get_resource('Cluster', bay_ident)
         policy.enforce(context, 'bay:delete', bay,
                        action='bay:delete')
         return bay
@@ -46,7 +46,7 @@ class BayModel(base.APIBase):
     name = wtypes.StringType(min_length=1, max_length=255)
     """The name of the Baymodel"""

-    coe = wtypes.Enum(str, *fields.BayType.ALL, mandatory=True)
+    coe = wtypes.Enum(str, *fields.ClusterType.ALL, mandatory=True)
     """The Container Orchestration Engine for this bay model"""

     image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
@@ -181,7 +181,7 @@ class BayModel(base.APIBase):
                      docker_volume_size=25,
                      docker_storage_driver='devicemapper',
                      cluster_distro='fedora-atomic',
-                     coe=fields.BayType.KUBERNETES,
+                     coe=fields.ClusterType.KUBERNETES,
                      http_proxy='http://proxy.com:123',
                      https_proxy='https://proxy.com:123',
                      no_proxy='192.168.0.1,192.168.0.2,192.168.0.3',
@@ -46,11 +46,11 @@ class Certificate(base.APIBase):
     def _set_cluster_uuid(self, value):
         if value and self._cluster_uuid != value:
             try:
-                self._cluster = api_utils.get_resource('Bay', value)
+                self._cluster = api_utils.get_resource('Cluster', value)
                 self._cluster_uuid = self._cluster.uuid
             except exception.ClusterNotFound as e:
                 # Change error code because 404 (NotFound) is inappropriate
-                # response for a POST request to create a Bay
+                # response for a POST request to create a Cluster
                 e.code = 400  # BadRequest
                 raise
         elif value == wtypes.Unset:
@@ -90,7 +90,8 @@ class Certificate(base.APIBase):

     def get_cluster(self):
         if not self._cluster:
-            self._cluster = api_utils.get_resource('Bay', self.cluster_uuid)
+            self._cluster = api_utils.get_resource('Cluster',
+                                                   self.cluster_uuid)
         return self._cluster

     @staticmethod
@@ -141,7 +142,7 @@ class CertificateController(base.Controller):
             logical name of the cluster.
         """
         context = pecan.request.context
-        cluster = api_utils.get_resource('Bay', cluster_ident)
+        cluster = api_utils.get_resource('Cluster', cluster_ident)
         policy.enforce(context, 'certificate:get', cluster,
                        action='certificate:get')
         certificate = pecan.request.rpcapi.get_ca_certificate(cluster)
@@ -28,7 +28,7 @@ from magnum.api.controllers.v1 import collection
 from magnum.api.controllers.v1 import types
 from magnum.api import expose
 from magnum.api import utils as api_utils
-from magnum.api.validation import validate_bay_properties
+from magnum.api.validation import validate_cluster_properties
 from magnum.common import clients
 from magnum.common import exception
 from magnum.common import name_generator
@@ -110,7 +110,7 @@ class Cluster(base.APIBase):
     stack_id = wsme.wsattr(wtypes.text, readonly=True)
     """Stack id of the heat stack"""

-    status = wtypes.Enum(str, *fields.BayStatus.ALL)
+    status = wtypes.Enum(str, *fields.ClusterStatus.ALL)
     """Status of the cluster from the heat stack"""

     status_reason = wtypes.text
@@ -141,36 +141,13 @@ class Cluster(base.APIBase):
     def __init__(self, **kwargs):
         super(Cluster, self).__init__()
         self.fields = []
-        for field in objects.Bay.fields:
+        for field in objects.Cluster.fields:
             # Skip fields we do not expose.
             if not hasattr(self, field):
                 continue
             self.fields.append(field)
             setattr(self, field, kwargs.get(field, wtypes.Unset))

-        # Set the renamed attributes for clusters
-        self.fields.append('cluster_template_id')
-        if 'cluster_template_id' in kwargs.keys():
-            setattr(self, 'cluster_template_id',
-                    kwargs.get('cluster_template_id', wtypes.Unset))
-        else:
-            setattr(self, 'cluster_template_id', kwargs.get('baymodel_id',
-                                                            wtypes.Unset))
-
-        self.fields.append('create_timeout')
-        if 'create_timeout' in kwargs.keys():
-            setattr(self, 'create_timeout', kwargs.get('create_timeout',
-                                                       wtypes.Unset))
-        else:
-            setattr(self, 'create_timeout', kwargs.get('bay_create_timeout',
-                                                       wtypes.Unset))
-
-        self.fields.append('faults')
-        if 'faults' in kwargs.keys():
-            setattr(self, 'faults', kwargs.get('faults', wtypes.Unset))
-        else:
-            setattr(self, 'faults', kwargs.get('bay_faults', wtypes.Unset))
-
     @staticmethod
     def _convert_with_links(cluster, url, expand=True):
         if not expand:
@@ -180,9 +157,9 @@ class Cluster(base.APIBase):
                                                        'stack_id'])

         cluster.links = [link.Link.make_link('self', url,
-                                             'bays', cluster.uuid),
+                                             'clusters', cluster.uuid),
                          link.Link.make_link('bookmark', url,
-                                             'bays', cluster.uuid,
+                                             'clusters', cluster.uuid,
                                              bookmark=True)]
         return cluster

@@ -201,7 +178,7 @@ class Cluster(base.APIBase):
                      master_count=1,
                      create_timeout=15,
                      stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63',
-                     status=fields.BayStatus.CREATE_COMPLETE,
+                     status=fields.ClusterStatus.CREATE_COMPLETE,
                      status_reason="CREATE completed successfully",
                      api_address='172.24.4.3',
                      node_addresses=['172.24.4.4', '172.24.4.5'],
@@ -211,26 +188,6 @@ class Cluster(base.APIBase):
                      container_version=None)
         return cls._convert_with_links(sample, 'http://localhost:9511', expand)

-    def as_dict(self):
-        """Render this object as a dict of its fields."""
-
-        # Override this for updated cluster values
-        d = super(Cluster, self).as_dict()
-
-        if 'cluster_template_id' in d.keys():
-            d['baymodel_id'] = d['cluster_template_id']
-            del d['cluster_template_id']
-
-        if 'create_timeout' in d.keys():
-            d['bay_create_timeout'] = d['create_timeout']
-            del d['create_timeout']
-
-        if 'faults' in d.keys():
-            d['bay_faults'] = d['faults']
-            del d['faults']
-
-        return d
-

 class ClusterPatchType(types.JsonPatchType):
     _api_base = Cluster
@@ -295,12 +252,12 @@ class ClustersController(base.Controller):

         marker_obj = None
         if marker:
-            marker_obj = objects.Bay.get_by_uuid(pecan.request.context,
-                                                 marker)
+            marker_obj = objects.Cluster.get_by_uuid(pecan.request.context,
+                                                     marker)

-        clusters = objects.Bay.list(pecan.request.context, limit,
-                                    marker_obj, sort_key=sort_key,
-                                    sort_dir=sort_dir)
+        clusters = objects.Cluster.list(pecan.request.context, limit,
+                                        marker_obj, sort_key=sort_key,
+                                        sort_dir=sort_dir)

         return ClusterCollection.convert_with_links(clusters, limit,
                                                     url=resource_url,
@@ -380,13 +337,13 @@ class ClustersController(base.Controller):
         :param cluster_ident: UUID or logical name of the Cluster.
         """
         context = pecan.request.context
-        cluster = api_utils.get_resource('Bay', cluster_ident)
+        cluster = api_utils.get_resource('Cluster', cluster_ident)
         policy.enforce(context, 'cluster:get', cluster,
                        action='cluster:get')

         cluster = Cluster.convert_with_links(cluster)

-        if cluster.status in fields.BayStatus.STATUS_FAILED:
+        if cluster.status in fields.ClusterStatus.STATUS_FAILED:
             cluster.faults = self._collect_fault_info(context, cluster)

         return cluster
@@ -420,10 +377,10 @@ class ClustersController(base.Controller):
         cluster_dict['coe_version'] = None
         cluster_dict['container_version'] = None

-        new_cluster = objects.Bay(context, **cluster_dict)
+        new_cluster = objects.Cluster(context, **cluster_dict)
         new_cluster.uuid = uuid.uuid4()
-        pecan.request.rpcapi.bay_create_async(new_cluster,
-                                              cluster.create_timeout)
+        pecan.request.rpcapi.cluster_create_async(new_cluster,
+                                                  cluster.create_timeout)

         return ClusterID(new_cluster.uuid)

@@ -438,7 +395,7 @@ class ClustersController(base.Controller):
         :param patch: a json PATCH document to apply to this cluster.
         """
         cluster = self._patch(cluster_ident, patch)
-        pecan.request.rpcapi.bay_update_async(cluster)
+        pecan.request.rpcapi.cluster_update_async(cluster)
         return ClusterID(cluster.uuid)

     @base.Controller.api_version("1.3")  # noqa
@@ -453,12 +410,12 @@ class ClustersController(base.Controller):
         :param patch: a json PATCH document to apply to this cluster.
         """
         cluster = self._patch(cluster_ident, patch)
-        pecan.request.rpcapi.bay_update_async(cluster, rollback)
+        pecan.request.rpcapi.cluster_update_async(cluster, rollback)
         return ClusterID(cluster.uuid)

     def _patch(self, cluster_ident, patch):
         context = pecan.request.context
-        cluster = api_utils.get_resource('Bay', cluster_ident)
+        cluster = api_utils.get_resource('Cluster', cluster_ident)
         policy.enforce(context, 'cluster:update', cluster,
                        action='cluster:update')
         try:
@@ -469,7 +426,7 @@ class ClustersController(base.Controller):
             raise exception.PatchError(patch=patch, reason=e)

         # Update only the fields that have changed
-        for field in objects.Bay.fields:
+        for field in objects.Cluster.fields:
             try:
                 patch_val = getattr(new_cluster, field)
             except AttributeError:
@@ -482,7 +439,7 @@ class ClustersController(base.Controller):

         delta = cluster.obj_what_changed()

-        validate_bay_properties(delta)
+        validate_cluster_properties(delta)
         return cluster

     @expose.expose(None, types.uuid_or_name, status_code=204)
@@ -492,8 +449,8 @@ class ClustersController(base.Controller):
         :param cluster_ident: UUID of cluster or logical name of the cluster.
         """
         context = pecan.request.context
-        cluster = api_utils.get_resource('Bay', cluster_ident)
+        cluster = api_utils.get_resource('Cluster', cluster_ident)
         policy.enforce(context, 'cluster:delete', cluster,
                        action='cluster:delete')

-        pecan.request.rpcapi.bay_delete_async(cluster.uuid)
+        pecan.request.rpcapi.cluster_delete_async(cluster.uuid)
@@ -47,7 +47,7 @@ class ClusterTemplate(base.APIBase):
     name = wtypes.StringType(min_length=1, max_length=255)
     """The name of the ClusterTemplate"""

-    coe = wtypes.Enum(str, *fields.BayType.ALL, mandatory=True)
+    coe = wtypes.Enum(str, *fields.ClusterType.ALL, mandatory=True)
     """The Container Orchestration Engine for this clustertemplate"""

     image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
@@ -185,7 +185,7 @@ class ClusterTemplate(base.APIBase):
                      docker_volume_size=25,
                      docker_storage_driver='devicemapper',
                      cluster_distro='fedora-atomic',
-                     coe=fields.BayType.KUBERNETES,
+                     coe=fields.ClusterType.KUBERNETES,
                      http_proxy='http://proxy.com:123',
                      https_proxy='https://proxy.com:123',
                      no_proxy='192.168.0.1,192.168.0.2,192.168.0.3',
@@ -65,32 +65,35 @@ cluster_template_opts = [
 cfg.CONF.register_opts(cluster_template_opts, group='cluster_template')


-bay_update_allowed_properties = set(['node_count'])
+cluster_update_allowed_properties = set(['node_count'])


-def enforce_bay_types(*bay_types):
-    """Enforce that bay_type is in supported list."""
+def enforce_cluster_types(*cluster_types):
+    """Enforce that cluster_type is in supported list."""
     @decorator.decorator
     def wrapper(func, *args, **kwargs):
         # Note(eliqiao): This decorator has some assumptions
         # args[1] should be an APIBase instance or
-        # args[2] should be a bay_ident
+        # args[2] should be a cluster_ident
         obj = args[1]
-        if hasattr(obj, 'bay_uuid'):
-            bay = objects.Bay.get_by_uuid(pecan.request.context, obj.bay_uuid)
+        if hasattr(obj, 'cluster_uuid'):
+            cluster = objects.Cluster.get_by_uuid(pecan.request.context,
+                                                  obj.cluster_uuid)
         else:
-            bay_ident = args[2]
-            if uuidutils.is_uuid_like(bay_ident):
-                bay = objects.Bay.get_by_uuid(pecan.request.context, bay_ident)
+            cluster_ident = args[2]
+            if uuidutils.is_uuid_like(cluster_ident):
+                cluster = objects.Cluster.get_by_uuid(pecan.request.context,
+                                                      cluster_ident)
             else:
-                bay = objects.Bay.get_by_name(pecan.request.context, bay_ident)
+                cluster = objects.Cluster.get_by_name(pecan.request.context,
+                                                      cluster_ident)

-        if bay.cluster_template.coe not in bay_types:
+        if cluster.cluster_template.coe not in cluster_types:
             raise exception.InvalidParameterValue(_(
-                'Cannot fulfill request with a %(bay_type)s bay, '
-                'expecting a %(supported_bay_types)s bay.') %
-                {'bay_type': bay.cluster_template.coe,
-                 'supported_bay_types': '/'.join(bay_types)})
+                'Cannot fulfill request with a %(cluster_type)s cluster, '
+                'expecting a %(supported_cluster_types)s cluster.') %
+                {'cluster_type': cluster.cluster_template.coe,
+                 'supported_cluster_types': '/'.join(cluster_types)})

         return func(*args, **kwargs)
@@ -192,11 +195,11 @@ def _enforce_volume_storage_size(cluster_template):
                'driver.') % (volume_size, storage_driver)


-def validate_bay_properties(delta):
+def validate_cluster_properties(delta):

-    update_disallowed_properties = delta - bay_update_allowed_properties
+    update_disallowed_properties = delta - cluster_update_allowed_properties
     if update_disallowed_properties:
-        err = (_("cannot change bay property(ies) %s.") %
+        err = (_("cannot change cluster property(ies) %s.") %
                ", ".join(update_disallowed_properties))
         raise exception.InvalidParameterValue(err=err)
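Under the allowed set defined above, only node_count may change on update; a quick sketch of the renamed validator's behavior:

    # Sketch: validate_cluster_properties() with the allowed set above.
    validate_cluster_properties(set(['node_count']))  # passes silently
    validate_cluster_properties(set(['name']))  # raises InvalidParameterValue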
@@ -25,8 +25,8 @@ from oslo_service import service
 from magnum.common import rpc_service
 from magnum.common import service as magnum_service
 from magnum.common import short_id
-from magnum.conductor.handlers import bay_conductor
 from magnum.conductor.handlers import ca_conductor
+from magnum.conductor.handlers import cluster_conductor
 from magnum.conductor.handlers import conductor_listener
 from magnum.conductor.handlers import indirection_api
 from magnum.i18n import _LI
@@ -49,7 +49,7 @@ def main():
     conductor_id = short_id.generate_id()
     endpoints = [
         indirection_api.Handler(),
-        bay_conductor.Handler(),
+        cluster_conductor.Handler(),
         conductor_listener.Handler(),
         ca_conductor.Handler(),
     ]
@@ -50,7 +50,7 @@ class TemplateList(lister.Lister):
         parser.add_argument('-d', '--details',
                             action='store_true',
                             dest='details',
-                            help=('display the bay types provided by '
+                            help=('display the cluster types provided by '
                                   'each template'))
         parser.add_argument('-p', '--paths',
                             action='store_true',
@@ -77,10 +77,10 @@ class TemplateList(lister.Lister):
                             path=definition.template_path)

             if parsed_args.details:
-                for bay_type in definition.provides:
+                for cluster_type in definition.provides:
                     row = dict()
                     row.update(template)
-                    row.update(bay_type)
+                    row.update(cluster_type)
                     rows.append(row)
             else:
                 rows.append(template)
@@ -105,15 +105,15 @@ def make_admin_context(show_deleted=False, all_tenants=False):
     return context


-def make_bay_context(bay, show_deleted=False):
-    """Create a user context based on a bay's stored Keystone trust.
+def make_cluster_context(cluster, show_deleted=False):
+    """Create a user context based on a cluster's stored Keystone trust.

-    :param bay: the bay supplying the Keystone trust to use
+    :param cluster: the cluster supplying the Keystone trust to use
     :param show_deleted: if True, will show deleted items when query db
     """
-    context = RequestContext(user_name=bay.trustee_username,
-                             password=bay.trustee_password,
-                             trust_id=bay.trust_id,
+    context = RequestContext(user_name=cluster.trustee_username,
+                             password=cluster.trustee_password,
+                             trust_id=cluster.trust_id,
                              show_deleted=show_deleted,
                              user_domain_id=CONF.trust.trustee_domain_id,
                              user_domain_name=CONF.trust.trustee_domain_name)
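For callers, the renamed helper builds a trust-scoped request context from a cluster's stored trustee credentials; a hedged usage sketch (cluster is any object carrying the attributes referenced above):

    # Sketch: construct a context that authenticates via the cluster's trust.
    ctx = make_cluster_context(cluster, show_deleted=False)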
@@ -75,20 +75,21 @@ def is_docker_api_version_atleast(docker, version):


 @contextlib.contextmanager
-def docker_for_bay(context, bay):
-    cluster_template = conductor_utils.retrieve_cluster_template(context, bay)
+def docker_for_cluster(context, cluster):
+    cluster_template = conductor_utils.retrieve_cluster_template(
+        context, cluster)

     ca_cert, magnum_key, magnum_cert = None, None, None
     client_kwargs = dict()
     if not cluster_template.tls_disabled:
         (ca_cert, magnum_key,
-         magnum_cert) = cert_manager.create_client_files(bay)
+         magnum_cert) = cert_manager.create_client_files(cluster)
         client_kwargs['ca_cert'] = ca_cert.name
         client_kwargs['client_key'] = magnum_key.name
         client_kwargs['client_cert'] = magnum_cert.name

     yield DockerHTTPClient(
-        bay.api_address,
+        cluster.api_address,
         CONF.docker.docker_remote_api_version,
         CONF.docker.default_timeout,
         **client_kwargs
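The renamed context manager yields a Docker client wired with the cluster's TLS material whenever the template has TLS enabled; a usage sketch (the containers() call is illustrative of docker-py style clients, not a guaranteed API):

    # Sketch: talk to the cluster's Docker endpoint via the manager above.
    with docker_for_cluster(context, cluster) as docker:
        docker.containers(all=True)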
@@ -154,7 +154,7 @@ class GetDiscoveryUrlFailed(MagnumException):
     message = _("Failed to get discovery url from '%(discovery_endpoint)s'.")


-class InvalidBayDiscoveryURL(Invalid):
+class InvalidClusterDiscoveryURL(Invalid):
     message = _("Invalid discovery URL '%(discovery_url)s'.")


@@ -271,11 +271,11 @@ class PodAlreadyExists(Conflict):


 class PodListNotFound(ResourceNotFound):
-    message = _("Pod list could not be found for Bay %(bay_uuid)s.")
+    message = _("Pod list could not be found for Cluster %(cluster_uuid)s.")


 class PodCreationFailed(Invalid):
-    message = _("Pod creation failed in Bay %(bay_uuid)s.")
+    message = _("Pod creation failed in Cluster %(cluster_uuid)s.")


 class ServiceNotFound(ResourceNotFound):
@@ -287,11 +287,12 @@ class ServiceAlreadyExists(Conflict):


 class ServiceListNotFound(ResourceNotFound):
-    message = _("Service list could not be found for Bay %(bay_uuid)s.")
+    message = _("Service list could not be found for Cluster "
+                "%(cluster_uuid)s.")


 class ServiceCreationFailed(Invalid):
-    message = _("Service creation failed for Bay %(bay_uuid)s.")
+    message = _("Service creation failed for Cluster %(cluster_uuid)s.")


 class ContainerException(Exception):
@@ -303,13 +304,13 @@ class NotSupported(MagnumException):
     code = 400


-class BayTypeNotSupported(MagnumException):
-    message = _("Bay type (%(server_type)s, %(os)s, %(coe)s)"
+class ClusterTypeNotSupported(MagnumException):
+    message = _("Cluster type (%(server_type)s, %(os)s, %(coe)s)"
                 " not supported.")


-class BayTypeNotEnabled(MagnumException):
-    message = _("Bay type (%(server_type)s, %(os)s, %(coe)s)"
+class ClusterTypeNotEnabled(MagnumException):
+    message = _("Cluster type (%(server_type)s, %(os)s, %(coe)s)"
                 " not enabled.")
@@ -322,7 +323,8 @@ class Urllib2InvalidScheme(MagnumException):


 class OperationInProgress(Invalid):
-    message = _("Bay %(bay_name)s already has an operation in progress.")
+    message = _("Cluster %(cluster_name)s already has an operation in "
+                "progress.")


 class ImageNotFound(ResourceNotFound):
@@ -383,11 +385,11 @@ class MagnumServiceAlreadyExists(Conflict):


 class UnsupportedK8sQuantityFormat(MagnumException):
-    message = _("Unsupported quantity format for k8s bay.")
+    message = _("Unsupported quantity format for k8s cluster.")


 class UnsupportedDockerQuantityFormat(MagnumException):
-    message = _("Unsupported quantity format for Swarm bay.")
+    message = _("Unsupported quantity format for Swarm cluster.")


 class FlavorNotFound(ResourceNotFound):
@@ -429,8 +431,9 @@ class RegionsListFailed(MagnumException):
     message = _("Failed to list regions.")


-class TrusteeOrTrustToBayFailed(MagnumException):
-    message = _("Failed to create trustee or trust for Bay: %(bay_uuid)s")
+class TrusteeOrTrustToClusterFailed(MagnumException):
+    message = _("Failed to create trustee or trust for Cluster: "
+                "%(cluster_uuid)s")


 class CertificatesToClusterFailed(MagnumException):
@@ -32,9 +32,9 @@ LOG = logging.getLogger(__name__)

 trust_opts = [
     cfg.StrOpt('trustee_domain_id',
-               help=_('Id of the domain to create trustee for bays')),
+               help=_('Id of the domain to create trustee for clusters')),
     cfg.StrOpt('trustee_domain_name',
-               help=_('Name of the domain to create trustee for bays')),
+               help=_('Name of the domain to create trustee for clusters')),
     cfg.StrOpt('trustee_domain_admin_id',
                help=_('Id of the admin with roles sufficient to manage users'
                       ' in the trustee_domain')),
@@ -256,21 +256,21 @@ class KeystoneClientV3(object):
                                          trustee_user_id=trustee_user)
         return trust

-    def delete_trust(self, context, bay):
-        if bay.trust_id is None:
+    def delete_trust(self, context, cluster):
+        if cluster.trust_id is None:
             return

         # Trust can only be deleted by the user who creates it. So when
-        # other users in the same project want to delete the bay, we need
+        # other users in the same project want to delete the cluster, we need
         # use the trustee which can impersonate the trustor to delete the
         # trust.
-        if context.user_id == bay.user_id:
+        if context.user_id == cluster.user_id:
             client = self.client
         else:
             auth = ka_v3.Password(auth_url=self.auth_url,
-                                  user_id=bay.trustee_user_id,
-                                  password=bay.trustee_password,
-                                  trust_id=bay.trust_id)
+                                  user_id=cluster.trustee_user_id,
+                                  password=cluster.trustee_password,
+                                  trust_id=cluster.trust_id)

             sess = ka_loading.session.Session().load_from_options(
                 auth=auth,
@@ -280,12 +280,12 @@ class KeystoneClientV3(object):
                 cert=CONF[CFG_LEGACY_GROUP].certfile)
             client = kc_v3.Client(session=sess)
         try:
-            client.trusts.delete(bay.trust_id)
+            client.trusts.delete(cluster.trust_id)
         except kc_exception.NotFound:
             pass
         except Exception:
             LOG.exception(_LE('Failed to delete trust'))
-            raise exception.TrustDeleteFailed(trust_id=bay.trust_id)
+            raise exception.TrustDeleteFailed(trust_id=cluster.trust_id)

     def create_trustee(self, username, password):
         domain_id = self.trustee_domain_id
@@ -104,11 +104,11 @@ def enforce_wsgi(api_name, act=None):

     example:
         from magnum.common import policy
-        class BaysController(rest.RestController):
+        class ClustersController(rest.RestController):
             ....
-            @policy.enforce_wsgi("bay", "delete")
+            @policy.enforce_wsgi("cluster", "delete")
             @wsme_pecan.wsexpose(None, types.uuid_or_name, status_code=204)
-            def delete(self, bay_ident):
+            def delete(self, cluster_ident):
                 ...
     """
     @decorator.decorator
@@ -44,8 +44,8 @@ TRANSPORT_ALIASES = {
 periodic_opts = [
     cfg.BoolOpt('periodic_global_stack_list',
                 default=False,
-                help="List Heat stacks globally when syncing bays. "
-                     "Default is to do retrieve each bay's stack "
+                help="List Heat stacks globally when syncing clusters. "
+                     "Default is to do retrieve each cluster's stack "
                      "individually. Reduces number of requests against "
                      "Heat API if enabled but requires changes to Heat's "
                      "policy.json."),
@@ -28,27 +28,27 @@ class API(rpc_service.API):
         super(API, self).__init__(transport, context,
                                   topic=cfg.CONF.conductor.topic)

-    # Bay Operations
+    # Cluster Operations

-    def bay_create(self, bay, bay_create_timeout):
-        return self._call('bay_create', bay=bay,
-                          bay_create_timeout=bay_create_timeout)
+    def cluster_create(self, cluster, create_timeout):
+        return self._call('cluster_create', cluster=cluster,
+                          create_timeout=create_timeout)

-    def bay_create_async(self, bay, bay_create_timeout):
-        self._cast('bay_create', bay=bay,
-                   bay_create_timeout=bay_create_timeout)
+    def cluster_create_async(self, cluster, create_timeout):
+        self._cast('cluster_create', cluster=cluster,
+                   create_timeout=create_timeout)

-    def bay_delete(self, uuid):
-        return self._call('bay_delete', uuid=uuid)
+    def cluster_delete(self, uuid):
+        return self._call('cluster_delete', uuid=uuid)

-    def bay_delete_async(self, uuid):
-        self._cast('bay_delete', uuid=uuid)
+    def cluster_delete_async(self, uuid):
+        self._cast('cluster_delete', uuid=uuid)

-    def bay_update(self, bay):
-        return self._call('bay_update', bay=bay)
+    def cluster_update(self, cluster):
+        return self._call('cluster_update', cluster=cluster)

-    def bay_update_async(self, bay, rollback=False):
-        self._cast('bay_update', bay=bay, rollback=rollback)
+    def cluster_update_async(self, cluster, rollback=False):
+        self._cast('cluster_update', cluster=cluster, rollback=rollback)

     # CA operations
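The renamed conductor API keeps the synchronous/asynchronous split: the _call variants block for an RPC reply while the _cast variants return immediately. A hedged usage sketch (ctx and cluster are assumed to exist):

    # Sketch: synchronous create waits for the conductor's reply;
    # the async variant is fire-and-forget.
    rpcapi = API(context=ctx)
    created = rpcapi.cluster_create(cluster, create_timeout=60)
    rpcapi.cluster_create_async(cluster, create_timeout=60)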
@@ -68,8 +68,9 @@ CONF.register_opts(cluster_heat_opts, group='cluster_heat')
 LOG = logging.getLogger(__name__)


-def _extract_template_definition(context, bay, scale_manager=None):
-    cluster_template = conductor_utils.retrieve_cluster_template(context, bay)
+def _extract_template_definition(context, cluster, scale_manager=None):
+    cluster_template = conductor_utils.retrieve_cluster_template(context,
+                                                                 cluster)
     cluster_distro = cluster_template.cluster_distro
     cluster_coe = cluster_template.coe
     cluster_server_type = cluster_template.server_type
@@ -77,7 +78,7 @@ def _extract_template_definition(context, cluster, scale_manager=None):
                                                   cluster_server_type,
                                                   cluster_distro,
                                                   cluster_coe)
-    return definition.extract_definition(context, cluster_template, bay,
+    return definition.extract_definition(context, cluster_template, cluster,
                                          scale_manager=scale_manager)
@@ -91,9 +92,9 @@ def _get_env_files(template_path, env_rel_paths):
     return environment_files, env_map


-def _create_stack(context, osc, bay, bay_create_timeout):
+def _create_stack(context, osc, cluster, create_timeout):
     template_path, heat_params, env_files = (
-        _extract_template_definition(context, bay))
+        _extract_template_definition(context, cluster))

     tpl_files, template = template_utils.get_template_contents(template_path)
@@ -101,11 +102,11 @@ def _create_stack(context, osc, cluster, create_timeout):
     tpl_files.update(env_map)

     # Make sure no duplicate stack name
-    stack_name = '%s-%s' % (bay.name, short_id.generate_id())
-    if bay_create_timeout:
-        heat_timeout = bay_create_timeout
+    stack_name = '%s-%s' % (cluster.name, short_id.generate_id())
+    if create_timeout:
+        heat_timeout = create_timeout
     else:
-        # no bay_create_timeout value was passed in to the request
+        # no create_timeout value was passed in to the request
         # so falling back on configuration file value
         heat_timeout = cfg.CONF.cluster_heat.create_timeout
     fields = {
@@ -121,9 +122,9 @@ def _create_stack(context, osc, cluster, create_timeout):
     return created_stack


-def _update_stack(context, osc, bay, scale_manager=None, rollback=False):
+def _update_stack(context, osc, cluster, scale_manager=None, rollback=False):
     template_path, heat_params, env_files = _extract_template_definition(
-        context, bay, scale_manager=scale_manager)
+        context, cluster, scale_manager=scale_manager)

     tpl_files, template = template_utils.get_template_contents(template_path)
     environment_files, env_map = _get_env_files(template_path, env_files)
@@ -137,7 +138,7 @@ def _update_stack(context, osc, cluster, scale_manager=None, rollback=False):
         'disable_rollback': not rollback
     }

-    return osc.heat().stacks.update(bay.stack_id, **fields)
+    return osc.heat().stacks.update(cluster.stack_id, **fields)


 class Handler(object):
@@ -145,26 +146,28 @@ class Handler(object):
     def __init__(self):
         super(Handler, self).__init__()

-    # Bay Operations
+    # Cluster Operations

-    def bay_create(self, context, bay, bay_create_timeout):
-        LOG.debug('bay_heat bay_create')
+    def cluster_create(self, context, cluster, create_timeout):
+        LOG.debug('cluster_heat cluster_create')

         osc = clients.OpenStackClients(context)

         try:
-            # Create trustee/trust and set them to bay
-            trust_manager.create_trustee_and_trust(osc, bay)
-            # Generate certificate and set the cert reference to bay
-            cert_manager.generate_certificates_to_cluster(bay, context=context)
-            conductor_utils.notify_about_bay_operation(
+            # Create trustee/trust and set them to cluster
+            trust_manager.create_trustee_and_trust(osc, cluster)
+            # Generate certificate and set the cert reference to cluster
+            cert_manager.generate_certificates_to_cluster(cluster,
+                                                          context=context)
+            conductor_utils.notify_about_cluster_operation(
                 context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_PENDING)
-            created_stack = _create_stack(context, osc, bay,
-                                          bay_create_timeout)
+            created_stack = _create_stack(context, osc, cluster,
+                                          create_timeout)
         except Exception as e:
-            cert_manager.delete_certificates_from_cluster(bay, context=context)
-            trust_manager.delete_trustee_and_trust(osc, context, bay)
-            conductor_utils.notify_about_bay_operation(
+            cert_manager.delete_certificates_from_cluster(cluster,
+                                                          context=context)
+            trust_manager.delete_trustee_and_trust(osc, context, cluster)
+            conductor_utils.notify_about_cluster_operation(
                 context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_FAILURE)

             if isinstance(e, exc.HTTPBadRequest):
@@ -173,111 +176,112 @@ class Handler(object):
                 raise e
             raise

-        bay.stack_id = created_stack['stack']['id']
-        bay.status = fields.BayStatus.CREATE_IN_PROGRESS
-        bay.create()
+        cluster.stack_id = created_stack['stack']['id']
+        cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS
+        cluster.create()

-        self._poll_and_check(osc, bay)
+        self._poll_and_check(osc, cluster)

-        return bay
+        return cluster

-    def bay_update(self, context, bay, rollback=False):
-        LOG.debug('bay_heat bay_update')
+    def cluster_update(self, context, cluster, rollback=False):
+        LOG.debug('cluster_heat cluster_update')

         osc = clients.OpenStackClients(context)
-        stack = osc.heat().stacks.get(bay.stack_id)
+        stack = osc.heat().stacks.get(cluster.stack_id)
         allow_update_status = (
-            fields.BayStatus.CREATE_COMPLETE,
-            fields.BayStatus.UPDATE_COMPLETE,
-            fields.BayStatus.RESUME_COMPLETE,
-            fields.BayStatus.RESTORE_COMPLETE,
-            fields.BayStatus.ROLLBACK_COMPLETE,
-            fields.BayStatus.SNAPSHOT_COMPLETE,
-            fields.BayStatus.CHECK_COMPLETE,
-            fields.BayStatus.ADOPT_COMPLETE
+            fields.ClusterStatus.CREATE_COMPLETE,
+            fields.ClusterStatus.UPDATE_COMPLETE,
+            fields.ClusterStatus.RESUME_COMPLETE,
+            fields.ClusterStatus.RESTORE_COMPLETE,
+            fields.ClusterStatus.ROLLBACK_COMPLETE,
+            fields.ClusterStatus.SNAPSHOT_COMPLETE,
+            fields.ClusterStatus.CHECK_COMPLETE,
+            fields.ClusterStatus.ADOPT_COMPLETE
         )
         if stack.stack_status not in allow_update_status:
-            conductor_utils.notify_about_bay_operation(
+            conductor_utils.notify_about_cluster_operation(
                 context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
-            operation = _('Updating a bay when stack status is '
+            operation = _('Updating a cluster when stack status is '
                           '"%s"') % stack.stack_status
             raise exception.NotSupported(operation=operation)

-        delta = bay.obj_what_changed()
+        delta = cluster.obj_what_changed()
         if not delta:
-            return bay
+            return cluster

-        manager = scale_manager.ScaleManager(context, osc, bay)
+        manager = scale_manager.ScaleManager(context, osc, cluster)

-        conductor_utils.notify_about_bay_operation(
+        conductor_utils.notify_about_cluster_operation(
             context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING)

-        _update_stack(context, osc, bay, manager, rollback)
-        self._poll_and_check(osc, bay)
+        _update_stack(context, osc, cluster, manager, rollback)
+        self._poll_and_check(osc, cluster)

-        return bay
+        return cluster

-    def bay_delete(self, context, uuid):
-        LOG.debug('bay_heat bay_delete')
+    def cluster_delete(self, context, uuid):
+        LOG.debug('cluster_heat cluster_delete')
         osc = clients.OpenStackClients(context)
-        bay = objects.Bay.get_by_uuid(context, uuid)
+        cluster = objects.Cluster.get_by_uuid(context, uuid)

-        stack_id = bay.stack_id
+        stack_id = cluster.stack_id
         # NOTE(sdake): This will execute a stack_delete operation. This will
         # Ignore HTTPNotFound exceptions (stack wasn't present). In the case
-        # that Heat couldn't find the stack representing the bay, likely a user
-        # has deleted the stack outside the context of Magnum. Therefore the
-        # contents of the bay are forever lost.
+        # that Heat couldn't find the stack representing the cluster, likely a
+        # user has deleted the stack outside the context of Magnum. Therefore
+        # the contents of the cluster are forever lost.
         #
         # If the exception is unhandled, the original exception will be raised.
         try:
-            conductor_utils.notify_about_bay_operation(
+            conductor_utils.notify_about_cluster_operation(
                 context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
             osc.heat().stacks.delete(stack_id)
         except exc.HTTPNotFound:
-            LOG.info(_LI('The stack %s was not found during bay'
+            LOG.info(_LI('The stack %s was not found during cluster'
                          ' deletion.'), stack_id)
             try:
-                trust_manager.delete_trustee_and_trust(osc, context, bay)
-                cert_manager.delete_certificates_from_cluster(bay,
+                trust_manager.delete_trustee_and_trust(osc, context, cluster)
+                cert_manager.delete_certificates_from_cluster(cluster,
                                                               context=context)
-                bay.destroy()
+                cluster.destroy()
             except exception.ClusterNotFound:
-                LOG.info(_LI('The bay %s has been deleted by others.'), uuid)
-            conductor_utils.notify_about_bay_operation(
+                LOG.info(_LI('The cluster %s has been deleted by others.'),
+                         uuid)
+            conductor_utils.notify_about_cluster_operation(
                 context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
             return None
         except exc.HTTPConflict:
-            conductor_utils.notify_about_bay_operation(
+            conductor_utils.notify_about_cluster_operation(
                 context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
-            raise exception.OperationInProgress(bay_name=bay.name)
+            raise exception.OperationInProgress(cluster_name=cluster.name)
         except Exception:
-            conductor_utils.notify_about_bay_operation(
+            conductor_utils.notify_about_cluster_operation(
                 context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
             raise

-        bay.status = fields.BayStatus.DELETE_IN_PROGRESS
-        bay.save()
+        cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
+        cluster.save()

-        self._poll_and_check(osc, bay)
+        self._poll_and_check(osc, cluster)

         return None

-    def _poll_and_check(self, osc, bay):
-        poller = HeatPoller(osc, bay)
+    def _poll_and_check(self, osc, cluster):
+        poller = HeatPoller(osc, cluster)
         lc = loopingcall.FixedIntervalLoopingCall(f=poller.poll_and_check)
         lc.start(cfg.CONF.cluster_heat.wait_interval, True)


 class HeatPoller(object):

-    def __init__(self, openstack_client, bay):
+    def __init__(self, openstack_client, cluster):
         self.openstack_client = openstack_client
         self.context = self.openstack_client.context
-        self.bay = bay
+        self.cluster = cluster
         self.attempts = 0
         self.cluster_template = conductor_utils.retrieve_cluster_template(
-            self.context, bay)
+            self.context, cluster)