Apply black formatter to dcorch/engine
This commit applies the Black format to the `dcorch/engine` files so that they adhere to the Black code style guidelines.

Test Plan:
PASS: Success in stx-distcloud-tox-black

Story: 2011149
Task: 50445

Change-Id: Ie39ef3f89b32565c7ce9cf383d1227a0bf3f7a00
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
Committed by: Hugo Nicodemos
Parent: 51bee5a605
Commit: 51b6e19a2c
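For reference, the changes below are purely mechanical: Black normalizes single-quoted strings to double quotes, rewraps calls that do not fit in its default 88-character line length with one argument per line, and adds a trailing comma to the exploded argument list. The snippet below is only an illustration, using the `utils.enqueue_work()` call from the FernetKeyManager hunk in this diff; the local command at the end is an assumption (it presumes Black is installed in your environment) and is not part of this change.

# Before (pre-Black, hanging indent aligned to the opening parenthesis):
utils.enqueue_work(self.context,
                   self.endpoint_type,
                   self.resource_type,
                   FERNET_REPO_MASTER_ID,
                   operation_type,
                   resource_info=jsonutils.dumps(resource_info),
                   subcloud=subcloud)

# After (Black): the call exceeds 88 characters on one line, so each
# argument gets its own line and a trailing comma is added.
utils.enqueue_work(
    self.context,
    self.endpoint_type,
    self.resource_type,
    FERNET_REPO_MASTER_ID,
    operation_type,
    resource_info=jsonutils.dumps(resource_info),
    subcloud=subcloud,
)

# To reproduce the formatting locally (assumption: Black is installed):
#     python -m black dcorch/engine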
@@ -43,10 +43,11 @@ class FernetKeyManager(manager.Manager):
|
||||
"""Manages tasks related to fernet key management"""
|
||||
|
||||
def __init__(self, gsm, *args, **kwargs):
|
||||
LOG.debug(_('FernetKeyManager initialization...'))
|
||||
LOG.debug(_("FernetKeyManager initialization..."))
|
||||
|
||||
super(FernetKeyManager, self).__init__(service_name="fernet_manager",
|
||||
*args, **kwargs)
|
||||
super(FernetKeyManager, self).__init__(
|
||||
service_name="fernet_manager", *args, **kwargs
|
||||
)
|
||||
self.gsm = gsm
|
||||
self.context = context.get_admin_context()
|
||||
self.endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
|
||||
@@ -54,8 +55,7 @@ class FernetKeyManager(manager.Manager):
|
||||
|
||||
@classmethod
|
||||
def to_resource_info(cls, key_list):
|
||||
return dict((getattr(key, 'id'), getattr(key, 'key'))
|
||||
for key in key_list)
|
||||
return dict((getattr(key, "id"), getattr(key, "key")) for key in key_list)
|
||||
|
||||
@classmethod
|
||||
def from_resource_info(cls, keys):
|
||||
@@ -69,18 +69,19 @@ class FernetKeyManager(manager.Manager):
|
||||
def _schedule_work(self, operation_type, subcloud=None):
|
||||
keys = self._get_master_keys()
|
||||
if not keys:
|
||||
LOG.info(_("No fernet keys returned from %s") %
|
||||
dccommon_consts.CLOUD_0)
|
||||
LOG.info(_("No fernet keys returned from %s") % dccommon_consts.CLOUD_0)
|
||||
return
|
||||
try:
|
||||
resource_info = FernetKeyManager.to_resource_info(keys)
|
||||
utils.enqueue_work(self.context,
|
||||
self.endpoint_type,
|
||||
self.resource_type,
|
||||
FERNET_REPO_MASTER_ID,
|
||||
operation_type,
|
||||
resource_info=jsonutils.dumps(resource_info),
|
||||
subcloud=subcloud)
|
||||
utils.enqueue_work(
|
||||
self.context,
|
||||
self.endpoint_type,
|
||||
self.resource_type,
|
||||
FERNET_REPO_MASTER_ID,
|
||||
operation_type,
|
||||
resource_info=jsonutils.dumps(resource_info),
|
||||
subcloud=subcloud,
|
||||
)
|
||||
# wake up sync thread
|
||||
if self.gsm:
|
||||
self.gsm.sync_request(self.context, self.endpoint_type)
|
||||
@@ -98,15 +99,20 @@ class FernetKeyManager(manager.Manager):
|
||||
sysinv_client = SysinvClient(
|
||||
dccommon_consts.CLOUD_0,
|
||||
ks_client.session,
|
||||
endpoint=ks_client.endpoint_cache.get_endpoint('sysinv'))
|
||||
endpoint=ks_client.endpoint_cache.get_endpoint("sysinv"),
|
||||
)
|
||||
keys = sysinv_client.get_fernet_keys()
|
||||
except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
|
||||
exceptions.TimeOut):
|
||||
LOG.info(_("Retrieving the fernet keys from %s timeout") %
|
||||
dccommon_consts.CLOUD_0)
|
||||
except (
|
||||
exceptions.ConnectionRefused,
|
||||
exceptions.NotAuthorized,
|
||||
exceptions.TimeOut,
|
||||
):
|
||||
LOG.info(
|
||||
_("Retrieving the fernet keys from %s timeout")
|
||||
% dccommon_consts.CLOUD_0
|
||||
)
|
||||
except Exception as e:
|
||||
LOG.info(_("Fail to retrieve the master fernet keys: %s") %
|
||||
str(e))
|
||||
LOG.info(_("Fail to retrieve the master fernet keys: %s") % str(e))
|
||||
return keys
|
||||
|
||||
def rotate_fernet_keys(self):
|
||||
@@ -114,9 +120,8 @@ class FernetKeyManager(manager.Manager):
|
||||
|
||||
with open(os.devnull, "w") as fnull:
|
||||
try:
|
||||
subprocess.check_call(KEY_ROTATE_CMD, # pylint: disable=E1102
|
||||
stdout=fnull,
|
||||
stderr=fnull)
|
||||
# pylint: disable-next=E1102
|
||||
subprocess.check_call(KEY_ROTATE_CMD, stdout=fnull, stderr=fnull)
|
||||
except subprocess.CalledProcessError:
|
||||
msg = _("Failed to rotate the keys")
|
||||
LOG.exception(msg)
|
||||
@@ -128,8 +133,7 @@ class FernetKeyManager(manager.Manager):
|
||||
def distribute_keys(subcloud_name):
|
||||
keys = FernetKeyManager._get_master_keys()
|
||||
if not keys:
|
||||
LOG.info(_("No fernet keys returned from %s") %
|
||||
dccommon_consts.CLOUD_0)
|
||||
LOG.info(_("No fernet keys returned from %s") % dccommon_consts.CLOUD_0)
|
||||
return
|
||||
resource_info = FernetKeyManager.to_resource_info(keys)
|
||||
key_list = FernetKeyManager.from_resource_info(resource_info)
|
||||
@@ -147,12 +151,15 @@ class FernetKeyManager(manager.Manager):
|
||||
sysinv_client = SysinvClient(
|
||||
subcloud_name,
|
||||
ks_client.session,
|
||||
endpoint=ks_client.endpoint_cache.get_endpoint('sysinv'))
|
||||
endpoint=ks_client.endpoint_cache.get_endpoint("sysinv"),
|
||||
)
|
||||
sysinv_client.post_fernet_repo(key_list)
|
||||
except (exceptions.ConnectionRefused, exceptions.NotAuthorized,
|
||||
exceptions.TimeOut):
|
||||
LOG.info(_("Update the fernet repo on %s timeout") %
|
||||
subcloud_name)
|
||||
except (
|
||||
exceptions.ConnectionRefused,
|
||||
exceptions.NotAuthorized,
|
||||
exceptions.TimeOut,
|
||||
):
|
||||
LOG.info(_("Update the fernet repo on %s timeout") % subcloud_name)
|
||||
except Exception as e:
|
||||
error_msg = "subcloud: {}, {}".format(subcloud_name, str(e))
|
||||
LOG.info(_("Fail to update fernet repo %s") % error_msg)
|
||||
|
@@ -68,8 +68,7 @@ class GenericSyncManager(object):
|
||||
|
||||
def _process_subclouds(self, rpc_method, subcloud_sync_list):
|
||||
# We want a chunksize of at least 1 so add the number of workers.
|
||||
chunksize = \
|
||||
(len(subcloud_sync_list) + CONF.workers) // (CONF.workers)
|
||||
chunksize = (len(subcloud_sync_list) + CONF.workers) // (CONF.workers)
|
||||
|
||||
subcloud_sync_chunk = []
|
||||
for subcloud_sync in subcloud_sync_list:
|
||||
@@ -94,12 +93,16 @@ class GenericSyncManager(object):
|
||||
management_state=dccommon_consts.MANAGEMENT_MANAGED,
|
||||
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
|
||||
initial_sync_state=dco_consts.INITIAL_SYNC_STATE_COMPLETED,
|
||||
sync_requests=[dco_consts.SYNC_STATUS_REQUESTED,
|
||||
dco_consts.SYNC_STATUS_FAILED])
|
||||
sync_requests=[
|
||||
dco_consts.SYNC_STATUS_REQUESTED,
|
||||
dco_consts.SYNC_STATUS_FAILED,
|
||||
],
|
||||
)
|
||||
|
||||
if subcloud_sync_list:
|
||||
self._process_subclouds(
|
||||
self.engine_worker_rpc_client.sync_subclouds, subcloud_sync_list)
|
||||
self.engine_worker_rpc_client.sync_subclouds, subcloud_sync_list
|
||||
)
|
||||
else:
|
||||
LOG.debug("No eligible subclouds for sync.")
|
||||
|
||||
@@ -117,20 +120,26 @@ class GenericSyncManager(object):
|
||||
management_state=dccommon_consts.MANAGEMENT_MANAGED,
|
||||
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
|
||||
initial_sync_state=dco_consts.INITIAL_SYNC_STATE_COMPLETED,
|
||||
audit_interval=AUDIT_INTERVAL)
|
||||
audit_interval=AUDIT_INTERVAL,
|
||||
)
|
||||
|
||||
if subcloud_sync_list:
|
||||
self._process_subclouds(
|
||||
self.engine_worker_rpc_client.run_sync_audit, subcloud_sync_list)
|
||||
self.engine_worker_rpc_client.run_sync_audit, subcloud_sync_list
|
||||
)
|
||||
else:
|
||||
LOG.debug("No eligible subclouds for audit.")
|
||||
|
||||
def _send_chunk(self, rpc_method, subcloud_sync_chunk):
|
||||
try:
|
||||
rpc_method(self.context, subcloud_sync_chunk)
|
||||
LOG.debug(f"Sent {rpc_method.__name__} request message for "
|
||||
f"{len(subcloud_sync_chunk)} (subcloud, endpoint_type) "
|
||||
f"pairs.")
|
||||
LOG.debug(
|
||||
f"Sent {rpc_method.__name__} request message for "
|
||||
f"{len(subcloud_sync_chunk)} (subcloud, endpoint_type) "
|
||||
f"pairs."
|
||||
)
|
||||
except Exception as e:
|
||||
LOG.error(f"Exception occurred in {rpc_method.__name__} for "
|
||||
f"subclouds {subcloud_sync_chunk}: {e}")
|
||||
LOG.error(
|
||||
f"Exception occurred in {rpc_method.__name__} for "
|
||||
f"subclouds {subcloud_sync_chunk}: {e}"
|
||||
)
|
||||
|
@@ -26,7 +26,7 @@ SYNC_TIMEOUT = 600 # Timeout for subcloud sync
|
||||
sync_object_class_map = {
|
||||
dccommon_consts.ENDPOINT_TYPE_PLATFORM: SysinvSyncThread,
|
||||
dccommon_consts.ENDPOINT_TYPE_IDENTITY: IdentitySyncThread,
|
||||
dccommon_consts.ENDPOINT_TYPE_IDENTITY_OS: IdentitySyncThread
|
||||
dccommon_consts.ENDPOINT_TYPE_IDENTITY_OS: IdentitySyncThread,
|
||||
}
|
||||
|
||||
|
||||
@@ -38,59 +38,71 @@ class GenericSyncWorkerManager(object):
|
||||
self.engine_id = engine_id
|
||||
# Keeps track of greenthreads we create to do the sync work.
|
||||
self.sync_thread_group_manager = scheduler.ThreadGroupManager(
|
||||
thread_pool_size=100)
|
||||
thread_pool_size=100
|
||||
)
|
||||
# Keeps track of greenthreads we create to do the audit work.
|
||||
self.audit_thread_group_manager = scheduler.ThreadGroupManager(
|
||||
thread_pool_size=100)
|
||||
thread_pool_size=100
|
||||
)
|
||||
|
||||
def create_sync_objects(self, subcloud_name, capabilities, management_ip):
|
||||
"""Create sync object objects for the subcloud
|
||||
|
||||
The objects handle the syncing of the subcloud's endpoint_types
|
||||
The objects handle the syncing of the subcloud's endpoint_types
|
||||
"""
|
||||
sync_objs = {}
|
||||
endpoint_type_list = capabilities.get('endpoint_types', None)
|
||||
endpoint_type_list = capabilities.get("endpoint_types", None)
|
||||
if endpoint_type_list:
|
||||
for endpoint_type in endpoint_type_list:
|
||||
LOG.debug(f"Engine id:({self.engine_id}) create "
|
||||
f"{subcloud_name}/{endpoint_type}/{management_ip} "
|
||||
f"sync obj")
|
||||
sync_obj = sync_object_class_map[endpoint_type](subcloud_name,
|
||||
endpoint_type,
|
||||
management_ip)
|
||||
LOG.debug(
|
||||
f"Engine id:({self.engine_id}) create "
|
||||
f"{subcloud_name}/{endpoint_type}/{management_ip} "
|
||||
f"sync obj"
|
||||
)
|
||||
sync_obj = sync_object_class_map[endpoint_type](
|
||||
subcloud_name, endpoint_type, management_ip
|
||||
)
|
||||
sync_objs[endpoint_type] = sync_obj
|
||||
return sync_objs
|
||||
|
||||
def sync_subclouds(self, context, subcloud_sync_list):
|
||||
LOG.info(f"Engine id:({self.engine_id}) Start to sync "
|
||||
f"{len(subcloud_sync_list)} (subcloud, endpoint_type) pairs.")
|
||||
LOG.debug(f"Engine id:({self.engine_id}) Start to sync "
|
||||
f"{subcloud_sync_list}.")
|
||||
LOG.info(
|
||||
f"Engine id:({self.engine_id}) Start to sync "
|
||||
f"{len(subcloud_sync_list)} (subcloud, endpoint_type) pairs."
|
||||
)
|
||||
LOG.debug(
|
||||
f"Engine id:({self.engine_id}) Start to sync " f"{subcloud_sync_list}."
|
||||
)
|
||||
|
||||
for sc_region_name, ept, ip in subcloud_sync_list:
|
||||
try:
|
||||
self.sync_thread_group_manager.start(self._sync_subcloud,
|
||||
self.context,
|
||||
sc_region_name,
|
||||
ept,
|
||||
ip)
|
||||
self.sync_thread_group_manager.start(
|
||||
self._sync_subcloud, self.context, sc_region_name, ept, ip
|
||||
)
|
||||
except exceptions.SubcloudSyncNotFound:
|
||||
# The endpoint in subcloud_sync has been removed
|
||||
LOG.debug(f"Engine id:({self.engine_id}/{sc_region_name}/{ept}) "
|
||||
f"SubcloudSyncNotFound: The endpoint in subcloud_sync "
|
||||
f"has been removed")
|
||||
LOG.debug(
|
||||
f"Engine id:({self.engine_id}/{sc_region_name}/{ept}) "
|
||||
f"SubcloudSyncNotFound: The endpoint in subcloud_sync "
|
||||
f"has been removed"
|
||||
)
|
||||
except Exception as e:
|
||||
LOG.error(f"Exception occurred when running sync {ept} for "
|
||||
f"subcloud {sc_region_name}: {e}")
|
||||
LOG.error(
|
||||
f"Exception occurred when running sync {ept} for "
|
||||
f"subcloud {sc_region_name}: {e}"
|
||||
)
|
||||
db_api.subcloud_sync_update(
|
||||
self.context, sc_region_name, ept,
|
||||
values={'sync_request': dco_consts.SYNC_STATUS_FAILED})
|
||||
self.context,
|
||||
sc_region_name,
|
||||
ept,
|
||||
values={"sync_request": dco_consts.SYNC_STATUS_FAILED},
|
||||
)
|
||||
|
||||
def _sync_subcloud(self, context, subcloud_name, endpoint_type, management_ip):
|
||||
LOG.info(f"Start to sync subcloud {subcloud_name}/{endpoint_type}.")
|
||||
sync_obj = sync_object_class_map[endpoint_type](subcloud_name,
|
||||
endpoint_type,
|
||||
management_ip)
|
||||
sync_obj = sync_object_class_map[endpoint_type](
|
||||
subcloud_name, endpoint_type, management_ip
|
||||
)
|
||||
new_state = dco_consts.SYNC_STATUS_COMPLETED
|
||||
timeout = eventlet.timeout.Timeout(SYNC_TIMEOUT)
|
||||
try:
|
||||
@@ -107,24 +119,32 @@ class GenericSyncWorkerManager(object):
|
||||
timeout.cancel()
|
||||
|
||||
db_api.subcloud_sync_update(
|
||||
context, subcloud_name, endpoint_type,
|
||||
values={'sync_request': new_state})
|
||||
context, subcloud_name, endpoint_type, values={"sync_request": new_state}
|
||||
)
|
||||
LOG.info(f"End of sync_subcloud {subcloud_name}.")
|
||||
|
||||
def add_subcloud(self, context, name, version, management_ip):
|
||||
# create subcloud in DB and create the sync objects
|
||||
LOG.info(f"adding subcloud {name}")
|
||||
endpoint_type_list = dco_consts.SYNC_ENDPOINT_TYPES_LIST[:]
|
||||
capabilities = {'endpoint_types': endpoint_type_list}
|
||||
capabilities = {"endpoint_types": endpoint_type_list}
|
||||
|
||||
sc = subcloud.Subcloud(
|
||||
context, region_name=name, software_version=version,
|
||||
capabilities=capabilities, management_ip=management_ip)
|
||||
context,
|
||||
region_name=name,
|
||||
software_version=version,
|
||||
capabilities=capabilities,
|
||||
management_ip=management_ip,
|
||||
)
|
||||
sc = sc.create()
|
||||
for endpoint_type in endpoint_type_list:
|
||||
db_api.subcloud_sync_create(context, name, endpoint_type,
|
||||
# pylint: disable-next=no-member
|
||||
values={'subcloud_id': sc.id})
|
||||
db_api.subcloud_sync_create(
|
||||
context,
|
||||
name,
|
||||
endpoint_type,
|
||||
# pylint: disable-next=no-member
|
||||
values={"subcloud_id": sc.id},
|
||||
)
|
||||
# Create the sync object for this engine
|
||||
self.create_sync_objects(name, capabilities, management_ip)
|
||||
|
||||
@@ -134,39 +154,54 @@ class GenericSyncWorkerManager(object):
|
||||
context,
|
||||
subcloud_name,
|
||||
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
|
||||
availability_status=dccommon_consts.AVAILABILITY_OFFLINE)
|
||||
availability_status=dccommon_consts.AVAILABILITY_OFFLINE,
|
||||
)
|
||||
try:
|
||||
# delete this subcloud
|
||||
subcloud.Subcloud.delete_subcloud_by_name(context, subcloud_name)
|
||||
except Exception:
|
||||
raise exceptions.SubcloudNotFound(region_name=subcloud_name)
|
||||
|
||||
def subcloud_state_matches(self, subcloud_name,
|
||||
management_state=None,
|
||||
availability_status=None,
|
||||
initial_sync_state=None):
|
||||
def subcloud_state_matches(
|
||||
self,
|
||||
subcloud_name,
|
||||
management_state=None,
|
||||
availability_status=None,
|
||||
initial_sync_state=None,
|
||||
):
|
||||
# compare subcloud states
|
||||
match = True
|
||||
sc = subcloud.Subcloud.get_by_name(self.context, subcloud_name)
|
||||
if management_state is not None and \
|
||||
sc.management_state != management_state:
|
||||
if management_state is not None and sc.management_state != management_state:
|
||||
match = False
|
||||
if match and availability_status is not None and \
|
||||
sc.availability_status != availability_status:
|
||||
if (
|
||||
match
|
||||
and availability_status is not None
|
||||
and sc.availability_status != availability_status
|
||||
):
|
||||
match = False
|
||||
if match and initial_sync_state is not None and \
|
||||
sc.initial_sync_state != initial_sync_state:
|
||||
if (
|
||||
match
|
||||
and initial_sync_state is not None
|
||||
and sc.initial_sync_state != initial_sync_state
|
||||
):
|
||||
match = False
|
||||
return match
|
||||
|
||||
def update_subcloud_state(self, context, subcloud_name,
|
||||
management_state=None,
|
||||
availability_status=None,
|
||||
initial_sync_state=None):
|
||||
LOG.info(f"updating state for subcloud {subcloud_name} - "
|
||||
f"management_state: {management_state} "
|
||||
f"availability_status: {availability_status} "
|
||||
f"initial_sync_state: {initial_sync_state}")
|
||||
def update_subcloud_state(
|
||||
self,
|
||||
context,
|
||||
subcloud_name,
|
||||
management_state=None,
|
||||
availability_status=None,
|
||||
initial_sync_state=None,
|
||||
):
|
||||
LOG.info(
|
||||
f"updating state for subcloud {subcloud_name} - "
|
||||
f"management_state: {management_state} "
|
||||
f"availability_status: {availability_status} "
|
||||
f"initial_sync_state: {initial_sync_state}"
|
||||
)
|
||||
sc = subcloud.Subcloud.get_by_name(context, subcloud_name)
|
||||
if management_state is not None:
|
||||
sc.management_state = management_state
|
||||
@@ -186,38 +221,44 @@ class GenericSyncWorkerManager(object):
|
||||
sc = subcloud.Subcloud.get_by_name(self.context, subcloud_name)
|
||||
# We only enable syncing if the subcloud is online and the initial
|
||||
# sync has completed.
|
||||
return (sc.availability_status == dccommon_consts.AVAILABILITY_ONLINE and
|
||||
sc.initial_sync_state == dco_consts.INITIAL_SYNC_STATE_COMPLETED)
|
||||
return (
|
||||
sc.availability_status == dccommon_consts.AVAILABILITY_ONLINE
|
||||
and sc.initial_sync_state == dco_consts.INITIAL_SYNC_STATE_COMPLETED
|
||||
)
|
||||
|
||||
def is_subcloud_ready(self, subcloud_name):
|
||||
# is this subcloud ready for synchronization
|
||||
return self.is_subcloud_managed(subcloud_name) and \
|
||||
self.is_subcloud_enabled(subcloud_name)
|
||||
return self.is_subcloud_managed(subcloud_name) and self.is_subcloud_enabled(
|
||||
subcloud_name
|
||||
)
|
||||
|
||||
def add_subcloud_sync_endpoint_type(self, context, subcloud_name,
|
||||
endpoint_type_list=None):
|
||||
def add_subcloud_sync_endpoint_type(
|
||||
self, context, subcloud_name, endpoint_type_list=None
|
||||
):
|
||||
|
||||
# TODO(jkung): This method is currently only required by
|
||||
# stx-openstack and is to be integrated with stx-openstack when
|
||||
# that feature is enabled.
|
||||
|
||||
LOG.info(f"add_subcloud_sync_endpoint_type subcloud_name={subcloud_name} "
|
||||
f"endpoint_type_list={endpoint_type_list}")
|
||||
LOG.info(
|
||||
f"add_subcloud_sync_endpoint_type subcloud_name={subcloud_name} "
|
||||
f"endpoint_type_list={endpoint_type_list}"
|
||||
)
|
||||
|
||||
if endpoint_type_list is None:
|
||||
return
|
||||
|
||||
sc = subcloud.Subcloud.get_by_name(context, subcloud_name)
|
||||
capabilities = sc.capabilities
|
||||
c_endpoint_type_list = capabilities.get('endpoint_types', [])
|
||||
c_endpoint_type_list = capabilities.get("endpoint_types", [])
|
||||
|
||||
# Update the DB first
|
||||
for endpoint_type in endpoint_type_list:
|
||||
if endpoint_type not in c_endpoint_type_list:
|
||||
c_endpoint_type_list.append(endpoint_type)
|
||||
if capabilities.get('endpoint_types') is None:
|
||||
if capabilities.get("endpoint_types") is None:
|
||||
# assign back if 'endpoint_types' is not in capabilities
|
||||
capabilities['endpoint_types'] = c_endpoint_type_list
|
||||
capabilities["endpoint_types"] = c_endpoint_type_list
|
||||
sc.capabilities = capabilities
|
||||
sc.save()
|
||||
|
||||
@@ -226,30 +267,37 @@ class GenericSyncWorkerManager(object):
|
||||
# Check whether sync endpoint already exists
|
||||
try:
|
||||
subcloud_sync = db_api.subcloud_sync_get(
|
||||
context, subcloud_name,
|
||||
endpoint_type)
|
||||
context, subcloud_name, endpoint_type
|
||||
)
|
||||
|
||||
if subcloud_sync:
|
||||
LOG.info(f"subcloud_sync subcloud={subcloud_name} "
|
||||
f"endpoint_type={endpoint_type} already exists")
|
||||
LOG.info(
|
||||
f"subcloud_sync subcloud={subcloud_name} "
|
||||
f"endpoint_type={endpoint_type} already exists"
|
||||
)
|
||||
continue
|
||||
except exceptions.SubcloudSyncNotFound:
|
||||
pass
|
||||
|
||||
sync_obj = sync_object_class_map[endpoint_type](
|
||||
subcloud_name, endpoint_type, sc.management_ip)
|
||||
subcloud_name, endpoint_type, sc.management_ip
|
||||
)
|
||||
|
||||
# create the subcloud_sync !!!
|
||||
db_api.subcloud_sync_create(
|
||||
context, subcloud_name, endpoint_type,
|
||||
values={'subcloud_id': sc.id}) # pylint: disable=E1101
|
||||
context,
|
||||
subcloud_name,
|
||||
endpoint_type,
|
||||
values={"subcloud_id": sc.id}, # pylint: disable=E1101
|
||||
)
|
||||
|
||||
if self.is_subcloud_ready(subcloud_name):
|
||||
sync_obj.enable()
|
||||
sync_obj.initial_sync()
|
||||
|
||||
def remove_subcloud_sync_endpoint_type(self, context, subcloud_name,
|
||||
endpoint_type_list=None):
|
||||
def remove_subcloud_sync_endpoint_type(
|
||||
self, context, subcloud_name, endpoint_type_list=None
|
||||
):
|
||||
|
||||
# TODO(jkung): This method is currently only required by
|
||||
# stx-openstack and is to be integrated with stx-openstack when
|
||||
@@ -257,22 +305,23 @@ class GenericSyncWorkerManager(object):
|
||||
# The subcloud_sync delete can be more graceful by ensuring the
|
||||
# sync object is updated for each engine on delete.
|
||||
|
||||
LOG.info(f"remove_subcloud_sync_endpoint_type "
|
||||
f"subcloud_name={subcloud_name} "
|
||||
f"endpoint_type_list={endpoint_type_list}")
|
||||
LOG.info(
|
||||
f"remove_subcloud_sync_endpoint_type "
|
||||
f"subcloud_name={subcloud_name} "
|
||||
f"endpoint_type_list={endpoint_type_list}"
|
||||
)
|
||||
|
||||
# Remove sync_objs and subcloud_sync for endpoint types to be removed
|
||||
if endpoint_type_list:
|
||||
for endpoint_type in endpoint_type_list:
|
||||
try:
|
||||
db_api.subcloud_sync_delete(
|
||||
context, subcloud_name, endpoint_type)
|
||||
db_api.subcloud_sync_delete(context, subcloud_name, endpoint_type)
|
||||
except exceptions.SubcloudSyncNotFound:
|
||||
pass
|
||||
|
||||
# remove the endpoint types from subcloud capabilities
|
||||
sc = subcloud.Subcloud.get_by_name(context, subcloud_name)
|
||||
c_endpoint_type_list = sc.capabilities.get('endpoint_types', [])
|
||||
c_endpoint_type_list = sc.capabilities.get("endpoint_types", [])
|
||||
|
||||
if endpoint_type_list and c_endpoint_type_list:
|
||||
for endpoint_type in endpoint_type_list:
|
||||
@@ -313,43 +362,55 @@ class GenericSyncWorkerManager(object):
|
||||
timeout.cancel()
|
||||
|
||||
db_api.subcloud_sync_update(
|
||||
context, subcloud_name, endpoint_type,
|
||||
values={'audit_status': new_state})
|
||||
context, subcloud_name, endpoint_type, values={"audit_status": new_state}
|
||||
)
|
||||
|
||||
def run_sync_audit(self, context, subcloud_sync_list):
|
||||
# Clear the master resource cache
|
||||
SyncThread.reset_master_resources_cache()
|
||||
|
||||
LOG.info(f"Engine id:({self.engine_id}) Start to audit "
|
||||
f"{len(subcloud_sync_list)} (subcloud, endpoint_type) pairs.")
|
||||
LOG.debug(f"Engine id:({self.engine_id}) Start to audit "
|
||||
f"{subcloud_sync_list}.")
|
||||
LOG.info(
|
||||
f"Engine id:({self.engine_id}) Start to audit "
|
||||
f"{len(subcloud_sync_list)} (subcloud, endpoint_type) pairs."
|
||||
)
|
||||
LOG.debug(
|
||||
f"Engine id:({self.engine_id}) Start to audit " f"{subcloud_sync_list}."
|
||||
)
|
||||
|
||||
for sc_region_name, ept, ip in subcloud_sync_list:
|
||||
LOG.debug(f"Attempt audit_subcloud: "
|
||||
f"{self.engine_id}/{sc_region_name}/{ept}")
|
||||
LOG.debug(
|
||||
f"Attempt audit_subcloud: " f"{self.engine_id}/{sc_region_name}/{ept}"
|
||||
)
|
||||
try:
|
||||
sync_obj = sync_object_class_map[ept](sc_region_name, ept, ip)
|
||||
self.audit_thread_group_manager.start(self._audit_subcloud,
|
||||
self.context,
|
||||
sc_region_name,
|
||||
ept,
|
||||
sync_obj)
|
||||
self.audit_thread_group_manager.start(
|
||||
self._audit_subcloud, self.context, sc_region_name, ept, sync_obj
|
||||
)
|
||||
except exceptions.SubcloudSyncNotFound:
|
||||
# The endpoint in subcloud_sync has been removed
|
||||
LOG.debug(f"Engine id:({self.engine_id}/{sc_region_name}/{ept}) "
|
||||
f"SubcloudSyncNotFound: The endpoint in subcloud_sync "
|
||||
f"has been removed")
|
||||
LOG.debug(
|
||||
f"Engine id:({self.engine_id}/{sc_region_name}/{ept}) "
|
||||
f"SubcloudSyncNotFound: The endpoint in subcloud_sync "
|
||||
f"has been removed"
|
||||
)
|
||||
except Exception as e:
|
||||
LOG.error(f"Exception occurred when running audit {ept} for "
|
||||
f"subcloud {sc_region_name}: {e}")
|
||||
LOG.error(
|
||||
f"Exception occurred when running audit {ept} for "
|
||||
f"subcloud {sc_region_name}: {e}"
|
||||
)
|
||||
db_api.subcloud_sync_update(
|
||||
self.context, sc_region_name, ept,
|
||||
values={'audit_status': dco_consts.AUDIT_STATUS_FAILED})
|
||||
self.context,
|
||||
sc_region_name,
|
||||
ept,
|
||||
values={"audit_status": dco_consts.AUDIT_STATUS_FAILED},
|
||||
)
|
||||
|
||||
def sync_request(self, ctxt, endpoint_type):
|
||||
# Someone has enqueued a sync job. set the endpoint sync_request to
|
||||
# requested
|
||||
db_api.subcloud_sync_update_all(
|
||||
ctxt, dccommon_consts.MANAGEMENT_MANAGED, endpoint_type,
|
||||
values={'sync_request': dco_consts.SYNC_STATUS_REQUESTED})
|
||||
ctxt,
|
||||
dccommon_consts.MANAGEMENT_MANAGED,
|
||||
endpoint_type,
|
||||
values={"sync_request": dco_consts.SYNC_STATUS_REQUESTED},
|
||||
)
|
||||
|
@@ -46,20 +46,23 @@ class InitialSyncManager(object):
|
||||
subclouds = db_api.subcloud_update_all_initial_state(
|
||||
self.context,
|
||||
pre_initial_sync_state=consts.INITIAL_SYNC_STATE_IN_PROGRESS,
|
||||
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
|
||||
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED,
|
||||
)
|
||||
if subclouds > 0:
|
||||
LOG.info("Initial sync for subclouds were in progress and "
|
||||
"will be re-attempted.")
|
||||
LOG.info(
|
||||
"Initial sync for subclouds were in progress and "
|
||||
"will be re-attempted."
|
||||
)
|
||||
|
||||
# Since we are starting up, any failed syncs won't be re-attempted
|
||||
# because the timer will not be running. Reattempt them.
|
||||
subclouds = db_api.subcloud_update_all_initial_state(
|
||||
self.context,
|
||||
pre_initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED,
|
||||
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
|
||||
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED,
|
||||
)
|
||||
if subclouds > 0:
|
||||
LOG.info(
|
||||
"Initial sync for subclouds were failed and will be re-attempted.")
|
||||
LOG.info("Initial sync for subclouds were failed and will be re-attempted.")
|
||||
|
||||
def initial_sync_thread(self):
|
||||
"""Perform initial sync for subclouds as required."""
|
||||
@@ -78,8 +81,8 @@ class InitialSyncManager(object):
|
||||
def _initial_sync_subclouds(self):
|
||||
"""Perform initial sync for subclouds that require it."""
|
||||
subclouds = db_api.subcloud_capabilities_get_all(
|
||||
self.context,
|
||||
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
|
||||
self.context, initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED
|
||||
)
|
||||
if not subclouds:
|
||||
LOG.debug("No eligible subclouds for initial sync.")
|
||||
return
|
||||
@@ -97,23 +100,31 @@ class InitialSyncManager(object):
|
||||
# to process.
|
||||
try:
|
||||
self.engine_worker_rpc_client.initial_sync_subclouds(
|
||||
self.context,
|
||||
subcloud_capabilities)
|
||||
LOG.debug(f"Sent initial sync request message for "
|
||||
f"{len(subcloud_capabilities)} subclouds")
|
||||
self.context, subcloud_capabilities
|
||||
)
|
||||
LOG.debug(
|
||||
f"Sent initial sync request message for "
|
||||
f"{len(subcloud_capabilities)} subclouds"
|
||||
)
|
||||
except Exception as e:
|
||||
LOG.error(f"Exception occurred in initial_sync for subclouds "
|
||||
f"{list(subcloud_capabilities.keys())}: {e}")
|
||||
LOG.error(
|
||||
f"Exception occurred in initial_sync for subclouds "
|
||||
f"{list(subcloud_capabilities.keys())}: {e}"
|
||||
)
|
||||
subcloud_capabilities = {}
|
||||
if subcloud_capabilities:
|
||||
# We've got a partial batch...send it off for processing.
|
||||
try:
|
||||
self.engine_worker_rpc_client.initial_sync_subclouds(
|
||||
self.context,
|
||||
subcloud_capabilities)
|
||||
LOG.debug(f"Sent initial sync request message for "
|
||||
f"{len(subcloud_capabilities)} subclouds")
|
||||
self.context, subcloud_capabilities
|
||||
)
|
||||
LOG.debug(
|
||||
f"Sent initial sync request message for "
|
||||
f"{len(subcloud_capabilities)} subclouds"
|
||||
)
|
||||
except Exception as e:
|
||||
LOG.error(f"Exception occurred in initial_sync for subclouds "
|
||||
f"{list(subcloud_capabilities.keys())}: {e}")
|
||||
LOG.error(
|
||||
f"Exception occurred in initial_sync for subclouds "
|
||||
f"{list(subcloud_capabilities.keys())}: {e}"
|
||||
)
|
||||
LOG.debug("Done sending initial sync request messages.")
|
||||
|
@@ -31,15 +31,18 @@ class InitialSyncWorkerManager(object):
|
||||
self.engine_id = engine_id
|
||||
self.context = context.get_admin_context()
|
||||
# Keeps track of greenthreads we create to do work.
|
||||
self.thread_group_manager = scheduler.ThreadGroupManager(
|
||||
thread_pool_size=100)
|
||||
self.thread_group_manager = scheduler.ThreadGroupManager(thread_pool_size=100)
|
||||
|
||||
def initial_sync_subclouds(self, context, subcloud_capabilities):
|
||||
"""Perform initial sync for subclouds that require it."""
|
||||
LOG.info(f"Engine id:({self.engine_id}) Start initial sync for "
|
||||
f"{len(subcloud_capabilities)} subclouds.")
|
||||
LOG.debug(f"Engine id:({self.engine_id}) Start initial sync for "
|
||||
f"subclouds {list(subcloud_capabilities.keys())}.")
|
||||
LOG.info(
|
||||
f"Engine id:({self.engine_id}) Start initial sync for "
|
||||
f"{len(subcloud_capabilities)} subclouds."
|
||||
)
|
||||
LOG.debug(
|
||||
f"Engine id:({self.engine_id}) Start initial sync for "
|
||||
f"subclouds {list(subcloud_capabilities.keys())}."
|
||||
)
|
||||
|
||||
for sc_region_name, sc_capabilities_and_ip in subcloud_capabilities.items():
|
||||
# Create a new greenthread for each subcloud to allow the
|
||||
@@ -52,13 +55,17 @@ class InitialSyncWorkerManager(object):
|
||||
self.context,
|
||||
sc_region_name,
|
||||
sc_capabilities_and_ip[0],
|
||||
sc_capabilities_and_ip[1])
|
||||
sc_capabilities_and_ip[1],
|
||||
)
|
||||
except Exception as e:
|
||||
LOG.error(f"Exception occurred when running initial_sync for "
|
||||
f"subcloud {sc_region_name}: {e}")
|
||||
LOG.error(
|
||||
f"Exception occurred when running initial_sync for "
|
||||
f"subcloud {sc_region_name}: {e}"
|
||||
)
|
||||
|
||||
def _initial_sync_subcloud(self, context, subcloud_name, subcloud_capabilities,
|
||||
management_ip):
|
||||
def _initial_sync_subcloud(
|
||||
self, context, subcloud_name, subcloud_capabilities, management_ip
|
||||
):
|
||||
"""Perform initial sync for a subcloud.
|
||||
|
||||
This runs in a separate greenthread for each subcloud.
|
||||
@@ -71,16 +78,19 @@ class InitialSyncWorkerManager(object):
|
||||
context,
|
||||
subcloud_name,
|
||||
consts.INITIAL_SYNC_STATE_REQUESTED,
|
||||
consts.INITIAL_SYNC_STATE_IN_PROGRESS)
|
||||
consts.INITIAL_SYNC_STATE_IN_PROGRESS,
|
||||
)
|
||||
if result == 0:
|
||||
# Sync is no longer required
|
||||
LOG.debug(f"Initial sync for subcloud {subcloud_name} "
|
||||
f"no longer required")
|
||||
LOG.debug(
|
||||
f"Initial sync for subcloud {subcloud_name} " f"no longer required"
|
||||
)
|
||||
return
|
||||
|
||||
# sync_objs stores the sync object per endpoint
|
||||
sync_objs = self.gswm.create_sync_objects(
|
||||
subcloud_name, subcloud_capabilities, management_ip)
|
||||
subcloud_name, subcloud_capabilities, management_ip
|
||||
)
|
||||
|
||||
# Initial sync. It's synchronous so that identity
|
||||
# get synced before fernet token keys are synced. This is
|
||||
@@ -103,10 +113,8 @@ class InitialSyncWorkerManager(object):
|
||||
# Verify that the sync wasn't cancelled while we did the sync (for
|
||||
# example, the subcloud could have been unmanaged).
|
||||
result = db_api.subcloud_update_initial_state(
|
||||
context,
|
||||
subcloud_name,
|
||||
consts.INITIAL_SYNC_STATE_IN_PROGRESS,
|
||||
new_state)
|
||||
context, subcloud_name, consts.INITIAL_SYNC_STATE_IN_PROGRESS, new_state
|
||||
)
|
||||
if result > 0:
|
||||
if new_state == consts.INITIAL_SYNC_STATE_COMPLETED:
|
||||
# The initial sync was completed and we have updated the
|
||||
@@ -117,16 +125,19 @@ class InitialSyncWorkerManager(object):
|
||||
# This thread is not taken from the thread pool, because we
|
||||
# don't want a large number of failed syncs to prevent new
|
||||
# subclouds from syncing.
|
||||
eventlet.greenthread.spawn_after(SYNC_FAIL_HOLD_OFF,
|
||||
self._reattempt_sync,
|
||||
subcloud_name)
|
||||
eventlet.greenthread.spawn_after(
|
||||
SYNC_FAIL_HOLD_OFF, self._reattempt_sync, subcloud_name
|
||||
)
|
||||
pass
|
||||
else:
|
||||
LOG.error(f"Unexpected new_state {new_state} for "
|
||||
f"subcloud {subcloud_name}")
|
||||
LOG.error(
|
||||
f"Unexpected new_state {new_state} for " f"subcloud {subcloud_name}"
|
||||
)
|
||||
else:
|
||||
LOG.debug(f"Initial sync was cancelled for subcloud "
|
||||
f"{subcloud_name} while in progress")
|
||||
LOG.debug(
|
||||
f"Initial sync was cancelled for subcloud "
|
||||
f"{subcloud_name} while in progress"
|
||||
)
|
||||
|
||||
def _reattempt_sync(self, subcloud_name):
|
||||
# Verify that the sync state hasn't changed since the last attempt.
|
||||
@@ -134,19 +145,24 @@ class InitialSyncWorkerManager(object):
|
||||
self.context,
|
||||
subcloud_name,
|
||||
consts.INITIAL_SYNC_STATE_FAILED,
|
||||
consts.INITIAL_SYNC_STATE_REQUESTED)
|
||||
consts.INITIAL_SYNC_STATE_REQUESTED,
|
||||
)
|
||||
if result == 0:
|
||||
# Sync is no longer required
|
||||
LOG.debug(f"Reattempt initial sync for subcloud {subcloud_name} "
|
||||
f"no longer required")
|
||||
LOG.debug(
|
||||
f"Reattempt initial sync for subcloud {subcloud_name} "
|
||||
f"no longer required"
|
||||
)
|
||||
return
|
||||
|
||||
def enable_subcloud(self, subcloud_name, sync_objs):
|
||||
LOG.debug(f"enabling subcloud {subcloud_name}")
|
||||
for endpoint_type, sync_obj in sync_objs.items():
|
||||
LOG.debug(f"Engine id: {self.engine_id} enabling sync thread "
|
||||
f"for subcloud {subcloud_name} and "
|
||||
f"endpoint type {endpoint_type}.")
|
||||
LOG.debug(
|
||||
f"Engine id: {self.engine_id} enabling sync thread "
|
||||
f"for subcloud {subcloud_name} and "
|
||||
f"endpoint type {endpoint_type}."
|
||||
)
|
||||
sync_obj.enable()
|
||||
|
||||
def init_subcloud_sync_audit(self, subcloud_name):
|
||||
@@ -154,11 +170,16 @@ class InitialSyncWorkerManager(object):
|
||||
|
||||
for endpoint_type in consts.SYNC_ENDPOINT_TYPES_LIST:
|
||||
db_api.subcloud_sync_update(
|
||||
self.context, subcloud_name, endpoint_type,
|
||||
values={'audit_status': consts.AUDIT_STATUS_NONE,
|
||||
'sync_status_reported': consts.SYNC_STATUS_NONE,
|
||||
'sync_status_report_time': None,
|
||||
'last_audit_time': None})
|
||||
self.context,
|
||||
subcloud_name,
|
||||
endpoint_type,
|
||||
values={
|
||||
"audit_status": consts.AUDIT_STATUS_NONE,
|
||||
"sync_status_reported": consts.SYNC_STATUS_NONE,
|
||||
"sync_status_report_time": None,
|
||||
"last_audit_time": None,
|
||||
},
|
||||
)
|
||||
|
||||
def initial_sync(self, subcloud_name, sync_objs):
|
||||
LOG.debug(f"Initial sync subcloud {subcloud_name} {self.engine_id}")
|
||||
|
@@ -40,15 +40,17 @@ LOG = logging.getLogger(__name__)
|
||||
# Projects are synced batch by batch. Below configuration defines
|
||||
# number of projects in each batch
|
||||
batch_opts = [
|
||||
cfg.IntOpt('batch_size',
|
||||
default=3,
|
||||
help='Batch size number of projects will be synced at a time')
|
||||
cfg.IntOpt(
|
||||
"batch_size",
|
||||
default=3,
|
||||
help="Batch size number of projects will be synced at a time",
|
||||
)
|
||||
]
|
||||
|
||||
batch_opt_group = cfg.OptGroup('batch')
|
||||
batch_opt_group = cfg.OptGroup("batch")
|
||||
cfg.CONF.register_group(batch_opt_group)
|
||||
cfg.CONF.register_opts(batch_opts, group=batch_opt_group)
|
||||
TASK_TYPE = 'quota_sync'
|
||||
TASK_TYPE = "quota_sync"
|
||||
|
||||
|
||||
class QuotaManager(manager.Manager):
|
||||
@@ -65,44 +67,49 @@ class QuotaManager(manager.Manager):
|
||||
regions_usage_dict = {}
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
LOG.debug(_('QuotaManager initialization...'))
|
||||
LOG.debug(_("QuotaManager initialization..."))
|
||||
|
||||
super(QuotaManager, self).__init__(service_name="quota_manager",
|
||||
*args, **kwargs)
|
||||
super(QuotaManager, self).__init__(
|
||||
service_name="quota_manager", *args, **kwargs
|
||||
)
|
||||
self.context = context.get_admin_context()
|
||||
self.endpoints = endpoint_cache.EndpointCache()
|
||||
|
||||
@classmethod
|
||||
def calculate_subcloud_project_quotas(cls, project_id, user_id,
|
||||
new_global_quotas, subcloud):
|
||||
def calculate_subcloud_project_quotas(
|
||||
cls, project_id, user_id, new_global_quotas, subcloud
|
||||
):
|
||||
# Someone has changed the quotas for a project, so we need to
|
||||
# calculate the new quotas in each subcloud.
|
||||
|
||||
# First, grab a copy of the usage from the last quota audit.
|
||||
with cls.usage_lock:
|
||||
regions_usage_dict = copy.deepcopy(
|
||||
cls.regions_usage_dict.get((project_id, user_id), {}))
|
||||
cls.regions_usage_dict.get((project_id, user_id), {})
|
||||
)
|
||||
total_project_usages = copy.deepcopy(
|
||||
cls.total_project_usages.get((project_id, user_id), {}))
|
||||
cls.total_project_usages.get((project_id, user_id), {})
|
||||
)
|
||||
|
||||
# Calculate the remaining global project limit based on the new quotas
|
||||
# and the total usage for the project across all subclouds.
|
||||
unused_global_limits = collections.Counter(
|
||||
new_global_quotas) - collections.Counter(total_project_usages)
|
||||
new_global_quotas
|
||||
) - collections.Counter(total_project_usages)
|
||||
|
||||
# Now get the region-specific usage and trim it back to just the dict
|
||||
# keys present in the new quotas.
|
||||
try:
|
||||
region_usage = regions_usage_dict[subcloud]
|
||||
region_usage = dict([(k, region_usage[k])
|
||||
for k in new_global_quotas])
|
||||
region_usage = dict([(k, region_usage[k]) for k in new_global_quotas])
|
||||
except KeyError:
|
||||
# From startup until the quota audit runs we'll end up here.
|
||||
region_usage = {}
|
||||
|
||||
# Now add the region-specific usage to the global remaining limits.
|
||||
region_new_limits = dict(unused_global_limits +
|
||||
collections.Counter(region_usage))
|
||||
region_new_limits = dict(
|
||||
unused_global_limits + collections.Counter(region_usage)
|
||||
)
|
||||
|
||||
return region_new_limits
|
||||
|
||||
@@ -112,12 +119,12 @@ class QuotaManager(manager.Manager):
|
||||
os_client = sdk.OpenStackDriver(dccommon_consts.VIRTUAL_MASTER_CLOUD)
|
||||
try:
|
||||
quotas = os_client.nova_client.nova_client.quotas.list()
|
||||
project_user_quotas = quotas['project_user_quotas']
|
||||
project_user_quotas = quotas["project_user_quotas"]
|
||||
for project_user_quota in project_user_quotas:
|
||||
project_id = project_user_quota['project_id']
|
||||
user_quotas = project_user_quota['user_quotas']
|
||||
project_id = project_user_quota["project_id"]
|
||||
user_quotas = project_user_quota["user_quotas"]
|
||||
for user_quota in user_quotas:
|
||||
user_id = user_quota['user_id']
|
||||
user_id = user_quota["user_id"]
|
||||
project_user_list.add((project_id, user_id))
|
||||
except AttributeError:
|
||||
# Dealing with novaclient that doesn't have quotas.list(),
|
||||
@@ -145,8 +152,11 @@ class QuotaManager(manager.Manager):
|
||||
with QuotaManager.usage_lock:
|
||||
# The same keys should be in QuotaManager.total_project_usages
|
||||
# so we only need to look at one of them.
|
||||
to_delete = [k for k in QuotaManager.regions_usage_dict
|
||||
if k not in project_user_mod_list]
|
||||
to_delete = [
|
||||
k
|
||||
for k in QuotaManager.regions_usage_dict
|
||||
if k not in project_user_mod_list
|
||||
]
|
||||
for k in to_delete:
|
||||
del QuotaManager.regions_usage_dict[k]
|
||||
del QuotaManager.total_project_usages[k]
|
||||
@@ -156,18 +166,26 @@ class QuotaManager(manager.Manager):
|
||||
# Divide list of projects into batches and perfrom quota sync
|
||||
# for one batch at a time.
|
||||
for current_batch_projects_users in utils.get_batch_projects(
|
||||
cfg.CONF.batch.batch_size, project_user_list):
|
||||
cfg.CONF.batch.batch_size, project_user_list
|
||||
):
|
||||
# "current_batch_projects_users" may have some None entries that
|
||||
# we don't want to iterate over.
|
||||
current_batch_projects_users = [
|
||||
x for x in current_batch_projects_users if x is not None]
|
||||
LOG.info("Syncing quota for current batch with projects: %s",
|
||||
current_batch_projects_users)
|
||||
x for x in current_batch_projects_users if x is not None
|
||||
]
|
||||
LOG.info(
|
||||
"Syncing quota for current batch with projects: %s",
|
||||
current_batch_projects_users,
|
||||
)
|
||||
for project_id, user_id in current_batch_projects_users:
|
||||
if project_id:
|
||||
thread = threading.Thread(
|
||||
target=self.quota_sync_for_project,
|
||||
args=(project_id, user_id,))
|
||||
args=(
|
||||
project_id,
|
||||
user_id,
|
||||
),
|
||||
)
|
||||
projects_thread_list.append(thread)
|
||||
thread.start()
|
||||
# Wait for all the threads to complete
|
||||
@@ -178,14 +196,15 @@ class QuotaManager(manager.Manager):
|
||||
def read_quota_usage(self, project_id, user_id, region, usage_queue):
|
||||
# Writes usage dict to the Queue in the following format
|
||||
# {'region_name': (<nova_usages>, <neutron_usages>, <cinder_usages>)}
|
||||
LOG.info("Reading quota usage for project: %(project_id)s and user: "
|
||||
"%(user_id)s in %(region)s",
|
||||
{'project_id': project_id, 'user_id': user_id,
|
||||
'region': region}
|
||||
)
|
||||
LOG.info(
|
||||
"Reading quota usage for project: %(project_id)s and user: "
|
||||
"%(user_id)s in %(region)s",
|
||||
{"project_id": project_id, "user_id": user_id, "region": region},
|
||||
)
|
||||
os_client = sdk.OpenStackDriver(region)
|
||||
(nova_usage, neutron_usage, cinder_usage) = \
|
||||
os_client.get_resource_usages(project_id, user_id)
|
||||
(nova_usage, neutron_usage, cinder_usage) = os_client.get_resource_usages(
|
||||
project_id, user_id
|
||||
)
|
||||
total_region_usage = collections.defaultdict(dict)
|
||||
# region_usage[0], region_usage[1], region_usage[3] are
|
||||
# nova, neutron & cinder usages respectively
|
||||
@@ -203,21 +222,23 @@ class QuotaManager(manager.Manager):
|
||||
resultant_dict = collections.Counter()
|
||||
for current_region in regions_dict:
|
||||
single_region[current_region] = collections.Counter(
|
||||
regions_dict[current_region])
|
||||
regions_dict[current_region]
|
||||
)
|
||||
resultant_dict += single_region[current_region]
|
||||
return resultant_dict
|
||||
|
||||
def get_tenant_quota_limits_region(self, project_id, user_id, region):
|
||||
# returns quota limits for region in the following format
|
||||
# {<nova_limits>, <neutron_limits>, <cinder_limits>}
|
||||
LOG.info("Reading quota limits for project: %(project_id)s and user: "
|
||||
"%(user_id)s in %(region)s",
|
||||
{'project_id': project_id, 'user_id': user_id,
|
||||
'region': region}
|
||||
)
|
||||
LOG.info(
|
||||
"Reading quota limits for project: %(project_id)s and user: "
|
||||
"%(user_id)s in %(region)s",
|
||||
{"project_id": project_id, "user_id": user_id, "region": region},
|
||||
)
|
||||
os_client = sdk.OpenStackDriver(region)
|
||||
(nova_limits, neutron_limits, cinder_limits) = \
|
||||
os_client.get_quota_limits(project_id, user_id)
|
||||
(nova_limits, neutron_limits, cinder_limits) = os_client.get_quota_limits(
|
||||
project_id, user_id
|
||||
)
|
||||
limits = {}
|
||||
limits.update(nova_limits)
|
||||
limits.update(neutron_limits)
|
||||
@@ -229,17 +250,15 @@ class QuotaManager(manager.Manager):
|
||||
dc_orch_limits_for_project = collections.defaultdict(dict)
|
||||
try:
|
||||
# checks if there are any quota limit in DB for a project
|
||||
limits_from_db = db_api.quota_get_all_by_project(self.context,
|
||||
project_id)
|
||||
limits_from_db = db_api.quota_get_all_by_project(self.context, project_id)
|
||||
except exceptions.ProjectQuotaNotFound:
|
||||
limits_from_db = {}
|
||||
for current_resource in CONF.dc_orch_global_limit.items():
|
||||
resource = re.sub('quota_', '', current_resource[0])
|
||||
resource = re.sub("quota_", "", current_resource[0])
|
||||
# If resource limit in DB, then use it or else use limit
|
||||
# from conf file
|
||||
if resource in limits_from_db:
|
||||
dc_orch_limits_for_project[resource] = limits_from_db[
|
||||
resource]
|
||||
dc_orch_limits_for_project[resource] = limits_from_db[resource]
|
||||
else:
|
||||
dc_orch_limits_for_project[resource] = current_resource[1]
|
||||
return dc_orch_limits_for_project
|
||||
@@ -247,37 +266,38 @@ class QuotaManager(manager.Manager):
|
||||
def _arrange_quotas_by_service_name(self, region_new_limit):
|
||||
# Returns a dict of resources with limits arranged by service name
|
||||
resource_with_service = collections.defaultdict(dict)
|
||||
resource_with_service['nova'] = collections.defaultdict(dict)
|
||||
resource_with_service['cinder'] = collections.defaultdict(dict)
|
||||
resource_with_service['neutron'] = collections.defaultdict(dict)
|
||||
resource_with_service["nova"] = collections.defaultdict(dict)
|
||||
resource_with_service["cinder"] = collections.defaultdict(dict)
|
||||
resource_with_service["neutron"] = collections.defaultdict(dict)
|
||||
for limit in region_new_limit:
|
||||
if limit in dccommon_consts.NOVA_QUOTA_FIELDS:
|
||||
resource_with_service['nova'].update(
|
||||
{limit: region_new_limit[limit]})
|
||||
resource_with_service["nova"].update({limit: region_new_limit[limit]})
|
||||
elif limit in dccommon_consts.CINDER_QUOTA_FIELDS:
|
||||
resource_with_service['cinder'].update(
|
||||
{limit: region_new_limit[limit]})
|
||||
resource_with_service["cinder"].update({limit: region_new_limit[limit]})
|
||||
elif limit in dccommon_consts.NEUTRON_QUOTA_FIELDS:
|
||||
resource_with_service['neutron'].update(
|
||||
{limit: region_new_limit[limit]})
|
||||
resource_with_service["neutron"].update(
|
||||
{limit: region_new_limit[limit]}
|
||||
)
|
||||
return resource_with_service
|
||||
|
||||
def update_quota_limits(self, project_id, user_id, region_new_limit,
|
||||
current_region):
|
||||
def update_quota_limits(
|
||||
self, project_id, user_id, region_new_limit, current_region
|
||||
):
|
||||
# Updates quota limit for a project with new calculated limit
|
||||
os_client = sdk.OpenStackDriver(current_region)
|
||||
os_client.write_quota_limits(project_id, user_id, region_new_limit)
|
||||
|
||||
def quota_usage_update(self, project_id, user_id):
|
||||
# Update the quota usage for the specified project/user
|
||||
regions_usage_dict = self.get_tenant_quota_usage_per_region(project_id,
|
||||
user_id)
|
||||
regions_usage_dict = self.get_tenant_quota_usage_per_region(project_id, user_id)
|
||||
if not regions_usage_dict:
|
||||
# Skip syncing for the project if not able to read regions usage
|
||||
LOG.error("Error reading regions usage for the project: "
|
||||
"'%(project)s' and user: '%(user)s'. Aborting, continue "
|
||||
"with next project/user.",
|
||||
{'project': project_id, 'user': user_id})
|
||||
LOG.error(
|
||||
"Error reading regions usage for the project: "
|
||||
"'%(project)s' and user: '%(user)s'. Aborting, continue "
|
||||
"with next project/user.",
|
||||
{"project": project_id, "user": user_id},
|
||||
)
|
||||
return None, None
|
||||
|
||||
# We want to return the original per-subcloud usage, so make a
|
||||
@@ -294,18 +314,19 @@ class QuotaManager(manager.Manager):
|
||||
regions_usage_dict_copy[region].pop(quota, None)
|
||||
|
||||
# Add up the usage for this project/user across all subclouds.
|
||||
total_project_usages = dict(
|
||||
self.get_summation(regions_usage_dict_copy))
|
||||
total_project_usages = dict(self.get_summation(regions_usage_dict_copy))
|
||||
|
||||
# Save the global and per-region usage for use when
|
||||
# modifying quotas later
|
||||
with QuotaManager.usage_lock:
|
||||
# Use the project/user tuple as the dict key.
|
||||
# 'user_id' will be None for the overall project usage.
|
||||
QuotaManager.total_project_usages[(project_id, user_id)] = \
|
||||
copy.deepcopy(total_project_usages)
|
||||
QuotaManager.regions_usage_dict[(project_id, user_id)] = \
|
||||
copy.deepcopy(regions_usage_dict)
|
||||
QuotaManager.total_project_usages[(project_id, user_id)] = copy.deepcopy(
|
||||
total_project_usages
|
||||
)
|
||||
QuotaManager.regions_usage_dict[(project_id, user_id)] = copy.deepcopy(
|
||||
regions_usage_dict
|
||||
)
|
||||
|
||||
return total_project_usages, regions_usage_dict
|
||||
|
||||
@@ -315,27 +336,31 @@ class QuotaManager(manager.Manager):
|
||||
# DC Orchestrator global limit - Summation of usages
|
||||
# in all the regions
|
||||
# New quota limit = Global remaining limit + usage in that region
|
||||
LOG.info("Quota sync called for project: %(project)s user: %(user)s",
|
||||
{'project': project_id, 'user': user_id})
|
||||
LOG.info(
|
||||
"Quota sync called for project: %(project)s user: %(user)s",
|
||||
{"project": project_id, "user": user_id},
|
||||
)
|
||||
regions_thread_list = []
|
||||
# Retrieve regions for the project. This is also done in
|
||||
# get_tenant_quota_usage_per_region() so we may be able to only do
|
||||
# it once. Would have to consider the failure modes though.
|
||||
os_driver = sdk.OpenStackDriver()
|
||||
region_lists = os_driver.get_all_regions_for_project(
|
||||
project_id)
|
||||
region_lists = os_driver.get_all_regions_for_project(project_id)
|
||||
|
||||
total_project_usages, regions_usage_dict = self.quota_usage_update(
|
||||
project_id, user_id)
|
||||
project_id, user_id
|
||||
)
|
||||
if (total_project_usages, regions_usage_dict) == (None, None):
|
||||
return
|
||||
|
||||
# Get the global limit for this project from the master subcloud.
|
||||
dc_orch_global_limits = self.get_overall_tenant_quota_limits(
|
||||
project_id, user_id)
|
||||
project_id, user_id
|
||||
)
|
||||
# Calculate how much of the various limits have not yet been used.
|
||||
unused_global_limits = collections.Counter(
|
||||
dc_orch_global_limits) - collections.Counter(total_project_usages)
|
||||
dc_orch_global_limits
|
||||
) - collections.Counter(total_project_usages)
|
||||
|
||||
# Remove the master region from the list. Its quotas should already
|
||||
# be up to date for managed resources.
|
||||
@@ -351,11 +376,11 @@ class QuotaManager(manager.Manager):
|
||||
for current_region in region_lists:
|
||||
# Calculate the new limit for this region.
|
||||
region_new_limits = dict(
|
||||
unused_global_limits + collections.Counter(
|
||||
regions_usage_dict[current_region]))
|
||||
unused_global_limits
|
||||
+ collections.Counter(regions_usage_dict[current_region])
|
||||
)
|
||||
# Reformat the limits
|
||||
region_new_limits = self._arrange_quotas_by_service_name(
|
||||
region_new_limits)
|
||||
region_new_limits = self._arrange_quotas_by_service_name(region_new_limits)
|
||||
# Update the subcloud with the new limit
|
||||
try:
|
||||
# First find this project and user in this subcloud
|
||||
@@ -363,22 +388,30 @@ class QuotaManager(manager.Manager):
|
||||
sc_os_driver = sdk.OpenStackDriver(current_region)
|
||||
sc_project = sc_os_driver.get_project_by_name(qproject.name)
|
||||
if not sc_project:
|
||||
LOG.info("Cannot find project %s in subcloud %s. Skipping "
|
||||
"quota sync for this project on subcloud",
|
||||
qproject.name, current_region)
|
||||
LOG.info(
|
||||
"Cannot find project %s in subcloud %s. Skipping "
|
||||
"quota sync for this project on subcloud",
|
||||
qproject.name,
|
||||
current_region,
|
||||
)
|
||||
continue
|
||||
sc_project_id = sc_project.id
|
||||
if quser:
|
||||
sc_user = sc_os_driver.get_user_by_name(quser.name)
|
||||
sc_user_id = getattr(sc_user, 'id', None)
|
||||
sc_user_id = getattr(sc_user, "id", None)
|
||||
except Exception as e:
|
||||
LOG.error("quota sync %s: %s", current_region, str(e))
|
||||
continue
|
||||
|
||||
thread = threading.Thread(target=self.update_quota_limits,
|
||||
args=(sc_project_id, sc_user_id,
|
||||
region_new_limits,
|
||||
current_region,))
|
||||
thread = threading.Thread(
|
||||
target=self.update_quota_limits,
|
||||
args=(
|
||||
sc_project_id,
|
||||
sc_user_id,
|
||||
region_new_limits,
|
||||
current_region,
|
||||
),
|
||||
)
|
||||
regions_thread_list.append(thread)
|
||||
thread.start()
|
||||
|
||||
@@ -390,16 +423,15 @@ class QuotaManager(manager.Manager):
|
||||
# Return quota limits in the master cloud. These are the overall
|
||||
# quota limits for the whole cloud.
|
||||
return self.get_tenant_quota_limits_region(
|
||||
project_id, user_id,
|
||||
dccommon_consts.VIRTUAL_MASTER_CLOUD)
|
||||
project_id, user_id, dccommon_consts.VIRTUAL_MASTER_CLOUD
|
||||
)
|
||||
|
||||
def get_tenant_quota_usage_per_region(self, project_id, user_id):
|
||||
# Return quota usage dict with keys as region name & values as usages.
|
||||
# Calculates the usage from each region concurrently using threads.
|
||||
os_driver = sdk.OpenStackDriver()
|
||||
# Retrieve regions for the project
|
||||
region_lists = os_driver.get_all_regions_for_project(
|
||||
project_id)
|
||||
region_lists = os_driver.get_all_regions_for_project(project_id)
|
||||
usage_queue = Queue()
|
||||
regions_usage_dict = collections.defaultdict(dict)
|
||||
regions_thread_list = []
|
||||
@@ -413,21 +445,25 @@ class QuotaManager(manager.Manager):
|
||||
sc_os_driver = sdk.OpenStackDriver(current_region)
|
||||
sc_project = sc_os_driver.get_project_by_name(qproject.name)
|
||||
if not sc_project:
|
||||
LOG.info("Cannot find project %s in subcloud %s. Skipping "
|
||||
"quota usage for this project on subcloud",
|
||||
qproject.name, current_region)
|
||||
LOG.info(
|
||||
"Cannot find project %s in subcloud %s. Skipping "
|
||||
"quota usage for this project on subcloud",
|
||||
qproject.name,
|
||||
current_region,
|
||||
)
|
||||
continue
|
||||
sc_project_id = sc_project.id
|
||||
if quser:
|
||||
sc_user = sc_os_driver.get_user_by_name(quser.name)
|
||||
sc_user_id = getattr(sc_user, 'id', None)
|
||||
sc_user_id = getattr(sc_user, "id", None)
|
||||
except Exception as e:
|
||||
LOG.error("quota usage %s: %s", current_region, str(e))
|
||||
continue
|
||||
|
||||
thread = threading.Thread(target=self.read_quota_usage,
|
||||
args=(sc_project_id, sc_user_id,
|
||||
current_region, usage_queue))
|
||||
thread = threading.Thread(
|
||||
target=self.read_quota_usage,
|
||||
args=(sc_project_id, sc_user_id, current_region, usage_queue),
|
||||
)
|
||||
regions_thread_list.append(thread)
|
||||
thread.start()
|
||||
# Wait for all the threads to finish reading usages
|
||||
@@ -441,22 +477,22 @@ class QuotaManager(manager.Manager):
|
||||
regions_usage_dict.update(current_region_data)
|
||||
return regions_usage_dict
|
||||
|
||||
def get_usage_for_project_and_user(self, endpoint_type,
|
||||
project_id, user_id):
|
||||
def get_usage_for_project_and_user(self, endpoint_type, project_id, user_id):
|
||||
# Returns cached quota usage for a project and user. If there
|
||||
# is no cached usage information then update the cache.
|
||||
|
||||
with QuotaManager.usage_lock:
|
||||
# First, try to get a copy of the usage from the last quota audit.
|
||||
total_project_usages = copy.deepcopy(
|
||||
QuotaManager.total_project_usages.get((project_id, user_id),
|
||||
None))
|
||||
QuotaManager.total_project_usages.get((project_id, user_id), None)
|
||||
)
|
||||
if total_project_usages is None:
|
||||
# This project/user doesn't have any cached usage information,
|
||||
# so we need to query it.
|
||||
try:
|
||||
total_project_usages, regions_usage_dict = \
|
||||
self.quota_usage_update(project_id, user_id)
|
||||
total_project_usages, regions_usage_dict = self.quota_usage_update(
|
||||
project_id, user_id
|
||||
)
|
||||
except exceptions.ProjectNotFound:
|
||||
total_project_usages = {}
|
||||
|
||||
|
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2020 Wind River Systems, Inc.
# Copyright (c) 2020, 2024 Wind River Systems, Inc.
#

import time
@@ -36,8 +36,7 @@ class ThreadGroupManager(object):

# Create dummy service task, because when there is nothing queued
# on self.tg the process exits
self.add_timer(cfg.CONF.scheduler.periodic_interval,
self._service_task)
self.add_timer(cfg.CONF.scheduler.periodic_interval, self._service_task)

def _service_task(self):
"""Dummy task which gets queued on the service.Service threadgroup.
@@ -96,8 +95,7 @@ def reschedule(action, sleep_time=1):
"""

if sleep_time is not None:
LOG.debug('Action %s sleep for %s seconds' % (
action.id, sleep_time))
LOG.debug("Action %s sleep for %s seconds" % (action.id, sleep_time))
eventlet.sleep(sleep_time)

@@ -92,11 +92,13 @@ class EngineService(service.Service):

if self.periodic_enable:
LOG.info("Adding periodic tasks for the engine to perform")
self.TG.add_timer(CONF.fernet.key_rotation_interval *
dccommon_consts.SECONDS_IN_HOUR,
self.periodic_key_rotation,
initial_delay=(CONF.fernet.key_rotation_interval
* dccommon_consts.SECONDS_IN_HOUR))
self.TG.add_timer(
CONF.fernet.key_rotation_interval * dccommon_consts.SECONDS_IN_HOUR,
self.periodic_key_rotation,
initial_delay=(
CONF.fernet.key_rotation_interval * dccommon_consts.SECONDS_IN_HOUR
),
)

def init_tgm(self):
self.TG = scheduler.ThreadGroupManager()
@@ -119,23 +121,23 @@ class EngineService(service.Service):

def periodic_balance_all(self):
# Automated Quota Sync for all the keystone projects
LOG.info("Periodic quota sync job started at: %s",
time.strftime("%c"))
LOG.info("Periodic quota sync job started at: %s", time.strftime("%c"))
self.qm.periodic_balance_all()

@request_context
def get_usage_for_project_and_user(self, context, endpoint_type,
project_id, user_id=None):
def get_usage_for_project_and_user(
self, context, endpoint_type, project_id, user_id=None
):
# Returns cached usage as of last quota sync audit so will be
# slightly stale.
return self.qm.get_usage_for_project_and_user(endpoint_type,
project_id, user_id)
return self.qm.get_usage_for_project_and_user(
endpoint_type, project_id, user_id
)

@request_context
def quota_sync_for_project(self, context, project_id, user_id):
# On Demand Quota Sync for a project, will be triggered by KB-API
LOG.info("On Demand Quota Sync Called for: %s %s",
project_id, user_id)
LOG.info("On Demand Quota Sync Called for: %s %s", project_id, user_id)
self.qm.quota_sync_for_project(project_id, user_id)

def _stop_rpc_server(self):
@@ -197,9 +199,9 @@ class EngineWorkerService(service.Service):
def start(self):
LOG.info("Starting %s", self.__class__.__name__)
self.engine_id = uuidutils.generate_uuid()
target = oslo_messaging.Target(version=self.rpc_api_version,
server=self.host,
topic=self.topic)
target = oslo_messaging.Target(
version=self.rpc_api_version, server=self.host, topic=self.topic
)
self.target = target
self._rpc_server = rpc_messaging.get_rpc_server(self.target, self)
self._rpc_server.start()
@@ -213,11 +215,14 @@ class EngineWorkerService(service.Service):

def set_resource_limit(self):
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (cfg.CONF.rlimit_nofile,
cfg.CONF.rlimit_nofile))
resource.setrlimit(
resource.RLIMIT_NOFILE, (cfg.CONF.rlimit_nofile, cfg.CONF.rlimit_nofile)
)
except Exception as ex:
LOG.error('Engine id %s: failed to set the NOFILE resource limit: '
'%s' % (self.engine_id, ex))
LOG.error(
"Engine id %s: failed to set the NOFILE resource limit: "
"%s" % (self.engine_id, ex)
)

@request_context
def add_subcloud(self, ctxt, subcloud_name, sw_version, management_ip):
@@ -230,9 +235,9 @@ class EngineWorkerService(service.Service):

@request_context
# todo: add authentication since ctxt not actually needed later
def update_subcloud_states(self, ctxt, subcloud_name,
management_state,
availability_status):
def update_subcloud_states(
self, ctxt, subcloud_name, management_state, availability_status
):
"""Handle subcloud state updates from dcmanager

These state updates must be processed quickly. Any work triggered by
@@ -243,23 +248,26 @@ class EngineWorkerService(service.Service):

# Check if state has changed before doing anything
if self.gswm.subcloud_state_matches(
subcloud_name,
management_state=management_state,
availability_status=availability_status):
subcloud_name,
management_state=management_state,
availability_status=availability_status,
):
# No change in state - nothing to do.
LOG.debug('Ignoring unchanged state update for %s' % subcloud_name)
LOG.debug("Ignoring unchanged state update for %s" % subcloud_name)
return

# Check if the subcloud is ready to sync.
if (management_state == dccommon_consts.MANAGEMENT_MANAGED) and \
(availability_status == dccommon_consts.AVAILABILITY_ONLINE):
if (management_state == dccommon_consts.MANAGEMENT_MANAGED) and (
availability_status == dccommon_consts.AVAILABILITY_ONLINE
):
# Update the subcloud state and schedule an initial sync
self.gswm.update_subcloud_state(
ctxt,
subcloud_name,
management_state=management_state,
availability_status=availability_status,
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED,
)
else:
# Update the subcloud state and cancel the initial sync
self.gswm.update_subcloud_state(
@@ -267,41 +275,55 @@ class EngineWorkerService(service.Service):
subcloud_name,
management_state=management_state,
availability_status=availability_status,
initial_sync_state=consts.INITIAL_SYNC_STATE_NONE)
initial_sync_state=consts.INITIAL_SYNC_STATE_NONE,
)

@request_context
def update_subcloud_state(self, ctxt, subcloud_name,
management_state=None,
availability_status=None,
initial_sync_state=None):
def update_subcloud_state(
self,
ctxt,
subcloud_name,
management_state=None,
availability_status=None,
initial_sync_state=None,
):
LOG.info("Trigger update state for subcloud %s", subcloud_name)
self.gswm.update_subcloud_state(ctxt, subcloud_name,
management_state,
availability_status,
initial_sync_state)
self.gswm.update_subcloud_state(
ctxt,
subcloud_name,
management_state,
availability_status,
initial_sync_state,
)

@request_context
def add_subcloud_sync_endpoint_type(self, ctxt, subcloud_name,
endpoint_type_list=None):
def add_subcloud_sync_endpoint_type(
self, ctxt, subcloud_name, endpoint_type_list=None
):
try:
self.gswm.add_subcloud_sync_endpoint_type(
ctxt, subcloud_name,
endpoint_type_list=endpoint_type_list)
ctxt, subcloud_name, endpoint_type_list=endpoint_type_list
)
except Exception as ex:
LOG.warning('Add subcloud endpoint type failed for %s: %s',
subcloud_name, str(ex))
LOG.warning(
"Add subcloud endpoint type failed for %s: %s", subcloud_name, str(ex)
)
raise

@request_context
def remove_subcloud_sync_endpoint_type(self, ctxt, subcloud_name,
endpoint_type_list=None):
def remove_subcloud_sync_endpoint_type(
self, ctxt, subcloud_name, endpoint_type_list=None
):
try:
self.gswm.remove_subcloud_sync_endpoint_type(
ctxt, subcloud_name,
endpoint_type_list=endpoint_type_list)
ctxt, subcloud_name, endpoint_type_list=endpoint_type_list
)
except Exception as ex:
LOG.warning('Remove subcloud endpoint type failed for %s: %s',
subcloud_name, str(ex))
LOG.warning(
"Remove subcloud endpoint type failed for %s: %s",
subcloud_name,
str(ex),
)
raise

@request_context
@@ -332,7 +354,7 @@ class EngineWorkerService(service.Service):
if self._rpc_server:
self._rpc_server.stop()
self._rpc_server.wait()
LOG.info('Engine-worker service stopped successfully')
LOG.info("Engine-worker service stopped successfully")
except Exception as ex:
LOG.error(f"Failed to stop engine-worker service: {str(ex)}")

@@ -35,17 +35,16 @@ class ComputeSyncThread(SyncThread):
|
||||
"""Manages tasks related to resource management for nova."""
|
||||
|
||||
def __init__(self, subcloud_name, endpoint_type=None, engine_id=None):
|
||||
super(ComputeSyncThread, self).__init__(subcloud_name,
|
||||
endpoint_type=endpoint_type,
|
||||
engine_id=engine_id)
|
||||
super(ComputeSyncThread, self).__init__(
|
||||
subcloud_name, endpoint_type=endpoint_type, engine_id=engine_id
|
||||
)
|
||||
self.region_name = subcloud_name
|
||||
self.endpoint_type = consts.ENDPOINT_TYPE_COMPUTE
|
||||
self.sync_handler_map = {
|
||||
consts.RESOURCE_TYPE_COMPUTE_FLAVOR: self.sync_compute_resource,
|
||||
consts.RESOURCE_TYPE_COMPUTE_KEYPAIR: self.sync_compute_resource,
|
||||
consts.RESOURCE_TYPE_COMPUTE_QUOTA_SET: self.sync_compute_resource,
|
||||
consts.RESOURCE_TYPE_COMPUTE_QUOTA_CLASS_SET:
|
||||
self.sync_compute_resource,
|
||||
consts.RESOURCE_TYPE_COMPUTE_QUOTA_CLASS_SET: self.sync_compute_resource,
|
||||
}
|
||||
self.audit_resources = [
|
||||
consts.RESOURCE_TYPE_COMPUTE_QUOTA_CLASS_SET,
|
||||
@@ -53,19 +52,22 @@ class ComputeSyncThread(SyncThread):
|
||||
consts.RESOURCE_TYPE_COMPUTE_KEYPAIR,
|
||||
# note: no audit here for quotas, that's handled separately
|
||||
]
|
||||
self.log_extra = {"instance": "{}/{}: ".format(
|
||||
self.region_name, self.endpoint_type)}
|
||||
self.log_extra = {
|
||||
"instance": "{}/{}: ".format(self.region_name, self.endpoint_type)
|
||||
}
|
||||
self.sc_nova_client = None
|
||||
self.initialize()
|
||||
LOG.info("ComputeSyncThread initialized", extra=self.log_extra)
|
||||
|
||||
def initialize_sc_clients(self):
|
||||
super(ComputeSyncThread, self).initialize_sc_clients()
|
||||
if (not self.sc_nova_client and self.sc_admin_session):
|
||||
if not self.sc_nova_client and self.sc_admin_session:
|
||||
self.sc_nova_client = novaclient.Client(
|
||||
'2.38', session=self.sc_admin_session,
|
||||
"2.38",
|
||||
session=self.sc_admin_session,
|
||||
endpoint_type=dccommon_consts.KS_ENDPOINT_ADMIN,
|
||||
region_name=self.region_name)
|
||||
region_name=self.region_name,
|
||||
)
|
||||
|
||||
def initialize(self):
|
||||
# Subcloud may be enabled a while after being added.
|
||||
@@ -75,9 +77,11 @@ class ComputeSyncThread(SyncThread):
|
||||
super(ComputeSyncThread, self).initialize()
|
||||
# todo: update version to 2.53 once on pike
|
||||
self.m_nova_client = novaclient.Client(
|
||||
'2.38', session=self.admin_session,
|
||||
"2.38",
|
||||
session=self.admin_session,
|
||||
endpoint_type=dccommon_consts.KS_ENDPOINT_INTERNAL,
|
||||
region_name=dccommon_consts.VIRTUAL_MASTER_CLOUD)
|
||||
region_name=dccommon_consts.VIRTUAL_MASTER_CLOUD,
|
||||
)
|
||||
|
||||
self.initialize_sc_clients()
|
||||
LOG.info("session and clients initialized", extra=self.log_extra)
|
||||
@@ -87,19 +91,25 @@ class ComputeSyncThread(SyncThread):
|
||||
# Invoke function with name format "operationtype_resourcetype".
|
||||
# For example: create_flavor()
|
||||
try:
|
||||
func_name = request.orch_job.operation_type + \
|
||||
"_" + rsrc.resource_type
|
||||
func_name = request.orch_job.operation_type + "_" + rsrc.resource_type
|
||||
getattr(self, func_name)(request, rsrc)
|
||||
except AttributeError:
|
||||
LOG.error("{} not implemented for {}"
|
||||
.format(request.orch_job.operation_type,
|
||||
rsrc.resource_type))
|
||||
LOG.error(
|
||||
"{} not implemented for {}".format(
|
||||
request.orch_job.operation_type, rsrc.resource_type
|
||||
)
|
||||
)
|
||||
raise exceptions.SyncRequestFailed
|
||||
except (keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure) as e:
|
||||
LOG.error("sync_compute_resource: {} is not reachable [{}]"
|
||||
.format(self.region_name,
|
||||
str(e)), extra=self.log_extra)
|
||||
except (
|
||||
keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure,
|
||||
) as e:
|
||||
LOG.error(
|
||||
"sync_compute_resource: {} is not reachable [{}]".format(
|
||||
self.region_name, str(e)
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
raise exceptions.SyncRequestTimeout
|
||||
except exceptions.SyncRequestFailed:
|
||||
raise
|
||||
@@ -109,7 +119,7 @@ class ComputeSyncThread(SyncThread):
|
||||
|
||||
# ---- Override common audit functions ----
|
||||
def get_resource_id(self, resource_type, resource):
|
||||
if hasattr(resource, 'master_id'):
|
||||
if hasattr(resource, "master_id"):
|
||||
# If resource from DB, return master resource id
|
||||
# from master cloud
|
||||
return resource.master_id
|
||||
@@ -118,10 +128,11 @@ class ComputeSyncThread(SyncThread):
|
||||
if resource_type == consts.RESOURCE_TYPE_COMPUTE_KEYPAIR:
|
||||
# User_id field is set in _info data by audit query code.
|
||||
return utils.keypair_construct_id(
|
||||
resource.id, resource._info['keypair']['user_id'])
|
||||
resource.id, resource._info["keypair"]["user_id"]
|
||||
)
|
||||
if resource_type == consts.RESOURCE_TYPE_COMPUTE_QUOTA_CLASS_SET:
|
||||
# We only care about the default class.
|
||||
return 'default'
|
||||
return "default"
|
||||
|
||||
# Nothing special for other resources (flavor)
|
||||
return resource.id
|
||||
@@ -130,12 +141,13 @@ class ComputeSyncThread(SyncThread):
|
||||
if resource_type == consts.RESOURCE_TYPE_COMPUTE_FLAVOR:
|
||||
return jsonutils.dumps(resource._info)
|
||||
elif resource_type == consts.RESOURCE_TYPE_COMPUTE_KEYPAIR:
|
||||
return jsonutils.dumps(resource._info.get('keypair'))
|
||||
return jsonutils.dumps(resource._info.get("keypair"))
|
||||
elif resource_type == consts.RESOURCE_TYPE_COMPUTE_QUOTA_CLASS_SET:
|
||||
return jsonutils.dumps(resource._info)
|
||||
else:
|
||||
return super(ComputeSyncThread, self).get_resource_info(
|
||||
resource_type, resource, operation_type)
|
||||
resource_type, resource, operation_type
|
||||
)
|
||||
|
||||
def get_subcloud_resources(self, resource_type):
|
||||
self.initialize_sc_clients()
|
||||
@@ -144,8 +156,9 @@ class ComputeSyncThread(SyncThread):
|
||||
elif resource_type == consts.RESOURCE_TYPE_COMPUTE_QUOTA_CLASS_SET:
|
||||
return self.get_quota_class_resources(self.sc_nova_client)
|
||||
else:
|
||||
LOG.error("Wrong resource type {}".format(resource_type),
|
||||
extra=self.log_extra)
|
||||
LOG.error(
|
||||
"Wrong resource type {}".format(resource_type), extra=self.log_extra
|
||||
)
|
||||
return None
|
||||
|
||||
def get_master_resources(self, resource_type):
|
||||
@@ -154,8 +167,9 @@ class ComputeSyncThread(SyncThread):
|
||||
elif resource_type == consts.RESOURCE_TYPE_COMPUTE_QUOTA_CLASS_SET:
|
||||
return self.get_quota_class_resources(self.m_nova_client)
|
||||
else:
|
||||
LOG.error("Wrong resource type {}".format(resource_type),
|
||||
extra=self.log_extra)
|
||||
LOG.error(
|
||||
"Wrong resource type {}".format(resource_type), extra=self.log_extra
|
||||
)
|
||||
return None
|
||||
|
||||
def same_resource(self, resource_type, m_resource, sc_resource):
|
||||
@@ -169,15 +183,20 @@ class ComputeSyncThread(SyncThread):
|
||||
return True
|
||||
|
||||
def audit_discrepancy(self, resource_type, m_resource, sc_resources):
|
||||
if resource_type in [consts.RESOURCE_TYPE_COMPUTE_FLAVOR,
|
||||
consts.RESOURCE_TYPE_COMPUTE_KEYPAIR]:
|
||||
if resource_type in [
|
||||
consts.RESOURCE_TYPE_COMPUTE_FLAVOR,
|
||||
consts.RESOURCE_TYPE_COMPUTE_KEYPAIR,
|
||||
]:
|
||||
# It could be that the flavor details are different
|
||||
# between master cloud and subcloud now.
|
||||
# Thus, delete the flavor before creating it again.
|
||||
# Dependants (ex: flavor-access) will be created again.
|
||||
self.schedule_work(self.endpoint_type, resource_type,
|
||||
self.get_resource_id(resource_type, m_resource),
|
||||
consts.OPERATION_TYPE_DELETE)
|
||||
self.schedule_work(
|
||||
self.endpoint_type,
|
||||
resource_type,
|
||||
self.get_resource_id(resource_type, m_resource),
|
||||
consts.OPERATION_TYPE_DELETE,
|
||||
)
|
||||
|
||||
# For quota classes there is no delete operation, so we just want
|
||||
# to update the existing class. Nothing to do here.
|
||||
@@ -188,21 +207,21 @@ class ComputeSyncThread(SyncThread):
|
||||
# ---- Flavor & dependants (flavor-access, extra-spec) ----
|
||||
def create_flavor(self, request, rsrc):
|
||||
flavor_dict = jsonutils.loads(request.orch_job.resource_info)
|
||||
name = flavor_dict['name']
|
||||
ram = flavor_dict['ram']
|
||||
vcpus = flavor_dict['vcpus']
|
||||
disk = flavor_dict['disk']
|
||||
name = flavor_dict["name"]
|
||||
ram = flavor_dict["ram"]
|
||||
vcpus = flavor_dict["vcpus"]
|
||||
disk = flavor_dict["disk"]
|
||||
kwargs = {}
|
||||
# id is always passed in by proxy
|
||||
kwargs['flavorid'] = flavor_dict['id']
|
||||
if 'OS-FLV-EXT-DATA:ephemeral' in flavor_dict:
|
||||
kwargs['ephemeral'] = flavor_dict['OS-FLV-EXT-DATA:ephemeral']
|
||||
if 'swap' in flavor_dict and flavor_dict['swap']:
|
||||
kwargs['swap'] = flavor_dict['swap']
|
||||
if 'rxtx_factor' in flavor_dict:
|
||||
kwargs['rxtx_factor'] = flavor_dict['rxtx_factor']
|
||||
if 'os-flavor-access:is_public' in flavor_dict:
|
||||
kwargs['is_public'] = flavor_dict['os-flavor-access:is_public']
|
||||
kwargs["flavorid"] = flavor_dict["id"]
|
||||
if "OS-FLV-EXT-DATA:ephemeral" in flavor_dict:
|
||||
kwargs["ephemeral"] = flavor_dict["OS-FLV-EXT-DATA:ephemeral"]
|
||||
if "swap" in flavor_dict and flavor_dict["swap"]:
|
||||
kwargs["swap"] = flavor_dict["swap"]
|
||||
if "rxtx_factor" in flavor_dict:
|
||||
kwargs["rxtx_factor"] = flavor_dict["rxtx_factor"]
|
||||
if "os-flavor-access:is_public" in flavor_dict:
|
||||
kwargs["is_public"] = flavor_dict["os-flavor-access:is_public"]
|
||||
|
||||
# todo: maybe we can bypass all the above and just directly call
|
||||
# self.sc_nova_client.flavors._create("/flavors", body, "flavor")
|
||||
@@ -210,25 +229,29 @@ class ComputeSyncThread(SyncThread):
|
||||
newflavor = None
|
||||
try:
|
||||
newflavor = self.sc_nova_client.flavors.create(
|
||||
name, ram, vcpus, disk, **kwargs)
|
||||
name, ram, vcpus, disk, **kwargs
|
||||
)
|
||||
except novaclient_exceptions.Conflict as e:
|
||||
if "already exists" in str(e):
|
||||
# FlavorExists or FlavorIdExists.
|
||||
LOG.info("Flavor {} already exists in subcloud"
|
||||
.format(name), extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Flavor {} already exists in subcloud".format(name),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
# Compare the flavor details and recreate flavor if required.
|
||||
newflavor = self.recreate_flavor_if_reqd(name, ram, vcpus,
|
||||
disk, kwargs)
|
||||
newflavor = self.recreate_flavor_if_reqd(name, ram, vcpus, disk, kwargs)
|
||||
else:
|
||||
LOG.exception(e)
|
||||
if not newflavor:
|
||||
raise exceptions.SyncRequestFailed
|
||||
|
||||
subcloud_rsrc_id = self.persist_db_subcloud_resource(
|
||||
rsrc.id, newflavor.id)
|
||||
LOG.info("Flavor {}:{} [{}/{}] created"
|
||||
.format(rsrc.id, subcloud_rsrc_id, name, newflavor.id),
|
||||
extra=self.log_extra)
|
||||
subcloud_rsrc_id = self.persist_db_subcloud_resource(rsrc.id, newflavor.id)
|
||||
LOG.info(
|
||||
"Flavor {}:{} [{}/{}] created".format(
|
||||
rsrc.id, subcloud_rsrc_id, name, newflavor.id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def recreate_flavor_if_reqd(self, name, ram, vcpus, disk, kwargs):
|
||||
# Both the flavor name and the flavor id must be unique.
|
||||
@@ -239,25 +262,28 @@ class ComputeSyncThread(SyncThread):
|
||||
# and recreate the flavor only if required.
|
||||
newflavor = None
|
||||
try:
|
||||
master_flavor = self.m_nova_client.flavors.get(kwargs['flavorid'])
|
||||
master_flavor = self.m_nova_client.flavors.get(kwargs["flavorid"])
|
||||
subcloud_flavor = None
|
||||
sc_flavors = self.sc_nova_client.flavors.list(is_public=None)
|
||||
for sc_flavor in sc_flavors:
|
||||
# subcloud flavor might have the same name and/or the same id
|
||||
if name == sc_flavor.name or \
|
||||
kwargs['flavorid'] == sc_flavor.id:
|
||||
if name == sc_flavor.name or kwargs["flavorid"] == sc_flavor.id:
|
||||
subcloud_flavor = sc_flavor
|
||||
break
|
||||
if master_flavor and subcloud_flavor:
|
||||
if self.same_flavor(master_flavor, subcloud_flavor):
|
||||
newflavor = subcloud_flavor
|
||||
else:
|
||||
LOG.info("recreate_flavor, deleting {}:{}".format(
|
||||
subcloud_flavor.name, subcloud_flavor.id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"recreate_flavor, deleting {}:{}".format(
|
||||
subcloud_flavor.name, subcloud_flavor.id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
self.sc_nova_client.flavors.delete(subcloud_flavor.id)
|
||||
newflavor = self.sc_nova_client.flavors.create(
|
||||
name, ram, vcpus, disk, **kwargs)
|
||||
name, ram, vcpus, disk, **kwargs
|
||||
)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
raise exceptions.SyncRequestFailed
|
||||
@@ -268,26 +294,31 @@ class ComputeSyncThread(SyncThread):
|
||||
if not subcloud_rsrc:
|
||||
return
|
||||
try:
|
||||
self.sc_nova_client.flavors.delete(
|
||||
subcloud_rsrc.subcloud_resource_id)
|
||||
self.sc_nova_client.flavors.delete(subcloud_rsrc.subcloud_resource_id)
|
||||
except novaclient_exceptions.NotFound:
|
||||
# Flavor already deleted in subcloud, carry on.
|
||||
LOG.info("ResourceNotFound in subcloud, may be already deleted",
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"ResourceNotFound in subcloud, may be already deleted",
|
||||
extra=self.log_extra,
|
||||
)
|
||||
subcloud_rsrc.delete()
|
||||
# Master Resource can be deleted only when all subcloud resources
|
||||
# are deleted along with corresponding orch_job and orch_requests.
|
||||
LOG.info("Flavor {}:{} [{}] deleted".format(rsrc.id, subcloud_rsrc.id,
|
||||
subcloud_rsrc.subcloud_resource_id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Flavor {}:{} [{}] deleted".format(
|
||||
rsrc.id, subcloud_rsrc.id, subcloud_rsrc.subcloud_resource_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def action_flavor(self, request, rsrc):
|
||||
action_dict = jsonutils.loads(request.orch_job.resource_info)
|
||||
subcloud_rsrc = self.get_db_subcloud_resource(rsrc.id)
|
||||
if not subcloud_rsrc:
|
||||
LOG.error("Subcloud resource missing for {}:{}"
|
||||
.format(rsrc, action_dict),
|
||||
extra=self.log_extra)
|
||||
LOG.error(
|
||||
"Subcloud resource missing for {}:{}".format(rsrc, action_dict),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
return
|
||||
|
||||
switcher = {
|
||||
@@ -298,35 +329,45 @@ class ComputeSyncThread(SyncThread):
|
||||
}
|
||||
action = list(action_dict.keys())[0]
|
||||
if action not in list(switcher.keys()):
|
||||
LOG.error("Unsupported flavor action {}".format(action),
|
||||
extra=self.log_extra)
|
||||
LOG.error(
|
||||
"Unsupported flavor action {}".format(action), extra=self.log_extra
|
||||
)
|
||||
return
|
||||
LOG.info("Flavor action [{}]: {}".format(action, action_dict),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Flavor action [{}]: {}".format(action, action_dict), extra=self.log_extra
|
||||
)
|
||||
switcher[action](rsrc, action, action_dict, subcloud_rsrc)
|
||||
|
||||
def add_tenant_access(self, rsrc, action, action_dict, subcloud_rsrc):
|
||||
tenant_id = action_dict[action]['tenant']
|
||||
tenant_id = action_dict[action]["tenant"]
|
||||
try:
|
||||
self.sc_nova_client.flavor_access.add_tenant_access(
|
||||
subcloud_rsrc.subcloud_resource_id, tenant_id)
|
||||
subcloud_rsrc.subcloud_resource_id, tenant_id
|
||||
)
|
||||
except novaclient_exceptions.Conflict:
|
||||
LOG.info("Flavor-access already present {}:{}".format(rsrc, action_dict),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Flavor-access already present {}:{}".format(rsrc, action_dict),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def remove_tenant_access(self, rsrc, action, action_dict, subcloud_rsrc):
|
||||
tenant_id = action_dict[action]['tenant']
|
||||
tenant_id = action_dict[action]["tenant"]
|
||||
try:
|
||||
self.sc_nova_client.flavor_access.remove_tenant_access(
|
||||
subcloud_rsrc.subcloud_resource_id, tenant_id)
|
||||
subcloud_rsrc.subcloud_resource_id, tenant_id
|
||||
)
|
||||
except novaclient_exceptions.NotFound:
|
||||
LOG.info("Flavor-access already deleted {}:{}".format(rsrc, action_dict),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Flavor-access already deleted {}:{}".format(rsrc, action_dict),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def set_extra_specs(self, rsrc, action, action_dict, subcloud_rsrc):
|
||||
flavor = novaclient_utils.find_resource(
|
||||
self.sc_nova_client.flavors,
|
||||
subcloud_rsrc.subcloud_resource_id, is_public=None)
|
||||
subcloud_rsrc.subcloud_resource_id,
|
||||
is_public=None,
|
||||
)
|
||||
flavor.set_keys(action_dict[action])
|
||||
# No need to handle "extra-spec already exists" case.
|
||||
# Nova throws no exception for that.
|
||||
@@ -334,21 +375,27 @@ class ComputeSyncThread(SyncThread):
|
||||
def unset_extra_specs(self, rsrc, action, action_dict, subcloud_rsrc):
|
||||
flavor = novaclient_utils.find_resource(
|
||||
self.sc_nova_client.flavors,
|
||||
subcloud_rsrc.subcloud_resource_id, is_public=None)
|
||||
subcloud_rsrc.subcloud_resource_id,
|
||||
is_public=None,
|
||||
)
|
||||
|
||||
es_metadata = action_dict[action]
|
||||
metadata = {}
|
||||
# extra_spec keys passed in could be of format "key1"
|
||||
# or "key1;key2;key3"
|
||||
for metadatum in es_metadata.split(';'):
|
||||
for metadatum in es_metadata.split(";"):
|
||||
if metadatum:
|
||||
metadata[metadatum] = None
|
||||
|
||||
try:
|
||||
flavor.unset_keys(list(metadata.keys()))
|
||||
except novaclient_exceptions.NotFound:
|
||||
LOG.info("Extra-spec {} not found {}:{}".format(
|
||||
list(metadata.keys()), rsrc, action_dict), extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Extra-spec {} not found {}:{}".format(
|
||||
list(metadata.keys()), rsrc, action_dict
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def get_flavor_resources(self, nc):
|
||||
try:
|
||||
@@ -363,9 +410,10 @@ class ComputeSyncThread(SyncThread):
|
||||
except novaclient_exceptions.NotFound:
|
||||
# flavor/flavor_access just got deleted
|
||||
# (after flavors.list)
|
||||
LOG.info("Flavor/flavor_access not found [{}]"
|
||||
.format(flavor.id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Flavor/flavor_access not found [{}]".format(flavor.id),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
flavor.attach_fa = []
|
||||
else:
|
||||
flavor.attach_fa = []
|
||||
@@ -374,25 +422,32 @@ class ComputeSyncThread(SyncThread):
|
||||
# it can be audited later in audit_dependants()
|
||||
flavor.attach_es = flavor.get_keys()
|
||||
return flavors
|
||||
except (keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure) as e:
|
||||
LOG.info("get_flavor: subcloud {} is not reachable [{}]"
|
||||
.format(self.region_name,
|
||||
str(e)), extra=self.log_extra)
|
||||
except (
|
||||
keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure,
|
||||
) as e:
|
||||
LOG.info(
|
||||
"get_flavor: subcloud {} is not reachable [{}]".format(
|
||||
self.region_name, str(e)
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
return None
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
return None
|
||||
|
||||
def same_flavor(self, f1, f2):
|
||||
return (f1.name == f2.name and
|
||||
f1.vcpus == f2.vcpus and
|
||||
f1.ram == f2.ram and
|
||||
f1.disk == f2.disk and
|
||||
f1.swap == f2.swap and
|
||||
f1.rxtx_factor == f2.rxtx_factor and
|
||||
f1.is_public == f2.is_public and
|
||||
f1.ephemeral == f2.ephemeral)
|
||||
return (
|
||||
f1.name == f2.name
|
||||
and f1.vcpus == f2.vcpus
|
||||
and f1.ram == f2.ram
|
||||
and f1.disk == f2.disk
|
||||
and f1.swap == f2.swap
|
||||
and f1.rxtx_factor == f2.rxtx_factor
|
||||
and f1.is_public == f2.is_public
|
||||
and f1.ephemeral == f2.ephemeral
|
||||
)
|
||||
|
||||
def audit_dependants(self, resource_type, m_resource, sc_resource):
|
||||
num_of_audit_jobs = 0
|
||||
@@ -400,9 +455,11 @@ class ComputeSyncThread(SyncThread):
|
||||
return num_of_audit_jobs
|
||||
if resource_type == consts.RESOURCE_TYPE_COMPUTE_FLAVOR:
|
||||
num_of_audit_jobs += self.audit_flavor_access(
|
||||
resource_type, m_resource, sc_resource)
|
||||
resource_type, m_resource, sc_resource
|
||||
)
|
||||
num_of_audit_jobs += self.audit_extra_specs(
|
||||
resource_type, m_resource, sc_resource)
|
||||
resource_type, m_resource, sc_resource
|
||||
)
|
||||
return num_of_audit_jobs
|
||||
|
||||
def audit_flavor_access(self, resource_type, m_resource, sc_resource):
|
||||
@@ -422,20 +479,28 @@ class ComputeSyncThread(SyncThread):
|
||||
break
|
||||
if not found:
|
||||
action_dict = {
|
||||
consts.ACTION_ADDTENANTACCESS: {"tenant": m_fa.tenant_id}}
|
||||
consts.ACTION_ADDTENANTACCESS: {"tenant": m_fa.tenant_id}
|
||||
}
|
||||
self.schedule_work(
|
||||
self.endpoint_type, resource_type, m_resource.id,
|
||||
self.endpoint_type,
|
||||
resource_type,
|
||||
m_resource.id,
|
||||
consts.OPERATION_TYPE_ACTION,
|
||||
jsonutils.dumps(action_dict))
|
||||
jsonutils.dumps(action_dict),
|
||||
)
|
||||
num_of_audit_jobs += 1
|
||||
|
||||
for sc_fa in sc_fa_attachment:
|
||||
action_dict = {
|
||||
consts.ACTION_REMOVETENANTACCESS: {"tenant": sc_fa.tenant_id}}
|
||||
consts.ACTION_REMOVETENANTACCESS: {"tenant": sc_fa.tenant_id}
|
||||
}
|
||||
self.schedule_work(
|
||||
self.endpoint_type, resource_type, m_resource.id,
|
||||
self.endpoint_type,
|
||||
resource_type,
|
||||
m_resource.id,
|
||||
consts.OPERATION_TYPE_ACTION,
|
||||
jsonutils.dumps(action_dict))
|
||||
jsonutils.dumps(action_dict),
|
||||
)
|
||||
num_of_audit_jobs += 1
|
||||
|
||||
return num_of_audit_jobs
|
||||
@@ -462,8 +527,12 @@ class ComputeSyncThread(SyncThread):
|
||||
if metadata:
|
||||
action_dict = {consts.ACTION_EXTRASPECS_POST: metadata}
|
||||
self.schedule_work(
|
||||
self.endpoint_type, resource_type, m_flavor.id,
|
||||
consts.OPERATION_TYPE_ACTION, jsonutils.dumps(action_dict))
|
||||
self.endpoint_type,
|
||||
resource_type,
|
||||
m_flavor.id,
|
||||
consts.OPERATION_TYPE_ACTION,
|
||||
jsonutils.dumps(action_dict),
|
||||
)
|
||||
num_of_audit_jobs += 1
|
||||
|
||||
keys_to_delete = ""
|
||||
@@ -472,8 +541,12 @@ class ComputeSyncThread(SyncThread):
|
||||
if keys_to_delete:
|
||||
action_dict = {consts.ACTION_EXTRASPECS_DELETE: keys_to_delete}
|
||||
self.schedule_work(
|
||||
self.endpoint_type, resource_type, m_flavor.id,
|
||||
consts.OPERATION_TYPE_ACTION, jsonutils.dumps(action_dict))
|
||||
self.endpoint_type,
|
||||
resource_type,
|
||||
m_flavor.id,
|
||||
consts.OPERATION_TYPE_ACTION,
|
||||
jsonutils.dumps(action_dict),
|
||||
)
|
||||
num_of_audit_jobs += 1
|
||||
|
||||
return num_of_audit_jobs
|
||||
@@ -482,30 +555,32 @@ class ComputeSyncThread(SyncThread):
|
||||
def create_keypair(self, request, rsrc):
|
||||
keypair_dict = jsonutils.loads(request.orch_job.resource_info)
|
||||
name, user_id = utils.keypair_deconstruct_id(rsrc.master_id)
|
||||
log_str = rsrc.master_id + ' ' + name + '/' + user_id
|
||||
log_str = rsrc.master_id + " " + name + "/" + user_id
|
||||
kwargs = {}
|
||||
kwargs['user_id'] = user_id
|
||||
if 'public_key' in keypair_dict:
|
||||
kwargs['public_key'] = keypair_dict['public_key']
|
||||
if 'type' in keypair_dict:
|
||||
kwargs['key_type'] = keypair_dict['type']
|
||||
log_str += "/" + kwargs['key_type']
|
||||
kwargs["user_id"] = user_id
|
||||
if "public_key" in keypair_dict:
|
||||
kwargs["public_key"] = keypair_dict["public_key"]
|
||||
if "type" in keypair_dict:
|
||||
kwargs["key_type"] = keypair_dict["type"]
|
||||
log_str += "/" + kwargs["key_type"]
|
||||
newkeypair = None
|
||||
try:
|
||||
newkeypair = self.sc_nova_client.keypairs.create(name, **kwargs)
|
||||
except novaclient_exceptions.Conflict:
|
||||
# KeyPairExists: keypair with same name already exists.
|
||||
LOG.info("Keypair {} already exists in subcloud"
|
||||
.format(log_str), extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Keypair {} already exists in subcloud".format(log_str),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
newkeypair = self.recreate_keypair(name, kwargs)
|
||||
if not newkeypair:
|
||||
raise exceptions.SyncRequestFailed
|
||||
|
||||
subcloud_rsrc_id = self.persist_db_subcloud_resource(
|
||||
rsrc.id, rsrc.master_id)
|
||||
LOG.info("Keypair {}:{} [{}] created".format(rsrc.id,
|
||||
subcloud_rsrc_id, log_str),
|
||||
extra=self.log_extra)
|
||||
subcloud_rsrc_id = self.persist_db_subcloud_resource(rsrc.id, rsrc.master_id)
|
||||
LOG.info(
|
||||
"Keypair {}:{} [{}] created".format(rsrc.id, subcloud_rsrc_id, log_str),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def recreate_keypair(self, name, kwargs):
|
||||
newkeypair = None
|
||||
@@ -515,13 +590,13 @@ class ComputeSyncThread(SyncThread):
|
||||
# This is different from recreate_flavor_if_reqd().
|
||||
# Here for keypair, name and user_id are already available
|
||||
# and query api can be avoided.
|
||||
delete_kw = {'user_id': kwargs['user_id']}
|
||||
LOG.info("recreate_keypair, deleting {}:{}"
|
||||
.format(name, delete_kw),
|
||||
extra=self.log_extra)
|
||||
delete_kw = {"user_id": kwargs["user_id"]}
|
||||
LOG.info(
|
||||
"recreate_keypair, deleting {}:{}".format(name, delete_kw),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
self.sc_nova_client.keypairs.delete(name, **delete_kw)
|
||||
newkeypair = self.sc_nova_client.keypairs.create(
|
||||
name, **kwargs)
|
||||
newkeypair = self.sc_nova_client.keypairs.create(name, **kwargs)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
raise exceptions.SyncRequestFailed
|
||||
@@ -532,22 +607,27 @@ class ComputeSyncThread(SyncThread):
|
||||
if not subcloud_rsrc:
|
||||
return
|
||||
name, user_id = utils.keypair_deconstruct_id(rsrc.master_id)
|
||||
log_str = subcloud_rsrc.subcloud_resource_id + ' ' + \
|
||||
name + '/' + user_id
|
||||
log_str = subcloud_rsrc.subcloud_resource_id + " " + name + "/" + user_id
|
||||
kwargs = {}
|
||||
kwargs['user_id'] = user_id
|
||||
kwargs["user_id"] = user_id
|
||||
try:
|
||||
self.sc_nova_client.keypairs.delete(name, **kwargs)
|
||||
except novaclient_exceptions.NotFound:
|
||||
# Keypair already deleted in subcloud, carry on.
|
||||
LOG.info("Keypair {} not found in subcloud, may be already deleted"
|
||||
.format(log_str), extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Keypair {} not found in subcloud, may be already deleted".format(
|
||||
log_str
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
subcloud_rsrc.delete()
|
||||
# Master Resource can be deleted only when all subcloud resources
|
||||
# are deleted along with corresponding orch_job and orch_requests.
|
||||
# pylint: disable=E1101
|
||||
LOG.info("Keypair {}:{} [{}] deleted".format(rsrc.id, subcloud_rsrc.id,
|
||||
log_str), extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Keypair {}:{} [{}] deleted".format(rsrc.id, subcloud_rsrc.id, log_str),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def get_all_resources(self, resource_type):
|
||||
if resource_type == consts.RESOURCE_TYPE_COMPUTE_KEYPAIR:
|
||||
@@ -561,7 +641,8 @@ class ComputeSyncThread(SyncThread):
|
||||
users_with_kps = set()
|
||||
for user in users:
|
||||
user_keypairs = self.get_keypair_resources(
|
||||
self.m_nova_client, user.id)
|
||||
self.m_nova_client, user.id
|
||||
)
|
||||
if user_keypairs:
|
||||
m_resources.extend(user_keypairs)
|
||||
users_with_kps.add(user.id)
|
||||
@@ -570,38 +651,45 @@ class ComputeSyncThread(SyncThread):
|
||||
# master cloud
|
||||
for userid in users_with_kps:
|
||||
sc_user_keypairs = self.get_keypair_resources(
|
||||
self.sc_nova_client, userid)
|
||||
self.sc_nova_client, userid
|
||||
)
|
||||
if sc_user_keypairs:
|
||||
sc_resources.extend(sc_user_keypairs)
|
||||
LOG.info("get_all_resources: users_with_kps={}"
|
||||
.format(users_with_kps), extra=self.log_extra)
|
||||
LOG.info(
|
||||
"get_all_resources: users_with_kps={}".format(users_with_kps),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
return m_resources, db_resources, sc_resources
|
||||
except (keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure) as e:
|
||||
LOG.info("get_all_resources: subcloud {} is not reachable [{}]"
|
||||
.format(self.region_name,
|
||||
str(e)), extra=self.log_extra)
|
||||
except (
|
||||
keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure,
|
||||
) as e:
|
||||
LOG.info(
|
||||
"get_all_resources: subcloud {} is not reachable [{}]".format(
|
||||
self.region_name, str(e)
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
return None, None, None
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
return None, None, None
|
||||
else:
|
||||
return super(ComputeSyncThread, self).get_all_resources(
|
||||
resource_type)
|
||||
return super(ComputeSyncThread, self).get_all_resources(resource_type)
|
||||
|
||||
def get_keypair_resources(self, nc, user_id):
|
||||
keypairs = nc.keypairs.list(user_id)
|
||||
for keypair in keypairs:
|
||||
keypair._info['keypair']['user_id'] = user_id
|
||||
keypair._info["keypair"]["user_id"] = user_id
|
||||
return keypairs
|
||||
|
||||
def same_keypair(self, k1, k2):
|
||||
return (k1.name == k2.name
|
||||
and k1.type == k2.type
|
||||
and k1.fingerprint == k2.fingerprint
|
||||
and (k1._info['keypair']['user_id'] ==
|
||||
k2._info['keypair']['user_id'])
|
||||
)
|
||||
return (
|
||||
k1.name == k2.name
|
||||
and k1.type == k2.type
|
||||
and k1.fingerprint == k2.fingerprint
|
||||
and (k1._info["keypair"]["user_id"] == k2._info["keypair"]["user_id"])
|
||||
)
|
||||
|
||||
# ---- quota_set resource operations ----
|
||||
def put_compute_quota_set(self, request, rsrc):
|
||||
@@ -611,26 +699,27 @@ class ComputeSyncThread(SyncThread):
|
||||
quota_dict = jsonutils.loads(request.orch_job.resource_info)
|
||||
|
||||
# Extract the user_id if there is one.
|
||||
user_id = quota_dict.pop('user_id', None)
|
||||
user_id = quota_dict.pop("user_id", None)
|
||||
|
||||
# Calculate the new limits for this subcloud (factoring in the
|
||||
# existing usage).
|
||||
quota_dict = \
|
||||
quota_manager.QuotaManager.calculate_subcloud_project_quotas(
|
||||
project_id, user_id, quota_dict,
|
||||
self.region_name)
|
||||
quota_dict = quota_manager.QuotaManager.calculate_subcloud_project_quotas(
|
||||
project_id, user_id, quota_dict, self.region_name
|
||||
)
|
||||
|
||||
# Force the update in case existing usage is higher.
|
||||
quota_dict['force'] = True
|
||||
quota_dict["force"] = True
|
||||
|
||||
# Apply the limits to the subcloud.
|
||||
self.sc_nova_client.quotas.update(project_id, user_id=user_id,
|
||||
**quota_dict)
|
||||
self.sc_nova_client.quotas.update(project_id, user_id=user_id, **quota_dict)
|
||||
# Persist the subcloud resource. (Not really applicable for quotas.)
|
||||
self.persist_db_subcloud_resource(rsrc.id, rsrc.master_id)
|
||||
LOG.info("Updated quotas {} for tenant {} and user {}"
|
||||
.format(quota_dict, rsrc.master_id, user_id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Updated quotas {} for tenant {} and user {}".format(
|
||||
quota_dict, rsrc.master_id, user_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def delete_compute_quota_set(self, request, rsrc):
|
||||
# There's tricky behaviour here, pay attention!
|
||||
@@ -653,7 +742,7 @@ class ComputeSyncThread(SyncThread):
|
||||
req_info = jsonutils.loads(request.orch_job.resource_info)
|
||||
|
||||
# Extract the user_id if there is one.
|
||||
user_id = req_info.pop('user_id', None)
|
||||
user_id = req_info.pop("user_id", None)
|
||||
|
||||
# Delete the quota set in the subcloud. If user_id is None this will
|
||||
# also delete the quota-sets for all users within this project.
|
||||
@@ -677,21 +766,19 @@ class ComputeSyncThread(SyncThread):
|
||||
quota_dict = quota_resource.to_dict()
|
||||
|
||||
# Get rid of the "id" field before doing any calculations
|
||||
quota_dict.pop('id', None)
|
||||
quota_dict.pop("id", None)
|
||||
|
||||
# Calculate the new limits for this subcloud (factoring in the
|
||||
# existing usage).
|
||||
quota_dict = \
|
||||
quota_manager.QuotaManager.calculate_subcloud_project_quotas(
|
||||
project_id, user_id, quota_dict,
|
||||
self.region_name)
|
||||
quota_dict = quota_manager.QuotaManager.calculate_subcloud_project_quotas(
|
||||
project_id, user_id, quota_dict, self.region_name
|
||||
)
|
||||
|
||||
# Force the update in case existing usage is higher.
|
||||
quota_dict['force'] = True
|
||||
quota_dict["force"] = True
|
||||
|
||||
# Apply the limits to the subcloud.
|
||||
self.sc_nova_client.quotas.update(project_id, user_id=user_id,
|
||||
**quota_dict)
|
||||
self.sc_nova_client.quotas.update(project_id, user_id=user_id, **quota_dict)
|
||||
|
||||
# ---- quota_set resource operations ----
|
||||
def put_quota_class_set(self, request, rsrc):
|
||||
@@ -702,16 +789,17 @@ class ComputeSyncThread(SyncThread):
|
||||
quota_dict = jsonutils.loads(request.orch_job.resource_info)
|
||||
|
||||
# If this is coming from the audit we need to remove the "id" field.
|
||||
quota_dict.pop('id', None)
|
||||
quota_dict.pop("id", None)
|
||||
|
||||
# Apply the new quota class limits to the subcloud.
|
||||
self.sc_nova_client.quota_classes.update(class_id, **quota_dict)
|
||||
|
||||
# Persist the subcloud resource. (Not really applicable for quotas.)
|
||||
self.persist_db_subcloud_resource(rsrc.id, rsrc.master_id)
|
||||
LOG.info("Updated quota classes {} for class {}"
|
||||
.format(quota_dict, rsrc.master_id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Updated quota classes {} for class {}".format(quota_dict, rsrc.master_id),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
# This will only be called by the audit code.
|
||||
def create_quota_class_set(self, request, rsrc):
|
||||
@@ -726,13 +814,18 @@ class ComputeSyncThread(SyncThread):
|
||||
# We only care about the "default" class since it's the only one
|
||||
# that actually affects nova.
|
||||
try:
|
||||
quota_class = nc.quota_classes.get('default')
|
||||
quota_class = nc.quota_classes.get("default")
|
||||
return [quota_class]
|
||||
except (keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure) as e:
|
||||
LOG.info("get_quota_class: subcloud {} is not reachable [{}]"
|
||||
.format(self.region_name,
|
||||
str(e)), extra=self.log_extra)
|
||||
except (
|
||||
keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure,
|
||||
) as e:
|
||||
LOG.info(
|
||||
"get_quota_class: subcloud {} is not reachable [{}]".format(
|
||||
self.region_name, str(e)
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
return None
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
|
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
|
||||
# Copyright 2017-2018, 2022 Wind River
|
||||
# Copyright 2017-2018, 2022, 2024 Wind River
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -35,17 +35,15 @@ class NetworkSyncThread(SyncThread):
|
||||
"""Manages tasks related to resource management for neutron."""
|
||||
|
||||
def __init__(self, subcloud_name, endpoint_type=None, engine_id=None):
|
||||
super(NetworkSyncThread, self).__init__(subcloud_name,
|
||||
endpoint_type=endpoint_type,
|
||||
engine_id=engine_id)
|
||||
super(NetworkSyncThread, self).__init__(
|
||||
subcloud_name, endpoint_type=endpoint_type, engine_id=engine_id
|
||||
)
|
||||
self.region_name = subcloud_name
|
||||
self.endpoint_type = consts.ENDPOINT_TYPE_NETWORK
|
||||
self.sync_handler_map = {
|
||||
consts.RESOURCE_TYPE_NETWORK_QUOTA_SET: self.sync_network_resource,
|
||||
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP:
|
||||
self.sync_network_resource,
|
||||
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE:
|
||||
self.sync_network_resource,
|
||||
consts.RESOURCE_TYPE_NETWORK_QUOTA_SET: self.sync_network_resource,
|
||||
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP: self.sync_network_resource,
|
||||
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE: self.sync_network_resource,
|
||||
}
|
||||
# Security group needs to come before security group rule to ensure
|
||||
# that the group exists by the time we try to create the rules.
|
||||
@@ -54,19 +52,22 @@ class NetworkSyncThread(SyncThread):
|
||||
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE,
|
||||
# note: no audit here for quotas, that's handled separately
|
||||
]
|
||||
self.log_extra = {"instance": "{}/{}: ".format(
|
||||
self.region_name, self.endpoint_type)}
|
||||
self.log_extra = {
|
||||
"instance": "{}/{}: ".format(self.region_name, self.endpoint_type)
|
||||
}
|
||||
self.sc_neutron_client = None
|
||||
self.initialize()
|
||||
LOG.info("NetworkSyncThread initialized", extra=self.log_extra)
|
||||
|
||||
def initialize_sc_clients(self):
|
||||
super(NetworkSyncThread, self).initialize_sc_clients()
|
||||
if (not self.sc_neutron_client and self.sc_admin_session):
|
||||
if not self.sc_neutron_client and self.sc_admin_session:
|
||||
self.sc_neutron_client = neutronclient.Client(
|
||||
"2.0", session=self.sc_admin_session,
|
||||
"2.0",
|
||||
session=self.sc_admin_session,
|
||||
endpoint_type=dccommon_consts.KS_ENDPOINT_ADMIN,
|
||||
region_name=self.region_name)
|
||||
region_name=self.region_name,
|
||||
)
|
||||
|
||||
def initialize(self):
|
||||
# Subcloud may be enabled a while after being added.
|
||||
@@ -75,31 +76,39 @@ class NetworkSyncThread(SyncThread):
|
||||
# get the most up-to-date service catalog.
|
||||
super(NetworkSyncThread, self).initialize()
|
||||
self.m_neutron_client = neutronclient.Client(
|
||||
"2.0", session=self.admin_session,
|
||||
"2.0",
|
||||
session=self.admin_session,
|
||||
endpoint_type=dccommon_consts.KS_ENDPOINT_INTERNAL,
|
||||
region_name=dccommon_consts.VIRTUAL_MASTER_CLOUD)
|
||||
region_name=dccommon_consts.VIRTUAL_MASTER_CLOUD,
|
||||
)
|
||||
|
||||
self.initialize_sc_clients()
|
||||
LOG.info("session and clients initialized", extra=self.log_extra)
|
||||
|
||||
def sync_network_resource(self, request, rsrc):
|
||||
def sync_network_resource(self, request, rsrc):
|
||||
self.initialize_sc_clients()
|
||||
# Invoke function with name format "operationtype_resourcetype".
|
||||
# For example: create_flavor()
|
||||
try:
|
||||
func_name = request.orch_job.operation_type + \
|
||||
"_" + rsrc.resource_type
|
||||
func_name = request.orch_job.operation_type + "_" + rsrc.resource_type
|
||||
getattr(self, func_name)(request, rsrc)
|
||||
except AttributeError:
|
||||
LOG.error("{} not implemented for {}"
|
||||
.format(request.orch_job.operation_type,
|
||||
rsrc.resource_type))
|
||||
LOG.error(
|
||||
"{} not implemented for {}".format(
|
||||
request.orch_job.operation_type, rsrc.resource_type
|
||||
)
|
||||
)
|
||||
raise exceptions.SyncRequestFailed
|
||||
except (keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure) as e:
|
||||
LOG.error("sync_network_resource: {} is not reachable [{}]"
|
||||
.format(self.region_name,
|
||||
str(e)), extra=self.log_extra)
|
||||
except (
|
||||
keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure,
|
||||
) as e:
|
||||
LOG.error(
|
||||
"sync_net_resource: {} is not reachable [{}]".format(
|
||||
self.region_name, str(e)
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
raise exceptions.SyncRequestTimeout
|
||||
except exceptions.SyncRequestFailed:
|
||||
raise
|
||||
@@ -118,19 +127,21 @@ class NetworkSyncThread(SyncThread):
|
||||
|
||||
# Calculate the new limits for this subcloud (factoring in the
|
||||
# existing usage).
|
||||
quota_dict = \
|
||||
quota_manager.QuotaManager.calculate_subcloud_project_quotas(
|
||||
project_id, user_id, quota_dict,
|
||||
self.region_name)
|
||||
quota_dict = quota_manager.QuotaManager.calculate_subcloud_project_quotas(
|
||||
project_id, user_id, quota_dict, self.region_name
|
||||
)
|
||||
|
||||
# Apply the limits to the subcloud.
|
||||
self.sc_neutron_client.update_quota(project_id, {"quota": quota_dict})
|
||||
|
||||
# Persist the subcloud resource. (Not really applicable for quotas.)
|
||||
self.persist_db_subcloud_resource(rsrc.id, rsrc.master_id)
|
||||
LOG.info("Updated quotas {} for tenant {} and user {}"
|
||||
.format(quota_dict, rsrc.master_id, user_id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Updated quotas {} for tenant {} and user {}".format(
|
||||
quota_dict, rsrc.master_id, user_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def delete_network_quota_set(self, request, rsrc):
|
||||
# When deleting the quota-set in the master cloud, we don't actually
|
||||
@@ -140,14 +151,13 @@ class NetworkSyncThread(SyncThread):
|
||||
user_id = None
|
||||
|
||||
# Get the new master quotas
|
||||
quota_dict = self.m_neutron_client.show_quota(project_id)['quota']
|
||||
quota_dict = self.m_neutron_client.show_quota(project_id)["quota"]
|
||||
|
||||
# Calculate the new limits for this subcloud (factoring in the
|
||||
# existing usage).
|
||||
quota_dict = \
|
||||
quota_manager.QuotaManager.calculate_subcloud_project_quotas(
|
||||
project_id, user_id, quota_dict,
|
||||
self.region_name)
|
||||
quota_dict = quota_manager.QuotaManager.calculate_subcloud_project_quotas(
|
||||
project_id, user_id, quota_dict, self.region_name
|
||||
)
|
||||
|
||||
# Apply the limits to the subcloud.
|
||||
self.sc_neutron_client.update_quota(project_id, {"quota": quota_dict})
|
||||
@@ -164,14 +174,16 @@ class NetworkSyncThread(SyncThread):
|
||||
|
||||
# Create the security group in the subcloud
|
||||
sec_group = self.sc_neutron_client.create_security_group(body)
|
||||
sec_group_id = sec_group['security_group']['id']
|
||||
sec_group_id = sec_group["security_group"]["id"]
|
||||
|
||||
# Persist the subcloud resource.
|
||||
subcloud_rsrc_id = self.persist_db_subcloud_resource(rsrc.id,
|
||||
sec_group_id)
|
||||
LOG.info("Created security group {}:{} [{}]"
|
||||
.format(rsrc.id, subcloud_rsrc_id, sec_group_dict['name']),
|
||||
extra=self.log_extra)
|
||||
subcloud_rsrc_id = self.persist_db_subcloud_resource(rsrc.id, sec_group_id)
|
||||
LOG.info(
|
||||
"Created security group {}:{} [{}]".format(
|
||||
rsrc.id, subcloud_rsrc_id, sec_group_dict["name"]
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def put_security_group(self, request, rsrc):
|
||||
sec_group_dict = jsonutils.loads(request.orch_job.resource_info)
|
||||
@@ -179,20 +191,27 @@ class NetworkSyncThread(SyncThread):
|
||||
|
||||
sec_group_subcloud_rsrc = self.get_db_subcloud_resource(rsrc.id)
|
||||
if not sec_group_subcloud_rsrc:
|
||||
LOG.error("Unable to update security group {}:{},"
|
||||
"cannot find equivalent security group in subcloud."
|
||||
.format(rsrc, sec_group_dict),
|
||||
extra=self.log_extra)
|
||||
LOG.error(
|
||||
"Unable to update security group {}:{},"
|
||||
"cannot find equivalent security group in subcloud.".format(
|
||||
rsrc, sec_group_dict
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
return
|
||||
|
||||
# Update the security group in the subcloud
|
||||
sec_group = self.sc_neutron_client.update_security_group(
|
||||
sec_group_subcloud_rsrc.subcloud_resource_id, body)
|
||||
sec_group = sec_group['security_group']
|
||||
sec_group_subcloud_rsrc.subcloud_resource_id, body
|
||||
)
|
||||
sec_group = sec_group["security_group"]
|
||||
|
||||
LOG.info("Updated security group: {}:{} [{}]"
|
||||
.format(rsrc.id, sec_group['id'], sec_group['name']),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Updated security group: {}:{} [{}]".format(
|
||||
rsrc.id, sec_group["id"], sec_group["name"]
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def delete_security_group(self, request, rsrc):
|
||||
subcloud_rsrc = self.get_db_subcloud_resource(rsrc.id)
|
||||
@@ -200,19 +219,24 @@ class NetworkSyncThread(SyncThread):
|
||||
return
|
||||
try:
|
||||
self.sc_neutron_client.delete_security_group(
|
||||
subcloud_rsrc.subcloud_resource_id)
|
||||
subcloud_rsrc.subcloud_resource_id
|
||||
)
|
||||
except neutronclient_exceptions.NotFound:
|
||||
# security group already deleted in subcloud, carry on.
|
||||
LOG.info("ResourceNotFound in subcloud, may be already deleted",
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"ResourceNotFound in subcloud, may be already deleted",
|
||||
extra=self.log_extra,
|
||||
)
|
||||
subcloud_rsrc.delete()
|
||||
# Master Resource can be deleted only when all subcloud resources
|
||||
# are deleted along with corresponding orch_job and orch_requests.
|
||||
# pylint: disable=E1101
|
||||
LOG.info("Security group {}:{} [{}] deleted"
|
||||
.format(rsrc.id, subcloud_rsrc.id,
subcloud_rsrc.subcloud_resource_id),
extra=self.log_extra)
LOG.info(
"Security group {}:{} [{}] deleted".format(
rsrc.id, subcloud_rsrc.id, subcloud_rsrc.subcloud_resource_id
),
extra=self.log_extra,
)

def post_security_group_rule(self, request, rsrc):
sec_group_rule_dict = jsonutils.loads(request.orch_job.resource_info)
@@ -225,8 +249,8 @@ class NetworkSyncThread(SyncThread):

try:
sec_group_rule_dict = self.update_resource_refs(
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE,
sec_group_rule_dict)
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE, sec_group_rule_dict
)
except exceptions.SubcloudResourceNotFound:
# If we couldn't find the equivalent internal resource refs,
# we don't know what to create in the subcloud.
@@ -237,20 +261,23 @@ class NetworkSyncThread(SyncThread):
# Create the security group in the subcloud
try:
rule = self.sc_neutron_client.create_security_group_rule(body)
rule_id = rule['security_group_rule']['id']
rule_id = rule["security_group_rule"]["id"]
except neutronclient_exceptions.Conflict:
# This can happen if we try to create a rule that is already there.
# If this happens, we'll update our mapping on the next audit.
LOG.info("Problem creating security group rule {}, neutron says"
"it's a duplicate.".format(sec_group_rule_dict))
LOG.info(
"Problem creating security group rule {}, neutron says"
"it's a duplicate.".format(sec_group_rule_dict)
)
# No point in retrying.
raise exceptions.SyncRequestFailed

# Persist the subcloud resource.
self.persist_db_subcloud_resource(rsrc.id, rule_id)
LOG.info("Created security group rule {}:{}"
.format(rsrc.id, rule_id),
extra=self.log_extra)
LOG.info(
"Created security group rule {}:{}".format(rsrc.id, rule_id),
extra=self.log_extra,
)

def delete_security_group_rule(self, request, rsrc):
subcloud_rsrc = self.get_db_subcloud_resource(rsrc.id)
@@ -258,54 +285,63 @@ class NetworkSyncThread(SyncThread):
return
try:
self.sc_neutron_client.delete_security_group_rule(
subcloud_rsrc.subcloud_resource_id)
subcloud_rsrc.subcloud_resource_id
)
except neutronclient_exceptions.NotFound:
# security group rule already deleted in subcloud, carry on.
LOG.info("ResourceNotFound in subcloud, may be already deleted",
extra=self.log_extra)
LOG.info(
"ResourceNotFound in subcloud, may be already deleted",
extra=self.log_extra,
)
subcloud_rsrc.delete()
# Master Resource can be deleted only when all subcloud resources
# are deleted along with corresponding orch_job and orch_requests.
# pylint: disable=E1101
LOG.info("Security group rule {}:{} [{}] deleted"
.format(rsrc.id, subcloud_rsrc.id,
subcloud_rsrc.subcloud_resource_id),
extra=self.log_extra)
LOG.info(
"Security group rule {}:{} [{}] deleted".format(
rsrc.id, subcloud_rsrc.id, subcloud_rsrc.subcloud_resource_id
),
extra=self.log_extra,
)

# ---- Override common audit functions ----

def get_resource_id(self, resource_type, resource):
if hasattr(resource, 'master_id'):
if hasattr(resource, "master_id"):
# If resource from DB, return master resource id
# from master cloud
return resource.master_id

# Else, it is OpenStack resource retrieved from master cloud
if resource_type in (consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE):
return resource['id']
if resource_type in (
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE,
):
return resource["id"]

def get_resource_info(self, resource_type, resource, operation_type=None):
if resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP:
if isinstance(resource, dict):
tmp = resource.copy()
del tmp['id']
del tmp["id"]
return jsonutils.dumps(tmp)
else:
return jsonutils.dumps(
resource._info.get(
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP))
resource._info.get(consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP)
)
elif resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE:
if isinstance(resource, dict):
tmp = resource.copy()
del tmp['id']
del tmp["id"]
return jsonutils.dumps(tmp)
else:
return jsonutils.dumps(resource._info.get(
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE))
return jsonutils.dumps(
resource._info.get(consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE)
)
else:
return super(NetworkSyncThread, self).get_resource_info(
resource_type, resource, operation_type)
resource_type, resource, operation_type
)

def get_resources(self, resource_type, client):
if resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP:
@@ -313,8 +349,9 @@ class NetworkSyncThread(SyncThread):
elif resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE:
return self.get_security_group_rules(client)
else:
LOG.error("Wrong resource type {}".format(resource_type),
extra=self.log_extra)
LOG.error(
"Wrong resource type {}".format(resource_type), extra=self.log_extra
)
return None

def get_subcloud_resources(self, resource_type):
@@ -333,48 +370,60 @@ class NetworkSyncThread(SyncThread):
return True

def audit_discrepancy(self, resource_type, m_resource, sc_resources):
if resource_type in [consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE]:
if resource_type in [
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE,
]:
# It could be that the group/rule details are different
# between master cloud and subcloud now.
# Thus, delete the resource before creating it again.
self.schedule_work(self.endpoint_type, resource_type,
self.get_resource_id(resource_type, m_resource),
consts.OPERATION_TYPE_DELETE)
self.schedule_work(
self.endpoint_type,
resource_type,
self.get_resource_id(resource_type, m_resource),
consts.OPERATION_TYPE_DELETE,
)
# Return true to try creating the resource again
return True

def map_subcloud_resource(self, resource_type, m_r, m_rsrc_db,
sc_resources):
def map_subcloud_resource(self, resource_type, m_r, m_rsrc_db, sc_resources):
# Map an existing subcloud resource to an existing master resource.
# If a mapping is created the function should return True.
# It is expected that update_resource_refs() has been called on m_r.

# Used for security groups since there are a couple of default
# groups (and rules) that get created in the subcloud.
if resource_type in (consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE):
if resource_type in (
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE,
):

for sc_r in sc_resources:
if self.same_resource(resource_type, m_r, sc_r):
LOG.info(
"Mapping resource {} to existing subcloud resource {}"
.format(m_r, sc_r), extra=self.log_extra)
"Mapping resource {} to existing subcloud resource {}".format(
m_r, sc_r
),
extra=self.log_extra,
)
# If the resource is not even in master cloud resource DB,
# create it first.
rsrc = m_rsrc_db
if not rsrc:
master_id = self.get_resource_id(resource_type, m_r)
rsrc = resource.Resource(
self.ctxt, resource_type=resource_type,
master_id=master_id)
self.ctxt, resource_type=resource_type, master_id=master_id
)
rsrc.create()
LOG.info("Resource created in DB {}/{}/{}".format(
rsrc.id, # pylint: disable=E1101
resource_type, master_id))
LOG.info(
"Resource created in DB {}/{}/{}".format(
rsrc.id, # pylint: disable=E1101
resource_type,
master_id,
)
)

self.persist_db_subcloud_resource(rsrc.id,
sc_r['id'])
self.persist_db_subcloud_resource(rsrc.id, sc_r["id"])
return True
return False

@@ -384,48 +433,61 @@ class NetworkSyncThread(SyncThread):
m_r = m_r.copy()
if resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE:

if m_r.get('security_group_id') is not None:
if m_r.get("security_group_id") is not None:
# If the security group id is in the dict then it is for the
# master region, and we need to update it with the equivalent
# id from the subcloud.
master_sec_group_id = m_r['security_group_id']
master_sec_group_id = m_r["security_group_id"]
sec_group_rsrc = resource.Resource.get_by_type_and_master_id(
self.ctxt, consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
master_sec_group_id)
self.ctxt,
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
master_sec_group_id,
)
sec_group_subcloud_rsrc = self.get_db_subcloud_resource(
sec_group_rsrc.id) # pylint: disable=E1101
sec_group_rsrc.id # pylint: disable=E1101
)
if sec_group_subcloud_rsrc:
m_r['security_group_id'] = \
m_r["security_group_id"] = (
sec_group_subcloud_rsrc.subcloud_resource_id
)
else:
LOG.error(
"Unable to update security group id in {},"
"cannot find equivalent security group in subcloud."
.format(m_r), extra=self.log_extra)
"cannot find equivalent security group in subcloud.".format(
m_r
),
extra=self.log_extra,
)
raise exceptions.SubcloudResourceNotFound(
resource=sec_group_rsrc.id) # pylint: disable=E1101
resource=sec_group_rsrc.id # pylint: disable=E1101
)

if m_r.get('remote_group_id') is not None:
if m_r.get("remote_group_id") is not None:
# If the remote group id is in the dict then it is for the
# master region, and we need to update it with the equivalent
# id from the subcloud.
master_remote_group_id = m_r['remote_group_id']
remote_group_rsrc = \
resource.Resource.get_by_type_and_master_id(
self.ctxt, consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
master_remote_group_id)
master_remote_group_id = m_r["remote_group_id"]
remote_group_rsrc = resource.Resource.get_by_type_and_master_id(
self.ctxt,
consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
master_remote_group_id,
)
remote_group_subcloud_rsrc = self.get_db_subcloud_resource(
remote_group_rsrc.id) # pylint: disable=E1101
remote_group_rsrc.id # pylint: disable=E1101
)
if remote_group_subcloud_rsrc:
m_r['remote_group_id'] = \
m_r["remote_group_id"] = (
remote_group_subcloud_rsrc.subcloud_resource_id
)
else:
LOG.error(
"Unable to update remote group id in {},"
"cannot find equivalent remote group in subcloud."
.format(m_r), extra=self.log_extra)
"cannot find equivalent remote group in subcloud.".format(m_r),
extra=self.log_extra,
)
raise exceptions.SubcloudResourceNotFound(
resource=sec_group_rsrc.id) # pylint: disable=E1101
resource=sec_group_rsrc.id # pylint: disable=E1101
)
return m_r

# This will only be called by the audit code.
@@ -441,38 +503,44 @@ class NetworkSyncThread(SyncThread):
# between regions.
# TODO(kbujold): This solution only works if we have one domain within
# keystone.
qc1_tenant_name = sdk.OpenStackDriver().get_project_by_id(
qc1['tenant_id']).name
qc2_tenant_name = sdk.OpenStackDriver(
self.region_name).get_project_by_id(
qc2['tenant_id']).name
qc1_tenant_name = sdk.OpenStackDriver().get_project_by_id(qc1["tenant_id"]).name
qc2_tenant_name = (
sdk.OpenStackDriver(self.region_name)
.get_project_by_id(qc2["tenant_id"])
.name
)

return (qc1['description'] == qc2['description'] and
qc1_tenant_name == qc2_tenant_name and
qc1['name'] == qc2['name'])
return (
qc1["description"] == qc2["description"]
and qc1_tenant_name == qc2_tenant_name
and qc1["name"] == qc2["name"]
)

def same_security_group_rule(self, qc1, qc2):
# Ignore id, created_at, updated_at, and revision_number

# Fetch the tenant name from the project. Tenant id are different
# between regions.
qc1_tenant_name = sdk.OpenStackDriver().get_project_by_id(
qc1['tenant_id']).name
qc2_tenant_name = sdk.OpenStackDriver(
self.region_name).get_project_by_id(
qc2['tenant_id']).name
qc1_tenant_name = sdk.OpenStackDriver().get_project_by_id(qc1["tenant_id"]).name
qc2_tenant_name = (
sdk.OpenStackDriver(self.region_name)
.get_project_by_id(qc2["tenant_id"])
.name
)

return (qc1['description'] == qc2['description'] and
qc1_tenant_name == qc2_tenant_name and
qc1['project_id'] == qc2['project_id'] and
qc1['direction'] == qc2['direction'] and
qc1['protocol'] == qc2['protocol'] and
qc1['ethertype'] == qc2['ethertype'] and
qc1['remote_group_id'] == qc2['remote_group_id'] and
qc1['security_group_id'] == qc2['security_group_id'] and
qc1['remote_ip_prefix'] == qc2['remote_ip_prefix'] and
qc1['port_range_min'] == qc2['port_range_min'] and
qc1['port_range_max'] == qc2['port_range_max'])
return (
qc1["description"] == qc2["description"]
and qc1_tenant_name == qc2_tenant_name
and qc1["project_id"] == qc2["project_id"]
and qc1["direction"] == qc2["direction"]
and qc1["protocol"] == qc2["protocol"]
and qc1["ethertype"] == qc2["ethertype"]
and qc1["remote_group_id"] == qc2["remote_group_id"]
and qc1["security_group_id"] == qc2["security_group_id"]
and qc1["remote_ip_prefix"] == qc2["remote_ip_prefix"]
and qc1["port_range_min"] == qc2["port_range_min"]
and qc1["port_range_max"] == qc2["port_range_max"]
)

def get_security_groups(self, nc):
try:
@@ -481,15 +549,20 @@ class NetworkSyncThread(SyncThread):
# when making a new group. tags would require special handling,
# and security_group_rules is handled separately.
groups = nc.list_security_groups(
retrieve_all=True,
fields=['id', 'name', 'description', 'tenant_id'])
groups = groups['security_groups']
retrieve_all=True, fields=["id", "name", "description", "tenant_id"]
)
groups = groups["security_groups"]
return groups
except (keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure) as e:
LOG.info("get_flavor: subcloud {} is not reachable [{}]"
.format(self.region_name,
str(e)), extra=self.log_extra)
except (
keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure,
) as e:
LOG.info(
"get_flavor: subcloud {} is not reachable [{}]".format(
self.region_name, str(e)
),
extra=self.log_extra,
)
return None
except Exception as e:
LOG.exception(e)
@@ -498,27 +571,32 @@ class NetworkSyncThread(SyncThread):
def get_security_group_rules(self, nc):
try:
rules = nc.list_security_group_rules(retrieve_all=True)
rules = rules['security_group_rules']
rules = rules["security_group_rules"]
for rule in rules:
# We don't need these for comparing/creating security groups
# and/or they're not allowed in POST calls.
del rule['created_at']
del rule['updated_at']
del rule['revision_number']
del rule["created_at"]
del rule["updated_at"]
del rule["revision_number"]
# These would have to be handled separately, not yet supported.
rule.pop('tags', None)
rule.pop("tags", None)
# Some rules have a blank description as an empty string, some
# as None, depending on whether they were auto-created during
# security group creation or added later. Convert the empty
# strings to None.
if rule['description'] == '':
rule['description'] = None
if rule["description"] == "":
rule["description"] = None
return rules
except (keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure) as e:
LOG.info("get_flavor: subcloud {} is not reachable [{}]"
.format(self.region_name,
str(e)), extra=self.log_extra)
except (
keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure,
) as e:
LOG.info(
"get_flavor: subcloud {} is not reachable [{}]".format(
self.region_name, str(e)
),
extra=self.log_extra,
)
return None
except Exception as e:
LOG.exception(e)

@@ -25,7 +25,7 @@ from oslo_utils import timeutils

from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack.sdk_platform import (
OptimizedOpenStackDriver as OpenStackDriver
OptimizedOpenStackDriver as OpenStackDriver,
)
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon.endpoint_cache import build_subcloud_endpoint
@@ -46,36 +46,41 @@ LOG = logging.getLogger(__name__)
class SysinvSyncThread(SyncThread):
"""Manages tasks related to distributed cloud orchestration for sysinv."""

SYSINV_MODIFY_RESOURCES = [consts.RESOURCE_TYPE_SYSINV_USER,
consts.RESOURCE_TYPE_SYSINV_FERNET_REPO]
SYSINV_MODIFY_RESOURCES = [
consts.RESOURCE_TYPE_SYSINV_USER,
consts.RESOURCE_TYPE_SYSINV_FERNET_REPO,
]

SYSINV_CREATE_RESOURCES = [consts.RESOURCE_TYPE_SYSINV_CERTIFICATE,
consts.RESOURCE_TYPE_SYSINV_FERNET_REPO]
SYSINV_CREATE_RESOURCES = [
consts.RESOURCE_TYPE_SYSINV_CERTIFICATE,
consts.RESOURCE_TYPE_SYSINV_FERNET_REPO,
]

CERTIFICATE_SIG_NULL = 'NoCertificate'
RESOURCE_UUID_NULL = 'NoResourceUUID'
CERTIFICATE_SIG_NULL = "NoCertificate"
RESOURCE_UUID_NULL = "NoResourceUUID"

SYNC_CERTIFICATES = ["ssl_ca", "openstack_ca"]

def __init__(self, subcloud_name, endpoint_type=None, management_ip=None,
engine_id=None):
super(SysinvSyncThread, self).__init__(subcloud_name,
endpoint_type=endpoint_type,
management_ip=management_ip,
engine_id=engine_id)
def __init__(
self, subcloud_name, endpoint_type=None, management_ip=None, engine_id=None
):
super(SysinvSyncThread, self).__init__(
subcloud_name,
endpoint_type=endpoint_type,
management_ip=management_ip,
engine_id=engine_id,
)
if not self.endpoint_type:
self.endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
self.sync_handler_map = {
consts.RESOURCE_TYPE_SYSINV_CERTIFICATE:
self.sync_platform_resource,
consts.RESOURCE_TYPE_SYSINV_USER:
self.sync_platform_resource,
consts.RESOURCE_TYPE_SYSINV_FERNET_REPO:
self.sync_platform_resource
consts.RESOURCE_TYPE_SYSINV_CERTIFICATE: self.sync_platform_resource,
consts.RESOURCE_TYPE_SYSINV_USER: self.sync_platform_resource,
consts.RESOURCE_TYPE_SYSINV_FERNET_REPO: self.sync_platform_resource,
}
self.region_name = subcloud_name
self.log_extra = {"instance": "{}/{}: ".format(
self.region_name, self.endpoint_type)}
self.log_extra = {
"instance": "{}/{}: ".format(self.region_name, self.endpoint_type)
}

self.audit_resources = [
consts.RESOURCE_TYPE_SYSINV_CERTIFICATE,
@@ -90,17 +95,19 @@ class SysinvSyncThread(SyncThread):
def initialize_sc_clients(self):
super().initialize_sc_clients()

sc_sysinv_url = build_subcloud_endpoint(self.management_ip, 'sysinv')
LOG.debug(f"Built sc_sysinv_url {sc_sysinv_url} for subcloud "
f"{self.subcloud_name}")
sc_sysinv_url = build_subcloud_endpoint(self.management_ip, "sysinv")
LOG.debug(
f"Built sc_sysinv_url {sc_sysinv_url} for subcloud " f"{self.subcloud_name}"
)

self.sc_sysinv_client = SysinvClient(
region=self.subcloud_name,
session=self.sc_admin_session,
endpoint=sc_sysinv_url)
endpoint=sc_sysinv_url,
)

def get_master_sysinv_client(self):
return get_master_os_client(['sysinv']).sysinv_client
return get_master_os_client(["sysinv"]).sysinv_client

def get_sc_sysinv_client(self):
if self.sc_sysinv_client is None:
@@ -115,106 +122,130 @@ class SysinvSyncThread(SyncThread):
LOG.info("Obj:%s, func:%s" % (type(self), s_func_name))
getattr(self, s_func_name)(self.get_sc_sysinv_client(), request, rsrc)
except AttributeError:
LOG.error("{} not implemented for {}"
.format(request.orch_job.operation_type,
rsrc.resource_type))
LOG.error(
"{} not implemented for {}".format(
request.orch_job.operation_type, rsrc.resource_type
)
)
raise exceptions.SyncRequestFailed
except exceptions.CertificateExpiredException as e:
LOG.info("{} {} aborted: {}".format(
request.orch_job.operation_type, rsrc.resource_type,
str(e)), extra=self.log_extra)
LOG.info(
"{} {} aborted: {}".format(
request.orch_job.operation_type, rsrc.resource_type, str(e)
),
extra=self.log_extra,
)
raise exceptions.SyncRequestAbortedBySystem
except (exceptions.ConnectionRefused, exceptions.TimeOut,
keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure) as e:
LOG.info("{} {} region_name {} exception {}".format(
request.orch_job.operation_type, rsrc.resource_type,
self.region_name, str(e)), extra=self.log_extra)
except (
exceptions.ConnectionRefused,
exceptions.TimeOut,
keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure,
) as e:
LOG.info(
"{} {} region_name {} exception {}".format(
request.orch_job.operation_type,
rsrc.resource_type,
self.region_name,
str(e),
),
extra=self.log_extra,
)
raise exceptions.SyncRequestTimeout
except exceptions.NotAuthorized:
LOG.info("{} {} region_name {} not authorized".format(
request.orch_job.operation_type, rsrc.resource_type,
self.region_name), extra=self.log_extra)
LOG.info(
"{} {} region_name {} not authorized".format(
request.orch_job.operation_type,
rsrc.resource_type,
self.region_name,
),
extra=self.log_extra,
)
OpenStackDriver.delete_region_clients(self.region_name)
raise exceptions.SyncRequestFailedRetry
except Exception as e:
LOG.exception(e)
raise exceptions.SyncRequestFailedRetry

def update_certificate(self, sysinv_client, signature,
certificate=None, data=None):
def update_certificate(self, sysinv_client, signature, certificate=None, data=None):

try:
icertificate = sysinv_client.update_certificate(
signature, certificate=certificate, data=data)
signature, certificate=certificate, data=data
)
return icertificate
except (AttributeError, TypeError) as e:
LOG.info("update_certificate error {} region_name".format(e),
extra=self.log_extra)
LOG.info(
"update_certificate error {} region_name".format(e),
extra=self.log_extra,
)
raise exceptions.SyncRequestFailedRetry

@staticmethod
def _decode_certificate_payload(certificate_dict):
"""Decode certificate from payload.

params: certificate_dict
returns: certificate, metadata
params: certificate_dict
returns: certificate, metadata
"""
certificate = None
metadata = {}
content_disposition = 'Content-Disposition'
content_disposition = "Content-Disposition"
try:
content_type = certificate_dict.get('content_type')
payload = certificate_dict.get('payload')
content_type = certificate_dict.get("content_type")
payload = certificate_dict.get("payload")
multipart_data = MultipartDecoder(payload, content_type)
for part in multipart_data.parts:
if ('name="passphrase"' in part.headers.get(
content_disposition)):
metadata.update({'passphrase': part.content})
elif ('name="mode"' in part.headers.get(
content_disposition)):
metadata.update({'mode': part.content})
elif ('name="file"' in part.headers.get(
content_disposition)):
if 'name="passphrase"' in part.headers.get(content_disposition):
metadata.update({"passphrase": part.content})
elif 'name="mode"' in part.headers.get(content_disposition):
metadata.update({"mode": part.content})
elif 'name="file"' in part.headers.get(content_disposition):
certificate = part.content
except Exception as e:
LOG.warn("No certificate decode e={}".format(e))

LOG.info("_decode_certificate_payload metadata={}".format(
metadata))
LOG.info("_decode_certificate_payload metadata={}".format(metadata))
return certificate, metadata

def create_certificate(self, sysinv_client, request, rsrc):
LOG.info("create_certificate resource_info={}".format(
request.orch_job.resource_info),
extra=self.log_extra)
LOG.info(
"create_certificate resource_info={}".format(
request.orch_job.resource_info
),
extra=self.log_extra,
)
certificate_dict = jsonutils.loads(request.orch_job.resource_info)
payload = certificate_dict.get('payload')
payload = certificate_dict.get("payload")

if payload and 'expiry_date' in payload:
if payload and "expiry_date" in payload:
expiry_datetime = timeutils.normalize_time(
timeutils.parse_isotime(payload['expiry_date']))
timeutils.parse_isotime(payload["expiry_date"])
)

if timeutils.utcnow() > expiry_datetime:
LOG.info("create_certificate Certificate %s has expired at %s"
% (payload['signature'], str(expiry_datetime)))
LOG.info(
"create_certificate Certificate %s has expired at %s"
% (payload["signature"], str(expiry_datetime))
)
raise exceptions.CertificateExpiredException
else:
LOG.info("create_certificate No payload found in resource_info"
"{}".format(request.orch_job.resource_info),
extra=self.log_extra)
LOG.info(
"create_certificate No payload found in resource_info"
"{}".format(request.orch_job.resource_info),
extra=self.log_extra,
)
return

certificate, metadata = self._decode_certificate_payload(
certificate_dict)
certificate, metadata = self._decode_certificate_payload(certificate_dict)

if isinstance(payload, dict):
if payload.get('certtype') not in self.SYNC_CERTIFICATES:
if payload.get("certtype") not in self.SYNC_CERTIFICATES:
return
signature = payload.get('signature')
signature = payload.get("signature")
LOG.info("signature from dict={}".format(signature))
else:
if metadata.get('mode') not in self.SYNC_CERTIFICATES:
if metadata.get("mode") not in self.SYNC_CERTIFICATES:
return
signature = rsrc.master_id
LOG.info("signature from master_id={}".format(signature))
@@ -223,25 +254,24 @@ class SysinvSyncThread(SyncThread):
signature = rsrc.master_id
if signature and signature != self.CERTIFICATE_SIG_NULL:
icertificate = self.update_certificate(
sysinv_client,
signature,
certificate=certificate,
data=metadata)
sysinv_client, signature, certificate=certificate, data=metadata
)
else:
LOG.info("skipping signature={}".format(signature))

# Ensure subcloud resource is persisted to the DB for later
subcloud_rsrc_id = self.persist_db_subcloud_resource(
rsrc.id, signature)
subcloud_rsrc_id = self.persist_db_subcloud_resource(rsrc.id, signature)

cert_bodys = icertificate.get('certificates')
sub_certs_updated = [str(cert_body.get('signature'))
for cert_body in cert_bodys]
cert_bodys = icertificate.get("certificates")
sub_certs_updated = [
str(cert_body.get("signature")) for cert_body in cert_bodys
]

LOG.info("certificate {} {} [{}] updated with subcloud certificates:"
" {}".format(rsrc.id, subcloud_rsrc_id, signature,
sub_certs_updated),
extra=self.log_extra)
LOG.info(
"certificate {} {} [{}] updated with subcloud certificates:"
" {}".format(rsrc.id, subcloud_rsrc_id, signature, sub_certs_updated),
extra=self.log_extra,
)

def delete_certificate(self, sysinv_client, request, rsrc):
subcloud_rsrc = self.get_db_subcloud_resource(rsrc.id)
@@ -258,24 +288,28 @@ class SysinvSyncThread(SyncThread):
if not cert_to_delete:
raise dccommon_exceptions.CertificateNotFound(
region_name=self.region_name,
signature=subcloud_rsrc.subcloud_resource_id)
signature=subcloud_rsrc.subcloud_resource_id,
)
sysinv_client.delete_certificate(cert_to_delete)
except dccommon_exceptions.CertificateNotFound:
# Certificate already deleted in subcloud, carry on.
LOG.info("Certificate not in subcloud, may be already deleted",
extra=self.log_extra)
LOG.info(
"Certificate not in subcloud, may be already deleted",
extra=self.log_extra,
)
except (AttributeError, TypeError) as e:
LOG.info("delete_certificate error {}".format(e),
extra=self.log_extra)
LOG.info("delete_certificate error {}".format(e), extra=self.log_extra)
raise exceptions.SyncRequestFailedRetry

subcloud_rsrc.delete()
# Master Resource can be deleted only when all subcloud resources
# are deleted along with corresponding orch_job and orch_requests.
LOG.info("Certificate {}:{} [{}] deleted".format(
rsrc.id, subcloud_rsrc.id,
subcloud_rsrc.subcloud_resource_id),
extra=self.log_extra)
LOG.info(
"Certificate {}:{} [{}] deleted".format(
rsrc.id, subcloud_rsrc.id, subcloud_rsrc.subcloud_resource_id
),
extra=self.log_extra,
)

def sync_certificates(self, sysinv_client, request, rsrc):
switcher = {
@@ -287,11 +321,16 @@ class SysinvSyncThread(SyncThread):
func = switcher[request.orch_job.operation_type]
try:
func(sysinv_client, request, rsrc)
except (keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure) as e:
LOG.info("sync_certificates: subcloud {} is not reachable [{}]"
.format(self.region_name,
str(e)), extra=self.log_extra)
except (
keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure,
) as e:
LOG.info(
"sync_certificates: subcloud {} is not reachable [{}]".format(
self.region_name, str(e)
),
extra=self.log_extra,
)
raise exceptions.SyncRequestTimeout
except exceptions.CertificateExpiredException as e:
LOG.exception(e)
@@ -300,64 +339,68 @@ class SysinvSyncThread(SyncThread):
LOG.exception(e)
raise exceptions.SyncRequestFailedRetry

def update_user(self, sysinv_client, passwd_hash,
root_sig, passwd_expiry_days):
LOG.info("update_user={} {} {}".format(
passwd_hash, root_sig, passwd_expiry_days),
extra=self.log_extra)
def update_user(self, sysinv_client, passwd_hash, root_sig, passwd_expiry_days):
LOG.info(
"update_user={} {} {}".format(passwd_hash, root_sig, passwd_expiry_days),
extra=self.log_extra,
)

try:
iuser = sysinv_client.update_user(passwd_hash,
root_sig,
passwd_expiry_days)
iuser = sysinv_client.update_user(passwd_hash, root_sig, passwd_expiry_days)
return iuser
except (AttributeError, TypeError) as e:
LOG.info("update_user error {} region_name".format(e),
extra=self.log_extra)
LOG.info("update_user error {} region_name".format(e), extra=self.log_extra)
raise exceptions.SyncRequestFailedRetry

def sync_iuser(self, sysinv_client, request, rsrc):
# The system is populated with user entry for sysadmin.
LOG.info("sync_user resource_info={}".format(
request.orch_job.resource_info),
extra=self.log_extra)
LOG.info(
"sync_user resource_info={}".format(request.orch_job.resource_info),
extra=self.log_extra,
)
user_dict = jsonutils.loads(request.orch_job.resource_info)
payload = user_dict.get('payload')
payload = user_dict.get("payload")

passwd_hash = None
if isinstance(payload, list):
for ipayload in payload:
if ipayload.get('path') == '/passwd_hash':
passwd_hash = ipayload.get('value')
elif ipayload.get('path') == '/root_sig':
root_sig = ipayload.get('value')
elif ipayload.get('path') == '/passwd_expiry_days':
passwd_expiry_days = ipayload.get('value')
if ipayload.get("path") == "/passwd_hash":
passwd_hash = ipayload.get("value")
elif ipayload.get("path") == "/root_sig":
root_sig = ipayload.get("value")
elif ipayload.get("path") == "/passwd_expiry_days":
passwd_expiry_days = ipayload.get("value")
else:
passwd_hash = payload.get('passwd_hash')
root_sig = payload.get('root_sig')
passwd_expiry_days = payload.get('passwd_expiry_days')
passwd_hash = payload.get("passwd_hash")
root_sig = payload.get("root_sig")
passwd_expiry_days = payload.get("passwd_expiry_days")

LOG.info("sync_user from dict passwd_hash={} root_sig={} "
"passwd_expiry_days={}".format(
passwd_hash, root_sig, passwd_expiry_days),
extra=self.log_extra)
LOG.info(
"sync_user from dict passwd_hash={} root_sig={} "
"passwd_expiry_days={}".format(passwd_hash, root_sig, passwd_expiry_days),
extra=self.log_extra,
)

if not passwd_hash:
LOG.info("sync_user no user update found in resource_info"
"{}".format(request.orch_job.resource_info),
extra=self.log_extra)
LOG.info(
"sync_user no user update found in resource_info"
"{}".format(request.orch_job.resource_info),
extra=self.log_extra,
)
return

iuser = self.update_user(sysinv_client, passwd_hash, root_sig,
passwd_expiry_days)
iuser = self.update_user(
sysinv_client, passwd_hash, root_sig, passwd_expiry_days
)

# Ensure subcloud resource is persisted to the DB for later
subcloud_rsrc_id = self.persist_db_subcloud_resource(
rsrc.id, iuser.uuid)
LOG.info("User sysadmin {}:{} [{}] updated"
.format(rsrc.id, subcloud_rsrc_id, passwd_hash),
extra=self.log_extra)
subcloud_rsrc_id = self.persist_db_subcloud_resource(rsrc.id, iuser.uuid)
LOG.info(
"User sysadmin {}:{} [{}] updated".format(
rsrc.id, subcloud_rsrc_id, passwd_hash
),
extra=self.log_extra,
)

def sync_fernet_repo(self, sysinv_client, request, rsrc):
switcher = {
@@ -369,83 +412,108 @@ class SysinvSyncThread(SyncThread):
func = switcher[request.orch_job.operation_type]
try:
func(sysinv_client, request, rsrc)
except (keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure) as e:
LOG.info("sync_fernet_resources: subcloud {} is not reachable [{}]"
.format(self.region_name,
str(e)), extra=self.log_extra)
except (
keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure,
) as e:
LOG.info(
"sync_fernet_resources: subcloud {} is not reachable [{}]".format(
self.region_name, str(e)
),
extra=self.log_extra,
)
raise exceptions.SyncRequestTimeout
except Exception as e:
LOG.exception(e)
raise exceptions.SyncRequestFailedRetry

def create_fernet_repo(self, sysinv_client, request, rsrc):
LOG.info("create_fernet_repo region {} resource_info={}".format(
self.region_name,
request.orch_job.resource_info),
extra=self.log_extra)
LOG.info(
"create_fernet_repo region {} resource_info={}".format(
self.region_name, request.orch_job.resource_info
),
extra=self.log_extra,
)
resource_info = jsonutils.loads(request.orch_job.resource_info)

try:
sysinv_client.post_fernet_repo(
FernetKeyManager.from_resource_info(resource_info))
FernetKeyManager.from_resource_info(resource_info)
)
# Ensure subcloud resource is persisted to the DB for later
subcloud_rsrc_id = self.persist_db_subcloud_resource(
rsrc.id, rsrc.master_id)
rsrc.id, rsrc.master_id
)
except (AttributeError, TypeError) as e:
LOG.info("create_fernet_repo error {}".format(e),
extra=self.log_extra)
LOG.info("create_fernet_repo error {}".format(e), extra=self.log_extra)
raise exceptions.SyncRequestFailedRetry

LOG.info("fernet_repo {} {} {} created".format(rsrc.id,
subcloud_rsrc_id, resource_info),
extra=self.log_extra)
LOG.info(
"fernet_repo {} {} {} created".format(
rsrc.id, subcloud_rsrc_id, resource_info
),
extra=self.log_extra,
)

def update_fernet_repo(self, sysinv_client, request, rsrc):
LOG.info("update_fernet_repo region {} resource_info={}".format(
self.region_name,
request.orch_job.resource_info),
extra=self.log_extra)
LOG.info(
"update_fernet_repo region {} resource_info={}".format(
self.region_name, request.orch_job.resource_info
),
extra=self.log_extra,
)
resource_info = jsonutils.loads(request.orch_job.resource_info)

try:
sysinv_client.put_fernet_repo(
FernetKeyManager.from_resource_info(resource_info))
FernetKeyManager.from_resource_info(resource_info)
)
# Ensure subcloud resource is persisted to the DB for later
subcloud_rsrc_id = self.persist_db_subcloud_resource(
rsrc.id, rsrc.master_id)
rsrc.id, rsrc.master_id
)
except (AttributeError, TypeError) as e:
LOG.info("update_fernet_repo error {}".format(e),
extra=self.log_extra)
LOG.info("update_fernet_repo error {}".format(e), extra=self.log_extra)
raise exceptions.SyncRequestFailedRetry

LOG.info("fernet_repo {} {} {} update".format(rsrc.id,
subcloud_rsrc_id, resource_info),
extra=self.log_extra)
LOG.info(
"fernet_repo {} {} {} update".format(
rsrc.id, subcloud_rsrc_id, resource_info
),
extra=self.log_extra,
)

# SysInv Audit Related
def get_master_resources(self, resource_type):
LOG.debug("get_master_resources thread:{}".format(
threading.currentThread().getName()), extra=self.log_extra)
LOG.debug(
"get_master_resources thread:{}".format(
threading.currentThread().getName()
),
extra=self.log_extra,
)
try:
if resource_type == consts.RESOURCE_TYPE_SYSINV_CERTIFICATE:
return self.get_certificates_resources(
self.get_master_sysinv_client())
return self.get_certificates_resources(self.get_master_sysinv_client())
elif resource_type == consts.RESOURCE_TYPE_SYSINV_USER:
return [self.get_user_resource(self.get_master_sysinv_client())]
elif resource_type == consts.RESOURCE_TYPE_SYSINV_FERNET_REPO:
return [self.get_fernet_resources(self.get_master_sysinv_client())]
else:
LOG.error("Wrong resource type {}".format(resource_type),
extra=self.log_extra)
LOG.error(
"Wrong resource type {}".format(resource_type), extra=self.log_extra
)
return None
except Exception as e:
LOG.exception(e)
return None

def get_subcloud_resources(self, resource_type):
LOG.debug("get_subcloud_resources thread:{}".format(
threading.currentThread().getName()), extra=self.log_extra)
LOG.debug(
"get_subcloud_resources thread:{}".format(
threading.currentThread().getName()
),
extra=self.log_extra,
)
try:
if resource_type == consts.RESOURCE_TYPE_SYSINV_CERTIFICATE:
return self.get_certificates_resources(self.get_sc_sysinv_client())
@@ -454,28 +522,36 @@ class SysinvSyncThread(SyncThread):
elif resource_type == consts.RESOURCE_TYPE_SYSINV_FERNET_REPO:
return [self.get_fernet_resources(self.get_sc_sysinv_client())]
else:
LOG.error("Wrong resource type {}".format(resource_type),
extra=self.log_extra)
LOG.error(
"Wrong resource type {}".format(resource_type), extra=self.log_extra
)
return None
except (exceptions.ConnectionRefused, exceptions.TimeOut,
keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure) as e:
LOG.info("get subcloud_resources {}: subcloud {} is not reachable"
"[{}]".format(resource_type,
self.region_name,
str(e)), extra=self.log_extra)
except (
exceptions.ConnectionRefused,
exceptions.TimeOut,
keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure,
) as e:
LOG.info(
"get subcloud_resources {}: subcloud {} is not reachable"
"[{}]".format(resource_type, self.region_name, str(e)),
extra=self.log_extra,
)
# None will force skip of audit
return None
except exceptions.NotAuthorized as e:
LOG.info("get subcloud_resources {}: subcloud {} not authorized"
"[{}]".format(resource_type,
self.region_name,
str(e)), extra=self.log_extra)
LOG.info(
"get subcloud_resources {}: subcloud {} not authorized"
"[{}]".format(resource_type, self.region_name, str(e)),
extra=self.log_extra,
)
OpenStackDriver.delete_region_clients(self.region_name)
return None
except (AttributeError, TypeError) as e:
LOG.info("get subcloud_resources {} error {}".format(
resource_type, e), extra=self.log_extra)
LOG.info(
"get subcloud_resources {} error {}".format(resource_type, e),
extra=self.log_extra,
)
return None
except Exception as e:
LOG.exception(e)
@@ -484,18 +560,19 @@ class SysinvSyncThread(SyncThread):
def post_audit(self):
# TODO(lzhu1): This should be revisited once the master cache service
# is implemented.
OpenStackDriver.delete_region_clients_for_thread(self.region_name, "audit")
OpenStackDriver.delete_region_clients_for_thread(
self.region_name, 'audit')
OpenStackDriver.delete_region_clients_for_thread(
dccommon_consts.CLOUD_0, 'audit')
dccommon_consts.CLOUD_0, "audit"
)

def get_certificates_resources(self, sysinv_client):
certificate_list = sysinv_client.get_certificates()
# Only sync the specified certificates to subclouds
filtered_list = [certificate
for certificate in certificate_list
if certificate.certtype in
self.SYNC_CERTIFICATES]
filtered_list = [
certificate
for certificate in certificate_list
if certificate.certtype in self.SYNC_CERTIFICATES
]
return filtered_list

def get_user_resource(self, sysinv_client):
@@ -507,15 +584,15 @@ class SysinvSyncThread(SyncThread):

def get_resource_id(self, resource_type, resource):
if resource_type == consts.RESOURCE_TYPE_SYSINV_CERTIFICATE:
if hasattr(resource, 'signature'):
LOG.debug("get_resource_id signature={}".format(
resource.signature))
if hasattr(resource, "signature"):
LOG.debug("get_resource_id signature={}".format(resource.signature))
if resource.signature is None:
return self.CERTIFICATE_SIG_NULL
return resource.signature
elif hasattr(resource, 'master_id'):
LOG.debug("get_resource_id master_id signature={}".format(
resource.master_id))
elif hasattr(resource, "master_id"):
LOG.debug(
"get_resource_id master_id signature={}".format(resource.master_id)
)
if resource.master_id is None:
# master_id cannot be None
return self.CERTIFICATE_SIG_NULL
@@ -524,52 +601,56 @@ class SysinvSyncThread(SyncThread):
LOG.error("no get_resource_id for certificate")
return self.CERTIFICATE_SIG_NULL
elif resource_type == consts.RESOURCE_TYPE_SYSINV_FERNET_REPO:
LOG.debug("get_resource_id {} resource={}".format(
resource_type, resource))
LOG.debug("get_resource_id {} resource={}".format(resource_type, resource))
return FERNET_REPO_MASTER_ID
else:
if hasattr(resource, 'uuid'):
LOG.debug("get_resource_id {} uuid={}".format(
resource_type, resource.uuid))
if hasattr(resource, "uuid"):
LOG.debug(
"get_resource_id {} uuid={}".format(resource_type, resource.uuid)
)
return resource.uuid
else:
LOG.debug("get_resource_id NO uuid resource_type={}".format(
resource_type))
LOG.debug(
"get_resource_id NO uuid resource_type={}".format(resource_type)
)
return self.RESOURCE_UUID_NULL # master_id cannot be None

def same_certificate(self, i1, i2):
LOG.debug("same_certificate i1={}, i2={}".format(i1, i2),
extra=self.log_extra)
LOG.debug("same_certificate i1={}, i2={}".format(i1, i2), extra=self.log_extra)
same = True
if i1.signature and (i1.signature != i2.signature):
if i1.signature == self.CERTIFICATE_SIG_NULL:
return True
same = False
if ((i1.expiry_date and i1.expiry_date != i2.expiry_date) or
(i1.start_date and i1.start_date != i2.start_date)):
if (i1.expiry_date and i1.expiry_date != i2.expiry_date) or (
i1.start_date and i1.start_date != i2.start_date
):
same = False

if not same:
LOG.info("same_certificate differs i1={}, i2={}".format(i1, i2),
extra=self.log_extra)
LOG.info(
"same_certificate differs i1={}, i2={}".format(i1, i2),
extra=self.log_extra,
)

return same

def same_user(self, i1, i2):
LOG.debug("same_user i1={}, i2={}".format(i1, i2),
extra=self.log_extra)
LOG.debug("same_user i1={}, i2={}".format(i1, i2), extra=self.log_extra)
same_user = True
if (i1.passwd_hash != i2.passwd_hash or
i1.passwd_expiry_days != i2.passwd_expiry_days):
if (
i1.passwd_hash != i2.passwd_hash
or i1.passwd_expiry_days != i2.passwd_expiry_days
):
same_user = False
return same_user

def same_fernet_key(self, i1, i2):
LOG.debug("same_fernet_repo i1={}, i2={}".format(i1, i2),
extra=self.log_extra)
LOG.debug("same_fernet_repo i1={}, i2={}".format(i1, i2), extra=self.log_extra)
same_fernet = True
if (FernetKeyManager.get_resource_hash(i1) !=
FernetKeyManager.get_resource_hash(i2)):
if FernetKeyManager.get_resource_hash(i1) != FernetKeyManager.get_resource_hash(
i2
):
same_fernet = False
return same_fernet

@@ -581,47 +662,59 @@ class SysinvSyncThread(SyncThread):
elif resource_type == consts.RESOURCE_TYPE_SYSINV_FERNET_REPO:
return self.same_fernet_key(m_resource, sc_resource)
else:
LOG.warn("same_resource() unexpected resource_type {}".format(
resource_type),
extra=self.log_extra)
LOG.warn(
"same_resource() unexpected resource_type {}".format(resource_type),
extra=self.log_extra,
)

def audit_discrepancy(self, resource_type, m_resource, sc_resources):
# Return true to try the audit_action
if (resource_type in self.SYSINV_MODIFY_RESOURCES or
resource_type in self.SYSINV_CREATE_RESOURCES):
if (
resource_type in self.SYSINV_MODIFY_RESOURCES
or resource_type in self.SYSINV_CREATE_RESOURCES
):
# The resource differs, signal to perform the audit_action
return True

LOG.info("audit_discrepancy resource_type {} default action".format(
resource_type), extra=self.log_extra)
LOG.info(
"audit_discrepancy resource_type {} default action".format(resource_type),
extra=self.log_extra,
)
return False

def audit_action(self, resource_type, finding, resource, sc_source=None):
if resource_type in self.SYSINV_MODIFY_RESOURCES:
LOG.info("audit_action: {}/{}"
.format(finding, resource_type),
extra=self.log_extra)
LOG.info(
"audit_action: {}/{}".format(finding, resource_type),
extra=self.log_extra,
)
num_of_audit_jobs = 0
if finding == AUDIT_RESOURCE_MISSING:
# The missing resource should be created by underlying subcloud
# thus action is to update for a 'missing' resource
# should not get here since audit discrepency will handle this
resource_id = self.get_resource_id(resource_type, resource)
self.schedule_work(self.endpoint_type, resource_type,
resource_id,
consts.OPERATION_TYPE_PATCH,
self.get_resource_info(
resource_type, resource))
self.schedule_work(
self.endpoint_type,
resource_type,
resource_id,
consts.OPERATION_TYPE_PATCH,
self.get_resource_info(resource_type, resource),
)
num_of_audit_jobs += 1
else:
LOG.warn("unexpected finding {} resource_type {}".format(
finding, resource_type),
extra=self.log_extra)
LOG.warn(
"unexpected finding {} resource_type {}".format(
finding, resource_type
),
extra=self.log_extra,
)
return num_of_audit_jobs
elif resource_type in self.SYSINV_CREATE_RESOURCES:
LOG.info("audit_action: {}/{}"
.format(finding, resource_type),
extra=self.log_extra)
LOG.info(
"audit_action: {}/{}".format(finding, resource_type),
extra=self.log_extra,
)
# Default actions are create & delete. Can be overridden
# in resource implementation
num_of_audit_jobs = 0
@@ -638,46 +731,60 @@ class SysinvSyncThread(SyncThread):
if finding == AUDIT_RESOURCE_MISSING:
# default action is create for a 'missing' resource
self.schedule_work(
self.endpoint_type, resource_type,
self.endpoint_type,
resource_type,
resource_id,
consts.OPERATION_TYPE_CREATE,
self.get_resource_info(
resource_type, resource,
consts.OPERATION_TYPE_CREATE))
resource_type, resource, consts.OPERATION_TYPE_CREATE
),
)
num_of_audit_jobs += 1
elif finding == AUDIT_RESOURCE_EXTRA:
# default action is delete for a 'extra' resource
self.schedule_work(self.endpoint_type, resource_type,
resource_id,
consts.OPERATION_TYPE_DELETE)
self.schedule_work(
self.endpoint_type,
resource_type,
resource_id,
consts.OPERATION_TYPE_DELETE,
)
num_of_audit_jobs += 1
return num_of_audit_jobs
else: # use default audit_action
return super(SysinvSyncThread, self).audit_action(
resource_type,
finding,
resource)
resource_type, finding, resource
)

def get_resource_info(self, resource_type,
resource, operation_type=None):
payload_resources = [consts.RESOURCE_TYPE_SYSINV_CERTIFICATE,
consts.RESOURCE_TYPE_SYSINV_USER]
def get_resource_info(self, resource_type, resource, operation_type=None):
payload_resources = [
consts.RESOURCE_TYPE_SYSINV_CERTIFICATE,
consts.RESOURCE_TYPE_SYSINV_USER,
]
if resource_type in payload_resources:
if 'payload' not in resource._info:
if "payload" not in resource._info:
dumps = jsonutils.dumps({"payload": resource._info})
else:
dumps = jsonutils.dumps(resource._info)
LOG.info("get_resource_info resource_type={} dumps={}".format(
resource_type, dumps),
extra=self.log_extra)
LOG.info(
"get_resource_info resource_type={} dumps={}".format(
resource_type, dumps
),
extra=self.log_extra,
)
return dumps
elif resource_type == consts.RESOURCE_TYPE_SYSINV_FERNET_REPO:
LOG.info("get_resource_info resource_type={} resource={}".format(
resource_type, resource), extra=self.log_extra)
LOG.info(
"get_resource_info resource_type={} resource={}".format(
resource_type, resource
),
extra=self.log_extra,
)
return jsonutils.dumps(resource)
else:
LOG.warn("get_resource_info unsupported resource {}".format(
resource_type),
extra=self.log_extra)
LOG.warn(
"get_resource_info unsupported resource {}".format(resource_type),
extra=self.log_extra,
)
return super(SysinvSyncThread, self).get_resource_info(
resource_type, resource, operation_type)
resource_type, resource, operation_type
)

@@ -1,4 +1,4 @@
|
||||
# Copyright 2017-2018 Wind River
|
||||
# Copyright 2017-2018, 2024 Wind River
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -32,23 +32,22 @@ class VolumeSyncThread(SyncThread):
|
||||
"""Manages tasks related to resource management for cinder."""
|
||||
|
||||
def __init__(self, subcloud_name, endpoint_type=None, engine_id=None):
|
||||
super(VolumeSyncThread, self).__init__(subcloud_name,
|
||||
endpoint_type=endpoint_type,
|
||||
engine_id=engine_id)
|
||||
super(VolumeSyncThread, self).__init__(
|
||||
subcloud_name, endpoint_type=endpoint_type, engine_id=engine_id
|
||||
)
|
||||
self.region_name = subcloud_name
|
||||
self.endpoint_type = consts.ENDPOINT_TYPE_VOLUME
|
||||
self.sync_handler_map = {
|
||||
consts.RESOURCE_TYPE_VOLUME_QUOTA_SET: self.sync_volume_resource,
|
||||
consts.RESOURCE_TYPE_VOLUME_QUOTA_CLASS_SET:
|
||||
self.sync_volume_resource,
|
||||
consts.RESOURCE_TYPE_VOLUME_QUOTA_CLASS_SET: self.sync_volume_resource,
|
||||
}
|
||||
self.audit_resources = [
|
||||
consts.RESOURCE_TYPE_VOLUME_QUOTA_CLASS_SET,
|
||||
# note: no audit here for quotas, that's handled separately
|
||||
]
|
||||
self.log_extra = {"instance": "{}/{}: ".format(
|
||||
self.region_name,
|
||||
self.endpoint_type)}
|
||||
self.log_extra = {
|
||||
"instance": "{}/{}: ".format(self.region_name, self.endpoint_type)
|
||||
}
|
||||
# define the subcloud clients
|
||||
self.sc_cinder_client = None
|
||||
self.initialize()
|
||||
@@ -56,11 +55,13 @@ class VolumeSyncThread(SyncThread):
|
||||
|
||||
def initialize_sc_clients(self):
|
||||
super(VolumeSyncThread, self).initialize_sc_clients()
|
||||
if (not self.sc_cinder_client and self.sc_admin_session):
|
||||
if not self.sc_cinder_client and self.sc_admin_session:
|
||||
self.sc_cinder_client = cinderclient.Client(
|
||||
"3.0", session=self.sc_admin_session,
|
||||
"3.0",
|
||||
session=self.sc_admin_session,
|
||||
endpoint_type=dccommon_consts.KS_ENDPOINT_ADMIN,
|
||||
region_name=self.region_name)
|
||||
region_name=self.region_name,
|
||||
)
|
||||
|
||||
def initialize(self):
|
||||
# Subcloud may be enabled a while after being added.
|
||||
@@ -69,9 +70,11 @@ class VolumeSyncThread(SyncThread):
|
||||
# get the most up-to-date service catalog.
|
||||
super(VolumeSyncThread, self).initialize()
|
||||
self.m_cinder_client = cinderclient.Client(
|
||||
"3.0", session=self.admin_session,
|
||||
"3.0",
|
||||
session=self.admin_session,
|
||||
endpoint_type=dccommon_consts.KS_ENDPOINT_INTERNAL,
|
||||
region_name=dccommon_consts.VIRTUAL_MASTER_CLOUD)
|
||||
region_name=dccommon_consts.VIRTUAL_MASTER_CLOUD,
|
||||
)
|
||||
|
||||
self.initialize_sc_clients()
|
||||
LOG.info("session and clients initialized", extra=self.log_extra)
|
||||
@@ -81,26 +84,33 @@ class VolumeSyncThread(SyncThread):
|
||||
# Invoke function with name format "operationtype_resourcetype".
|
||||
# For example: create_flavor()
|
||||
try:
|
||||
func_name = request.orch_job.operation_type + \
|
||||
"_" + rsrc.resource_type
|
||||
func_name = request.orch_job.operation_type + "_" + rsrc.resource_type
|
||||
getattr(self, func_name)(request, rsrc)
|
||||
except keystone_exceptions.EndpointNotFound:
|
||||
# Cinder is optional in the subcloud, so this isn't considered
|
||||
# an error.
|
||||
LOG.info("sync_volume_resource: {} does not have a volume "
|
||||
"endpoint in keystone"
|
||||
.format(self.region_name),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"sync_volume_resource: {} does not have a volume "
|
||||
"endpoint in keystone".format(self.region_name),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
except AttributeError:
|
||||
LOG.error("{} not implemented for {}"
|
||||
.format(request.orch_job.operation_type,
|
||||
rsrc.resource_type))
|
||||
LOG.error(
|
||||
"{} not implemented for {}".format(
|
||||
request.orch_job.operation_type, rsrc.resource_type
|
||||
)
|
||||
)
|
||||
raise exceptions.SyncRequestFailed
|
||||
except (keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure) as e:
|
||||
LOG.error("sync_volume_resource: {} is not reachable [{}]"
|
||||
.format(self.region_name,
|
||||
str(e)), extra=self.log_extra)
|
||||
except (
|
||||
keystone_exceptions.connection.ConnectTimeout,
|
||||
keystone_exceptions.ConnectFailure,
|
||||
) as e:
|
||||
LOG.error(
|
||||
"sync_volume_resource: {} is not reachable [{}]".format(
|
||||
self.region_name, str(e)
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
raise exceptions.SyncRequestTimeout
|
||||
except exceptions.SyncRequestFailed:
|
||||
raise
|
||||
@@ -119,23 +129,25 @@ class VolumeSyncThread(SyncThread):

# The client code may set a tenant_id field. If so, remove it
# since it's not defined in the API.
quota_dict.pop('tenant_id', None)
quota_dict.pop("tenant_id", None)

# Calculate the new limits for this subcloud (factoring in the
# existing usage).
quota_dict = \
quota_manager.QuotaManager.calculate_subcloud_project_quotas(
project_id, user_id, quota_dict,
self.region_name)
quota_dict = quota_manager.QuotaManager.calculate_subcloud_project_quotas(
project_id, user_id, quota_dict, self.region_name
)

# Apply the limits to the subcloud.
self.sc_cinder_client.quotas.update(project_id, **quota_dict)

# Persist the subcloud resource. (Not really applicable for quotas.)
self.persist_db_subcloud_resource(rsrc.id, rsrc.master_id)
LOG.info("Updated quotas {} for tenant {} and user {}"
.format(quota_dict, rsrc.master_id, user_id),
extra=self.log_extra)
LOG.info(
"Updated quotas {} for tenant {} and user {}".format(
quota_dict, rsrc.master_id, user_id
),
extra=self.log_extra,
)

def delete_volume_quota_set(self, request, rsrc):
# When deleting the quota-set in the master cloud, we don't actually
@@ -149,14 +161,13 @@ class VolumeSyncThread(SyncThread):
quota_dict = self.m_cinder_client.quotas.get(project_id).to_dict()

# Remove the 'id' key before doing calculations.
quota_dict.pop('id', None)
quota_dict.pop("id", None)

# Calculate the new limits for this subcloud (factoring in the
# existing usage).
quota_dict = \
quota_manager.QuotaManager.calculate_subcloud_project_quotas(
project_id, user_id, quota_dict,
self.region_name)
quota_dict = quota_manager.QuotaManager.calculate_subcloud_project_quotas(
project_id, user_id, quota_dict, self.region_name
)

# Apply the limits to the subcloud.
self.sc_cinder_client.quotas.update(project_id, **quota_dict)
@@ -175,36 +186,39 @@ class VolumeSyncThread(SyncThread):
quota_dict = jsonutils.loads(request.orch_job.resource_info)

# If this is coming from the audit we need to remove the "id" field.
quota_dict.pop('id', None)
quota_dict.pop("id", None)

# The client code may set a class name. If so, remove it since it's
# not defined in the API.
quota_dict.pop('class_name', None)
quota_dict.pop("class_name", None)

# Apply the new quota class limits to the subcloud.
self.sc_cinder_client.quota_classes.update(class_id, **quota_dict)

# Persist the subcloud resource. (Not really applicable for quotas.)
self.persist_db_subcloud_resource(rsrc.id, rsrc.master_id)
LOG.info("Updated quota classes {} for class {}"
.format(quota_dict, rsrc.master_id),
extra=self.log_extra)
LOG.info(
"Updated quota classes {} for class {}".format(quota_dict, rsrc.master_id),
extra=self.log_extra,
)

# ---- Override common audit functions ----
def get_resource_id(self, resource_type, resource):
if resource_type == consts.RESOURCE_TYPE_VOLUME_QUOTA_CLASS_SET:
# We only care about the default class.
return 'default'
return "default"
else:
return super(VolumeSyncThread, self).get_resource_id(
resource_type, resource)
resource_type, resource
)

def get_resource_info(self, resource_type, resource, operation_type=None):
if resource_type == consts.RESOURCE_TYPE_VOLUME_QUOTA_CLASS_SET:
return jsonutils.dumps(resource._info)
else:
return super(VolumeSyncThread, self).get_resource_info(
resource_type, resource, operation_type)
resource_type, resource, operation_type
)

def get_subcloud_resources(self, resource_type):
if resource_type == consts.RESOURCE_TYPE_VOLUME_QUOTA_CLASS_SET:
@@ -212,16 +226,18 @@ class VolumeSyncThread(SyncThread):
self.initialize_sc_clients()
return self.get_quota_class_resources(self.sc_cinder_client)
else:
LOG.error("Wrong resource type {}".format(resource_type),
extra=self.log_extra)
LOG.error(
"Wrong resource type {}".format(resource_type), extra=self.log_extra
)
return None

def get_master_resources(self, resource_type):
if resource_type == consts.RESOURCE_TYPE_VOLUME_QUOTA_CLASS_SET:
return self.get_quota_class_resources(self.m_cinder_client)
else:
LOG.error("Wrong resource type {}".format(resource_type),
extra=self.log_extra)
LOG.error(
"Wrong resource type {}".format(resource_type), extra=self.log_extra
)
return None

def same_resource(self, resource_type, m_resource, sc_resource):
@@ -243,19 +259,25 @@ class VolumeSyncThread(SyncThread):
# We only care about the "default" class since it's the only one
# that actually affects cinder.
try:
quota_class = nc.quota_classes.get('default')
quota_class = nc.quota_classes.get("default")
return [quota_class]
except (keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure) as e:
LOG.info("get_quota_class: subcloud {} is not reachable [{}]"
.format(self.region_name,
str(e)), extra=self.log_extra)
except (
keystone_exceptions.connection.ConnectTimeout,
keystone_exceptions.ConnectFailure,
) as e:
LOG.info(
"get_quota_class: subcloud {} is not reachable [{}]".format(
self.region_name, str(e)
),
extra=self.log_extra,
)
return None
except keystone_exceptions.EndpointNotFound:
LOG.info("get_quota_class: subcloud {} does not have a volume "
"endpoint in keystone"
.format(self.region_name),
extra=self.log_extra)
LOG.info(
"get_quota_class: subcloud {} does not have a volume "
"endpoint in keystone".format(self.region_name),
extra=self.log_extra,
)
return None
except Exception as e:
LOG.exception(e)

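The hunks above repeat two mechanical transformations: string literals are normalized to double quotes, and calls that no longer fit on one line are exploded to one argument per line with a trailing comma. The sketch below is illustrative only and is not part of the patch; fake_enqueue and its arguments are made up for the example.

def fake_enqueue(context, endpoint_type, resource_type, master_id, operation_type):
    """Toy stand-in used only to show the call layout Black produces."""
    return (context, endpoint_type, resource_type, master_id, operation_type)


# Hanging-indent style before Black:
# fake_enqueue('ctx', 'platform', 'fernet_repo',
#              'keys', 'create')
#
# Layout Black produces when the call exceeds the line length (or already
# carries a trailing comma): double quotes and one argument per line.
result = fake_enqueue(
    "ctx",
    "platform",
    "fernet_repo",
    "keys",
    "create",
)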
@@ -50,18 +50,18 @@ from keystoneclient import client as keystoneclient
LOG = logging.getLogger(__name__)

# sync request states, should be in SyncRequest class
STATE_QUEUED = 'queued'
STATE_IN_PROGRESS = 'in-progress'
STATE_TIMEDOUT = 'timedout'
STATE_ABORTED = 'aborted'
STATE_FAILED = 'failed'
STATE_COMPLETED = 'completed'
STATE_QUEUED = "queued"
STATE_IN_PROGRESS = "in-progress"
STATE_TIMEDOUT = "timedout"
STATE_ABORTED = "aborted"
STATE_FAILED = "failed"
STATE_COMPLETED = "completed"

# Audit findings
AUDIT_RESOURCE_MISSING = 'missing'
AUDIT_RESOURCE_EXTRA = 'extra_resource'
AUDIT_RESOURCE_MISSING = "missing"
AUDIT_RESOURCE_EXTRA = "extra_resource"

AUDIT_LOCK_NAME = 'dcorch-audit'
AUDIT_LOCK_NAME = "dcorch-audit"


def get_master_os_client(region_clients=None):
@@ -70,12 +70,13 @@ def get_master_os_client(region_clients=None):
# sessions for the subclouds.
try:
os_client = sdk.OptimizedOpenStackDriver(
region_name=dccommon_consts.CLOUD_0,
region_clients=region_clients)
region_name=dccommon_consts.CLOUD_0, region_clients=region_clients
)
except Exception as e:
LOG.error(
"Failed to get os_client for "
f"{dccommon_consts.CLOUD_0}/{region_clients}: {e}.")
f"{dccommon_consts.CLOUD_0}/{region_clients}: {e}."
)
raise e
return os_client

@@ -93,10 +94,11 @@ class SyncThread(object):
# used by the audit to cache the master resources
master_resources_dict = collections.defaultdict(dict)

def __init__(self, subcloud_name, endpoint_type=None, management_ip=None,
engine_id=None):
self.endpoint_type = endpoint_type # endpoint type
self.subcloud_name = subcloud_name # subcloud name
def __init__(
self, subcloud_name, endpoint_type=None, management_ip=None, engine_id=None
):
self.endpoint_type = endpoint_type # endpoint type
self.subcloud_name = subcloud_name # subcloud name
self.management_ip = management_ip
self.engine_id = engine_id
self.ctxt = context.get_admin_context()
@@ -104,8 +106,7 @@ class SyncThread(object):
self.master_region_name = dccommon_consts.CLOUD_0
self.audit_resources = []

self.log_extra = {
"instance": self.subcloud_name + ": "}
self.log_extra = {"instance": self.subcloud_name + ": "}
self.dcmanager_state_rpc_client = dcmanager_rpc_client.SubcloudStateClient()
self.dcmanager_rpc_client = dcmanager_rpc_client.ManagerClient()

@@ -118,9 +119,7 @@ class SyncThread(object):
def should_exit(self):
# Return whether the sync/audit threads should exit.
try:
db_api.subcloud_sync_get(
self.ctxt, self.subcloud_name,
self.endpoint_type)
db_api.subcloud_sync_get(self.ctxt, self.subcloud_name, self.endpoint_type)
except exceptions.SubcloudSyncNotFound:
return True

@@ -138,7 +137,8 @@ class SyncThread(object):
# We only enable syncing if the subcloud is online and the initial
# sync has completed.
if subcloud.availability_status == dccommon_consts.AVAILABILITY_ONLINE and (
subcloud.initial_sync_state == consts.INITIAL_SYNC_STATE_COMPLETED):
subcloud.initial_sync_state == consts.INITIAL_SYNC_STATE_COMPLETED
):
return True
else:
return False
@@ -156,7 +156,8 @@ class SyncThread(object):
config.password,
config.project_name,
config.project_domain_name,
timeout=60)
timeout=60,
)
elif self.endpoint_type in dccommon_consts.ENDPOINT_TYPES_LIST_OS:
config = cfg.CONF.openstack_cache
self.admin_session = OptimizedEndpointCache.get_admin_session(
@@ -166,32 +167,34 @@ class SyncThread(object):
config.admin_password,
config.admin_tenant,
config.admin_project_domain_name,
timeout=60)
timeout=60,
)
else:
raise exceptions.EndpointNotSupported(
endpoint=self.endpoint_type)
raise exceptions.EndpointNotSupported(endpoint=self.endpoint_type)

# keystone client
self.ks_client = keystoneclient.Client(
session=self.admin_session,
region_name=dccommon_consts.CLOUD_0)
session=self.admin_session, region_name=dccommon_consts.CLOUD_0
)
# dcdbsync client
self.dbs_client = dbsyncclient.Client(
endpoint_type=consts.DBS_ENDPOINT_INTERNAL,
session=self.admin_session,
region_name=dccommon_consts.CLOUD_0)
region_name=dccommon_consts.CLOUD_0,
)

def initialize_sc_clients(self):
# base implementation of initializing the subcloud specific
# clients, only used by the subclasses.
# The specific SyncThread subclasses may extend this
if (not self.sc_admin_session):
if not self.sc_admin_session:
# Subclouds will use token from the Subcloud specific Keystone,
# so define a session against that subcloud's keystone endpoint
self.sc_auth_url = build_subcloud_endpoint(
self.management_ip, 'keystone')
LOG.debug(f"Built sc_auth_url {self.sc_auth_url} for subcloud "
f"{self.subcloud_name}")
self.sc_auth_url = build_subcloud_endpoint(self.management_ip, "keystone")
LOG.debug(
f"Built sc_auth_url {self.sc_auth_url} for subcloud "
f"{self.subcloud_name}"
)

if self.endpoint_type in dccommon_consts.ENDPOINT_TYPES_LIST:
config = cfg.CONF.endpoint_cache
@@ -202,7 +205,8 @@ class SyncThread(object):
config.password,
config.project_name,
config.project_domain_name,
timeout=60)
timeout=60,
)
elif self.endpoint_type in dccommon_consts.ENDPOINT_TYPES_LIST_OS:
config = cfg.CONF.openstack_cache
self.sc_admin_session = OptimizedEndpointCache.get_admin_session(
@@ -212,7 +216,8 @@ class SyncThread(object):
config.admin_password,
config.admin_tenant,
config.admin_project_domain_name,
timeout=60)
timeout=60,
)

def initial_sync(self):
# Return True to indicate initial sync success
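Several hunks in this file (including the get_db_subcloud_resource change that follows) replace backslash line continuations with parenthesized expressions. A minimal sketch of that rewrite, assuming a made-up helper named load_defaults, is shown below; it is not part of the patch.

def load_defaults(project, user, limits, region):
    """Toy stand-in for the quota-calculation helper used in the real code."""
    return {"project": project, "user": user, "limits": limits, "region": region}


# Before: the assignment is split with a trailing backslash.
# quotas = \
#     load_defaults(project_id, user_id, quota_dict,
#                   region_name)
#
# After: Black drops the backslash and lets the arguments wrap inside the
# parentheses of the call instead.
quotas = load_defaults("admin-project", "admin", {"volumes": 10}, "subcloud1")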
@@ -225,15 +230,19 @@ class SyncThread(object):
def get_db_subcloud_resource(self, rsrc_id):
try:
subcloud = Subcloud.get_by_name(self.ctxt, self.subcloud_name)
subcloud_rsrc = \
subcloud_resource.SubcloudResource. \
get_by_resource_and_subcloud(
self.ctxt, rsrc_id, subcloud.id) # pylint: disable=E1101
subcloud_rsrc = (
subcloud_resource.SubcloudResource.get_by_resource_and_subcloud(
self.ctxt, rsrc_id, subcloud.id
)
) # pylint: disable=E1101
return subcloud_rsrc
except exceptions.SubcloudResourceNotFound:
LOG.info("{} not found in subcloud {} resource table".format(
rsrc_id, subcloud.id),
extra=self.log_extra)
LOG.info(
"{} not found in subcloud {} resource table".format(
rsrc_id, subcloud.id
),
extra=self.log_extra,
)
return None

def persist_db_subcloud_resource(self, db_rsrc_id, subcloud_rsrc_id):
@@ -248,9 +257,11 @@ class SyncThread(object):
if not subcloud_rsrc:
subcloud = Subcloud.get_by_name(self.ctxt, self.subcloud_name)
subcloud_rsrc = subcloud_resource.SubcloudResource(
self.ctxt, subcloud_resource_id=subcloud_rsrc_id,
self.ctxt,
subcloud_resource_id=subcloud_rsrc_id,
resource_id=db_rsrc_id,
subcloud_id=subcloud.id) # pylint: disable=E1101
subcloud_id=subcloud.id,
) # pylint: disable=E1101
# There is no race condition for creation of
# subcloud_resource as it is always done from the same thread.
subcloud_rsrc.create()
@@ -258,42 +269,55 @@ class SyncThread(object):
|
||||
# May be the resource was manually deleted from the subcloud.
|
||||
# So, update the dcorch DB with the new resource id from subcloud.
|
||||
subcloud_rsrc.subcloud_resource_id = subcloud_rsrc_id
|
||||
LOG.info("Updating {}:{} [{}]".format(db_rsrc_id,
|
||||
subcloud_rsrc.subcloud_resource_id, subcloud_rsrc_id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Updating {}:{} [{}]".format(
|
||||
db_rsrc_id, subcloud_rsrc.subcloud_resource_id, subcloud_rsrc_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
subcloud_rsrc.save()
|
||||
else:
|
||||
LOG.info("subcloud_rsrc {}:{} [{}] is up-to-date"
|
||||
.format(db_rsrc_id, subcloud_rsrc.subcloud_resource_id,
|
||||
subcloud_rsrc_id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"subcloud_rsrc {}:{} [{}] is up-to-date".format(
|
||||
db_rsrc_id, subcloud_rsrc.subcloud_resource_id, subcloud_rsrc_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
return subcloud_rsrc.subcloud_resource_id
|
||||
|
||||
def sync_resource(self, sync_request):
|
||||
rsrc = resource.Resource.get_by_id(self.ctxt,
|
||||
sync_request.orch_job.resource_id)
|
||||
rsrc = resource.Resource.get_by_id(self.ctxt, sync_request.orch_job.resource_id)
|
||||
# pylint: disable=E1101
|
||||
handler = self.sync_handler_map[rsrc.resource_type]
|
||||
LOG.info("{} Invoking {} for {} [{}]".format(
|
||||
self.engine_id, handler.__name__, rsrc.resource_type,
|
||||
sync_request.orch_job.operation_type), extra=self.log_extra)
|
||||
LOG.info(
|
||||
"{} Invoking {} for {} [{}]".format(
|
||||
self.engine_id,
|
||||
handler.__name__,
|
||||
rsrc.resource_type,
|
||||
sync_request.orch_job.operation_type,
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
handler(sync_request, rsrc)
|
||||
|
||||
def set_sync_status(self, sync_status, alarmable=True):
|
||||
# Only report sync_status when managed
|
||||
subcloud_managed = self.is_subcloud_managed()
|
||||
if not subcloud_managed:
|
||||
LOG.debug("set_sync_status: skip update sync update for unmanaged "
|
||||
"subcloud {}".format(
|
||||
self.subcloud_name))
|
||||
LOG.debug(
|
||||
"set_sync_status: skip update sync update for unmanaged "
|
||||
"subcloud {}".format(self.subcloud_name)
|
||||
)
|
||||
return
|
||||
|
||||
subcloud_sync = db_api.subcloud_sync_get(self.ctxt, self.subcloud_name,
|
||||
self.endpoint_type)
|
||||
subcloud_sync = db_api.subcloud_sync_get(
|
||||
self.ctxt, self.subcloud_name, self.endpoint_type
|
||||
)
|
||||
|
||||
if subcloud_sync.sync_status_report_time:
|
||||
delta = timeutils.delta_seconds(
|
||||
subcloud_sync.sync_status_report_time, timeutils.utcnow())
|
||||
subcloud_sync.sync_status_report_time, timeutils.utcnow()
|
||||
)
|
||||
if delta < 3600:
|
||||
if subcloud_sync.sync_status_reported == sync_status:
|
||||
LOG.debug(
|
||||
@@ -305,9 +329,12 @@ class SyncThread(object):
|
||||
)
|
||||
return
|
||||
|
||||
LOG.info("{}: set_sync_status {}, alarmable: {}".format(
|
||||
self.subcloud_name, sync_status, alarmable),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"{}: set_sync_status {}, alarmable: {}".format(
|
||||
self.subcloud_name, sync_status, alarmable
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
try:
|
||||
# This block is required to get the real subcloud name
|
||||
@@ -321,40 +348,53 @@ class SyncThread(object):
|
||||
|
||||
# Get the subcloud name from dcmanager database supplying
|
||||
# the dcorch region name
|
||||
subcloud_name = self.dcmanager_rpc_client \
|
||||
.get_subcloud_name_by_region_name(self.ctxt,
|
||||
dcorch_subcloud_region)
|
||||
subcloud_name = self.dcmanager_rpc_client.get_subcloud_name_by_region_name(
|
||||
self.ctxt, dcorch_subcloud_region
|
||||
)
|
||||
|
||||
# Updates the endpoint status supplying the subcloud name and
|
||||
# the region name
|
||||
self.dcmanager_state_rpc_client.update_subcloud_endpoint_status(
|
||||
self.ctxt, subcloud_name, dcorch_subcloud_region,
|
||||
self.endpoint_type, sync_status,
|
||||
alarmable=alarmable)
|
||||
self.ctxt,
|
||||
subcloud_name,
|
||||
dcorch_subcloud_region,
|
||||
self.endpoint_type,
|
||||
sync_status,
|
||||
alarmable=alarmable,
|
||||
)
|
||||
|
||||
db_api.subcloud_sync_update(
|
||||
self.ctxt, dcorch_subcloud_region, self.endpoint_type,
|
||||
values={'sync_status_reported': sync_status,
|
||||
'sync_status_report_time': timeutils.utcnow()})
|
||||
self.ctxt,
|
||||
dcorch_subcloud_region,
|
||||
self.endpoint_type,
|
||||
values={
|
||||
"sync_status_reported": sync_status,
|
||||
"sync_status_report_time": timeutils.utcnow(),
|
||||
},
|
||||
)
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
def sync(self, engine_id):
|
||||
LOG.debug("{}: starting sync routine".format(self.subcloud_name),
|
||||
extra=self.log_extra)
|
||||
LOG.debug(
|
||||
"{}: starting sync routine".format(self.subcloud_name), extra=self.log_extra
|
||||
)
|
||||
region_name = self.subcloud_name
|
||||
sync_requests = []
|
||||
|
||||
sync_requests = orchrequest.OrchRequestList.get_by_attrs(
|
||||
self.ctxt, self.endpoint_type,
|
||||
self.ctxt,
|
||||
self.endpoint_type,
|
||||
target_region_name=region_name,
|
||||
states=self.PENDING_SYNC_REQUEST_STATES)
|
||||
states=self.PENDING_SYNC_REQUEST_STATES,
|
||||
)
|
||||
|
||||
# Early exit in case there are no pending sync requests
|
||||
if not sync_requests:
|
||||
LOG.debug("Sync resources done for subcloud - "
|
||||
"no sync requests",
|
||||
extra=self.log_extra)
|
||||
LOG.debug(
|
||||
"Sync resources done for subcloud - " "no sync requests",
|
||||
extra=self.log_extra,
|
||||
)
|
||||
self.set_sync_status(dccommon_consts.SYNC_STATUS_IN_SYNC)
|
||||
return
|
||||
|
||||
@@ -391,14 +431,16 @@ class SyncThread(object):
|
||||
sync_status_start = dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
|
||||
|
||||
if not actual_sync_requests:
|
||||
LOG.info("Sync resources done for subcloud - "
|
||||
"no valid sync requests",
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Sync resources done for subcloud - " "no valid sync requests",
|
||||
extra=self.log_extra,
|
||||
)
|
||||
return
|
||||
elif not self.is_subcloud_enabled():
|
||||
LOG.info("Sync resources done for subcloud - "
|
||||
"subcloud is disabled",
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Sync resources done for subcloud - " "subcloud is disabled",
|
||||
extra=self.log_extra,
|
||||
)
|
||||
return
|
||||
|
||||
# Subcloud is enabled and there are pending sync requests, so
|
||||
@@ -431,8 +473,7 @@ class SyncThread(object):
|
||||
# completed for tracking/debugging purpose
|
||||
# and tag it for purge when its deleted
|
||||
# time exceeds the data retention period.
|
||||
request.state = \
|
||||
consts.ORCH_REQUEST_STATE_COMPLETED
|
||||
request.state = consts.ORCH_REQUEST_STATE_COMPLETED
|
||||
request.deleted = 1
|
||||
request.deleted_at = timeutils.utcnow()
|
||||
request.save()
|
||||
@@ -456,19 +497,16 @@ class SyncThread(object):
|
||||
# todo: raise "unable to sync this
|
||||
# subcloud/endpoint" alarm with fmapi
|
||||
request.try_count += 1
|
||||
request.state = \
|
||||
consts.ORCH_REQUEST_STATE_FAILED
|
||||
request.state = consts.ORCH_REQUEST_STATE_FAILED
|
||||
request.save()
|
||||
retry_count += 1
|
||||
# we'll retry
|
||||
except exceptions.SyncRequestFailed:
|
||||
request.state = \
|
||||
consts.ORCH_REQUEST_STATE_FAILED
|
||||
request.state = consts.ORCH_REQUEST_STATE_FAILED
|
||||
request.save()
|
||||
retry_count = self.MAX_RETRY
|
||||
except exceptions.SyncRequestAbortedBySystem:
|
||||
request.state = \
|
||||
consts.ORCH_REQUEST_STATE_FAILED
|
||||
request.state = consts.ORCH_REQUEST_STATE_FAILED
|
||||
request.save()
|
||||
retry_count = self.MAX_RETRY
|
||||
request_aborted = True
|
||||
@@ -481,66 +519,90 @@ class SyncThread(object):
|
||||
# Endpoint not reachable, throw away all the sync requests.
|
||||
LOG.info(
|
||||
"EndpointNotReachable, {} sync requests pending".format(
|
||||
len(actual_sync_requests)), extra=self.log_extra)
|
||||
len(actual_sync_requests)
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
# del sync_requests[:] #This fails due to:
|
||||
# 'OrchRequestList' object does not support item deletion
|
||||
|
||||
sync_requests = orchrequest.OrchRequestList.get_by_attrs(
|
||||
self.ctxt, self.endpoint_type,
|
||||
self.ctxt,
|
||||
self.endpoint_type,
|
||||
target_region_name=region_name,
|
||||
states=self.PENDING_SYNC_REQUEST_STATES)
|
||||
states=self.PENDING_SYNC_REQUEST_STATES,
|
||||
)
|
||||
|
||||
if (sync_requests and
|
||||
sync_status_start != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC):
|
||||
if (
|
||||
sync_requests
|
||||
and sync_status_start != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
|
||||
):
|
||||
self.set_sync_status(dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
|
||||
LOG.info(
|
||||
"End of resource sync out-of-sync. {} sync request(s)".format(
|
||||
len(sync_requests)), extra=self.log_extra)
|
||||
len(sync_requests)
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
elif sync_requests and request_aborted:
|
||||
if sync_status_start != dccommon_consts.SYNC_STATUS_OUT_OF_SYNC:
|
||||
self.set_sync_status(dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
|
||||
LOG.info(
|
||||
"End of resource sync out-of-sync. {} sync request(s): "
|
||||
"request_aborted".format(len(sync_requests)),
|
||||
extra=self.log_extra)
|
||||
extra=self.log_extra,
|
||||
)
|
||||
elif sync_status_start != dccommon_consts.SYNC_STATUS_IN_SYNC:
|
||||
self.set_sync_status(dccommon_consts.SYNC_STATUS_IN_SYNC)
|
||||
LOG.info(
|
||||
"End of resource sync in-sync. {} sync request(s)".format(
|
||||
len(sync_requests)), extra=self.log_extra)
|
||||
len(sync_requests)
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
LOG.info("Sync resources done for subcloud - "
|
||||
"synced {} request(s)".format(len(actual_sync_requests)),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Sync resources done for subcloud - "
|
||||
"synced {} request(s)".format(len(actual_sync_requests)),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
def run_sync_audit(self, engine_id=None):
|
||||
if self.endpoint_type in cfg.CONF.disable_audit_endpoints:
|
||||
LOG.warn("Audit disabled!", extra=self.log_extra)
|
||||
return
|
||||
LOG.debug("Engine id={}: sync_audit started".format(engine_id),
|
||||
extra=self.log_extra)
|
||||
LOG.debug(
|
||||
"Engine id={}: sync_audit started".format(engine_id), extra=self.log_extra
|
||||
)
|
||||
self.sync_audit(engine_id)
|
||||
|
||||
def sync_audit(self, engine_id):
|
||||
LOG.debug("Engine id={}: starting sync audit".format(engine_id),
|
||||
extra=self.log_extra)
|
||||
LOG.debug(
|
||||
"Engine id={}: starting sync audit".format(engine_id), extra=self.log_extra
|
||||
)
|
||||
|
||||
most_recent_failed_request = \
|
||||
most_recent_failed_request = (
|
||||
orchrequest.OrchRequest.get_most_recent_failed_request(self.ctxt)
|
||||
)
|
||||
|
||||
if most_recent_failed_request:
|
||||
LOG.debug('Most recent failed request id=%s, timestamp=%s',
|
||||
most_recent_failed_request.id,
|
||||
most_recent_failed_request.updated_at)
|
||||
LOG.debug(
|
||||
"Most recent failed request id=%s, timestamp=%s",
|
||||
most_recent_failed_request.id,
|
||||
most_recent_failed_request.updated_at,
|
||||
)
|
||||
else:
|
||||
LOG.debug('There are no failed requests.')
|
||||
LOG.debug("There are no failed requests.")
|
||||
|
||||
total_num_of_audit_jobs = 0
|
||||
for resource_type in self.audit_resources:
|
||||
if not self.is_subcloud_enabled() or self.should_exit():
|
||||
LOG.info("{}: aborting sync audit, as subcloud is disabled"
|
||||
.format(threading.currentThread().getName()),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"{}: aborting sync audit, as subcloud is disabled".format(
|
||||
threading.currentThread().getName()
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
return
|
||||
|
||||
# Skip resources with outstanding sync requests
|
||||
@@ -551,50 +613,69 @@ class SyncThread(object):
|
||||
consts.ORCH_REQUEST_IN_PROGRESS,
|
||||
]
|
||||
sync_requests = orchrequest.OrchRequestList.get_by_attrs(
|
||||
self.ctxt, self.endpoint_type, resource_type=resource_type,
|
||||
target_region_name=region_name, states=states)
|
||||
abort_resources = [req.orch_job.source_resource_id
|
||||
for req in sync_requests]
|
||||
self.ctxt,
|
||||
self.endpoint_type,
|
||||
resource_type=resource_type,
|
||||
target_region_name=region_name,
|
||||
states=states,
|
||||
)
|
||||
abort_resources = [req.orch_job.source_resource_id for req in sync_requests]
|
||||
if len(sync_requests) > 0:
|
||||
LOG.info("Will not audit {}. {} sync request(s) pending"
|
||||
.format(abort_resources, len(sync_requests)),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Will not audit {}. {} sync request(s) pending".format(
|
||||
abort_resources, len(sync_requests)
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
num_of_audit_jobs = 0
|
||||
try:
|
||||
m_resources, db_resources, sc_resources = \
|
||||
self.get_all_resources(resource_type)
|
||||
m_resources, db_resources, sc_resources = self.get_all_resources(
|
||||
resource_type
|
||||
)
|
||||
|
||||
# todo: delete entries in db_resources with no corresponding
|
||||
# entry in m_resources?
|
||||
|
||||
if sc_resources is None or m_resources is None:
|
||||
return
|
||||
LOG.debug("Audit {}".format(
|
||||
resource_type),
|
||||
extra=self.log_extra)
|
||||
LOG.debug("Auditing {}: master={} db={} sc={}".format(
|
||||
resource_type, m_resources, db_resources, sc_resources),
|
||||
extra=self.log_extra)
|
||||
LOG.debug("Audit {}".format(resource_type), extra=self.log_extra)
|
||||
LOG.debug(
|
||||
"Auditing {}: master={} db={} sc={}".format(
|
||||
resource_type, m_resources, db_resources, sc_resources
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
num_of_audit_jobs += self.audit_find_missing(
|
||||
resource_type, m_resources, db_resources, sc_resources,
|
||||
abort_resources)
|
||||
resource_type,
|
||||
m_resources,
|
||||
db_resources,
|
||||
sc_resources,
|
||||
abort_resources,
|
||||
)
|
||||
num_of_audit_jobs += self.audit_find_extra(
|
||||
resource_type, m_resources, db_resources, sc_resources,
|
||||
abort_resources)
|
||||
resource_type,
|
||||
m_resources,
|
||||
db_resources,
|
||||
sc_resources,
|
||||
abort_resources,
|
||||
)
|
||||
except Exception:
|
||||
LOG.exception("Unexpected error while auditing %s",
|
||||
resource_type)
|
||||
LOG.exception("Unexpected error while auditing %s", resource_type)
|
||||
|
||||
# Extra resources in subcloud are not impacted by the audit.
|
||||
|
||||
if not num_of_audit_jobs:
|
||||
LOG.debug("Clean audit run for {}".format(resource_type),
|
||||
extra=self.log_extra)
|
||||
LOG.debug(
|
||||
"Clean audit run for {}".format(resource_type), extra=self.log_extra
|
||||
)
|
||||
else:
|
||||
LOG.info("{} num_of_audit_jobs for {}".
|
||||
format(num_of_audit_jobs, resource_type),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"{} num_of_audit_jobs for {}".format(
|
||||
num_of_audit_jobs, resource_type
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
|
||||
total_num_of_audit_jobs += num_of_audit_jobs
|
||||
|
||||
@@ -602,7 +683,8 @@ class SyncThread(object):
|
||||
# Soft delete all failed requests in the previous sync audit.
|
||||
try:
|
||||
orchrequest.OrchRequest.delete_previous_failed_requests(
|
||||
self.ctxt, most_recent_failed_request.updated_at)
|
||||
self.ctxt, most_recent_failed_request.updated_at
|
||||
)
|
||||
except Exception:
|
||||
# shouldn't get here
|
||||
LOG.exception("Unexpected error!")
|
||||
@@ -614,13 +696,17 @@ class SyncThread(object):
|
||||
else:
|
||||
# set sync_request for this subcloud/endpoint
|
||||
db_api.subcloud_sync_update(
|
||||
self.ctxt, self.subcloud_name, self.endpoint_type,
|
||||
values={'sync_request': consts.SYNC_STATUS_REQUESTED})
|
||||
self.ctxt,
|
||||
self.subcloud_name,
|
||||
self.endpoint_type,
|
||||
values={"sync_request": consts.SYNC_STATUS_REQUESTED},
|
||||
)
|
||||
|
||||
LOG.debug("{}: done sync audit".format(
|
||||
threading.currentThread().getName()), extra=self.log_extra)
|
||||
SyncThread.set_sync_request(
|
||||
self.ctxt, self.subcloud_name, self.endpoint_type)
|
||||
LOG.debug(
|
||||
"{}: done sync audit".format(threading.currentThread().getName()),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
SyncThread.set_sync_request(self.ctxt, self.subcloud_name, self.endpoint_type)
|
||||
self.post_audit()
|
||||
|
||||
def post_audit(self):
|
||||
@@ -634,9 +720,9 @@ class SyncThread(object):
|
||||
LOG.debug("Reset the cached master resources.")
|
||||
SyncThread.master_resources_dict = collections.defaultdict(dict)
|
||||
|
||||
def audit_find_missing(self, resource_type, m_resources,
|
||||
db_resources, sc_resources,
|
||||
abort_resources):
|
||||
def audit_find_missing(
|
||||
self, resource_type, m_resources, db_resources, sc_resources, abort_resources
|
||||
):
|
||||
"""Find missing resources in subcloud.
|
||||
|
||||
- Input param db_resources is modified in this routine
|
||||
@@ -649,8 +735,10 @@ class SyncThread(object):
|
||||
for m_r in m_resources:
|
||||
master_id = self.get_resource_id(resource_type, m_r)
|
||||
if master_id in abort_resources:
|
||||
LOG.info("audit_find_missing: Aborting audit for {}"
|
||||
.format(master_id), extra=self.log_extra)
|
||||
LOG.info(
|
||||
"audit_find_missing: Aborting audit for {}".format(master_id),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
num_of_audit_jobs += 1
|
||||
# There are pending jobs for this resource, abort audit
|
||||
continue
|
||||
@@ -684,77 +772,95 @@ class SyncThread(object):
|
||||
db_sc_resource = self.get_db_subcloud_resource(m_rsrc_db.id)
|
||||
if db_sc_resource:
|
||||
if not db_sc_resource.is_managed():
|
||||
LOG.info("Resource {} is not managed"
|
||||
.format(master_id), extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Resource {} is not managed".format(master_id),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
continue
|
||||
sc_rsrc_present = False
|
||||
for sc_r in sc_resources:
|
||||
sc_id = self.get_resource_id(resource_type, sc_r)
|
||||
if sc_id == db_sc_resource.subcloud_resource_id:
|
||||
if self.same_resource(resource_type,
|
||||
m_r_updated, sc_r):
|
||||
LOG.debug("Resource type {} {} is in-sync"
|
||||
.format(resource_type, master_id),
|
||||
extra=self.log_extra)
|
||||
if self.same_resource(resource_type, m_r_updated, sc_r):
|
||||
LOG.debug(
|
||||
"Resource type {} {} is in-sync".format(
|
||||
resource_type, master_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
num_of_audit_jobs += self.audit_dependants(
|
||||
resource_type, m_r, sc_r)
|
||||
resource_type, m_r, sc_r
|
||||
)
|
||||
sc_rsrc_present = True
|
||||
break
|
||||
if not sc_rsrc_present:
|
||||
LOG.info(
|
||||
"Subcloud resource {} found in master cloud & DB, "
|
||||
"but the exact same resource not found in subcloud"
|
||||
.format(db_sc_resource.subcloud_resource_id),
|
||||
extra=self.log_extra)
|
||||
"but the exact same resource not found in subcloud".format(
|
||||
db_sc_resource.subcloud_resource_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
# Subcloud resource is present in DB, but the check
|
||||
# for same_resource() was negative. Either the resource
|
||||
# disappeared from subcloud or the resource details
|
||||
# are different from that of master cloud. Let the
|
||||
# resource implementation decide on the audit action.
|
||||
missing_resource = self.audit_discrepancy(
|
||||
resource_type, m_r, sc_resources)
|
||||
resource_type, m_r, sc_resources
|
||||
)
|
||||
else:
|
||||
LOG.info("Subcloud res {} not found in DB, will create"
|
||||
.format(master_id), extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Subcloud res {} not found in DB, will create".format(
|
||||
master_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
# Check and see if there are any subcloud resources that
|
||||
# match the master resource, and if so set up mappings.
|
||||
# This returns true if it finds a match.
|
||||
if self.map_subcloud_resource(resource_type, m_r_updated,
|
||||
m_rsrc_db, sc_resources):
|
||||
if self.map_subcloud_resource(
|
||||
resource_type, m_r_updated, m_rsrc_db, sc_resources
|
||||
):
|
||||
continue
|
||||
missing_resource = True
|
||||
|
||||
else: # master_resource not in resource DB
|
||||
LOG.info("{} not found in DB, will create it"
|
||||
.format(master_id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"{} not found in DB, will create it".format(master_id),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
# Check and see if there are any subcloud resources that
|
||||
# match the master resource, and if so set up mappings.
|
||||
# This returns true if it finds a match.
|
||||
# This is for the case where the resource is not even in dcorch
|
||||
# resource DB (ie, resource has not been tracked by dcorch yet)
|
||||
if self.map_subcloud_resource(resource_type, m_r,
|
||||
m_rsrc_db, sc_resources):
|
||||
if self.map_subcloud_resource(
|
||||
resource_type, m_r, m_rsrc_db, sc_resources
|
||||
):
|
||||
continue
|
||||
missing_resource = True
|
||||
|
||||
if missing_resource:
|
||||
# Resource is missing from subcloud, take action
|
||||
num_of_audit_jobs += self.audit_action(
|
||||
resource_type, AUDIT_RESOURCE_MISSING, m_r)
|
||||
resource_type, AUDIT_RESOURCE_MISSING, m_r
|
||||
)
|
||||
|
||||
# As the subcloud resource is missing, invoke
|
||||
# the hook for dependants with no subcloud resource.
|
||||
# Resource implementation should handle this.
|
||||
num_of_audit_jobs += self.audit_dependants(
|
||||
resource_type, m_r, None)
|
||||
if (num_of_audit_jobs != 0):
|
||||
LOG.info("audit_find_missing {} num_of_audit_jobs".
|
||||
format(num_of_audit_jobs), extra=self.log_extra)
|
||||
num_of_audit_jobs += self.audit_dependants(resource_type, m_r, None)
|
||||
if num_of_audit_jobs != 0:
|
||||
LOG.info(
|
||||
"audit_find_missing {} num_of_audit_jobs".format(num_of_audit_jobs),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
return num_of_audit_jobs
|
||||
|
||||
def audit_find_extra(self, resource_type, m_resources,
|
||||
db_resources, sc_resources, abort_resources):
|
||||
def audit_find_extra(
|
||||
self, resource_type, m_resources, db_resources, sc_resources, abort_resources
|
||||
):
|
||||
"""Find extra resources in subcloud.
|
||||
|
||||
- Input param db_resources is expected to be a
|
||||
@@ -768,21 +874,29 @@ class SyncThread(object):
|
||||
for db_resource in db_resources:
|
||||
if db_resource.master_id:
|
||||
if db_resource.master_id in abort_resources:
|
||||
LOG.info("audit_find_extra: Aborting audit for {}"
|
||||
.format(db_resource.master_id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"audit_find_extra: Aborting audit for {}".format(
|
||||
db_resource.master_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
num_of_audit_jobs += 1
|
||||
# There are pending jobs for this resource, abort audit
|
||||
continue
|
||||
|
||||
LOG.debug("Extra resource ({}) in DB".format(db_resource.id),
|
||||
extra=self.log_extra)
|
||||
LOG.debug(
|
||||
"Extra resource ({}) in DB".format(db_resource.id),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
subcloud_rsrc = self.get_db_subcloud_resource(db_resource.id)
|
||||
if subcloud_rsrc:
|
||||
if not subcloud_rsrc.is_managed():
|
||||
LOG.info("Resource {} is not managed"
|
||||
.format(subcloud_rsrc.subcloud_resource_id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Resource {} is not managed".format(
|
||||
subcloud_rsrc.subcloud_resource_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
continue
|
||||
|
||||
# check if the resource exists in subcloud, no need to
|
||||
@@ -793,20 +907,24 @@ class SyncThread(object):
|
||||
# after an audit (not through api-proxy), then user deletes
|
||||
# that resource manually in the subcloud before the
|
||||
# next audit.
|
||||
if not self.resource_exists_in_subcloud(subcloud_rsrc,
|
||||
sc_resources):
|
||||
if not self.resource_exists_in_subcloud(
|
||||
subcloud_rsrc, sc_resources
|
||||
):
|
||||
continue
|
||||
|
||||
LOG.info("Resource ({}) and subcloud resource ({}) "
|
||||
"not in sync with master cloud"
|
||||
.format(db_resource.master_id,
|
||||
subcloud_rsrc.subcloud_resource_id),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Resource ({}) and subcloud resource ({}) "
|
||||
"not in sync with master cloud".format(
|
||||
db_resource.master_id, subcloud_rsrc.subcloud_resource_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
# There is extra resource in the subcloud, take action.
|
||||
# Note that the resource is in dcorch DB, but not
|
||||
# actually present in the master cloud.
|
||||
num_of_audit_jobs += self.audit_action(
|
||||
resource_type, AUDIT_RESOURCE_EXTRA, db_resource)
|
||||
resource_type, AUDIT_RESOURCE_EXTRA, db_resource
|
||||
)
|
||||
else:
|
||||
# Resource is present in resource table, but not in
|
||||
# subcloud_resource table. We have also established that
|
||||
@@ -819,24 +937,38 @@ class SyncThread(object):
|
||||
|
||||
return num_of_audit_jobs
|
||||
|
||||
def schedule_work(self, endpoint_type, resource_type,
|
||||
source_resource_id, operation_type,
|
||||
resource_info=None):
|
||||
LOG.info("Scheduling {} work for {}/{}".format(
|
||||
operation_type, resource_type, source_resource_id),
|
||||
extra=self.log_extra)
|
||||
def schedule_work(
|
||||
self,
|
||||
endpoint_type,
|
||||
resource_type,
|
||||
source_resource_id,
|
||||
operation_type,
|
||||
resource_info=None,
|
||||
):
|
||||
LOG.info(
|
||||
"Scheduling {} work for {}/{}".format(
|
||||
operation_type, resource_type, source_resource_id
|
||||
),
|
||||
extra=self.log_extra,
|
||||
)
|
||||
try:
|
||||
subcloud = Subcloud.get_by_name(self.ctxt, self.subcloud_name)
|
||||
utils.enqueue_work(
|
||||
self.ctxt, endpoint_type, resource_type,
|
||||
source_resource_id, operation_type, resource_info,
|
||||
subcloud=subcloud)
|
||||
self.ctxt,
|
||||
endpoint_type,
|
||||
resource_type,
|
||||
source_resource_id,
|
||||
operation_type,
|
||||
resource_info,
|
||||
subcloud=subcloud,
|
||||
)
|
||||
except Exception as e:
|
||||
LOG.info("Exception in schedule_work: {}".format(str(e)),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"Exception in schedule_work: {}".format(str(e)), extra=self.log_extra
|
||||
)
|
||||
|
||||
def get_resource_id(self, resource_type, resource):
|
||||
if hasattr(resource, 'master_id'):
|
||||
if hasattr(resource, "master_id"):
|
||||
# If resource from DB, return master resource id
|
||||
# from master cloud
|
||||
return resource.master_id
|
||||
@@ -881,8 +1013,7 @@ class SyncThread(object):
|
||||
def has_same_ids(self, resource_type, m_resource, sc_resource):
|
||||
return False
|
||||
|
||||
def map_subcloud_resource(self, resource_type, m_r, m_rsrc_db,
|
||||
sc_resources):
|
||||
def map_subcloud_resource(self, resource_type, m_r, m_rsrc_db, sc_resources):
|
||||
# Child classes can override this function to map an existing subcloud
|
||||
# resource to an existing master resource. If a mapping is created
|
||||
# the function should return True.
|
||||
@@ -909,9 +1040,9 @@ class SyncThread(object):
|
||||
return True
|
||||
|
||||
def audit_action(self, resource_type, finding, resource):
|
||||
LOG.info("audit_action: {}/{}"
|
||||
.format(finding, resource_type),
|
||||
extra=self.log_extra)
|
||||
LOG.info(
|
||||
"audit_action: {}/{}".format(finding, resource_type), extra=self.log_extra
|
||||
)
|
||||
# Default actions are create & delete. Can be overridden
|
||||
# in resource implementation
|
||||
num_of_audit_jobs = 0
|
||||
@@ -920,19 +1051,24 @@ class SyncThread(object):
|
||||
if finding == AUDIT_RESOURCE_MISSING:
|
||||
# default action is create for a 'missing' resource
|
||||
self.schedule_work(
|
||||
self.endpoint_type, resource_type,
|
||||
self.endpoint_type,
|
||||
resource_type,
|
||||
resource_id,
|
||||
consts.OPERATION_TYPE_CREATE,
|
||||
self.get_resource_info(
|
||||
resource_type, resource,
|
||||
consts.OPERATION_TYPE_CREATE))
|
||||
resource_type, resource, consts.OPERATION_TYPE_CREATE
|
||||
),
|
||||
)
|
||||
num_of_audit_jobs += 1
|
||||
elif finding == AUDIT_RESOURCE_EXTRA:
|
||||
# default action is delete for an 'extra_resource'
|
||||
# resource passed in is db_resource (resource in dcorch DB)
|
||||
self.schedule_work(self.endpoint_type, resource_type,
|
||||
resource_id,
|
||||
consts.OPERATION_TYPE_DELETE)
|
||||
self.schedule_work(
|
||||
self.endpoint_type,
|
||||
resource_type,
|
||||
resource_id,
|
||||
consts.OPERATION_TYPE_DELETE,
|
||||
)
|
||||
num_of_audit_jobs += 1
|
||||
return num_of_audit_jobs
|
||||
|
||||
@@ -947,5 +1083,8 @@ class SyncThread(object):
|
||||
@classmethod
|
||||
def set_sync_request(cls, ctxt, subcloud_name, endpoint_type):
|
||||
db_api.subcloud_sync_update(
|
||||
ctxt, subcloud_name, endpoint_type,
|
||||
values={'sync_request': consts.SYNC_STATUS_REQUESTED})
|
||||
ctxt,
|
||||
subcloud_name,
|
||||
endpoint_type,
|
||||
values={"sync_request": consts.SYNC_STATUS_REQUESTED},
|
||||
)
|
||||
|
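In the test changes that follow, the reformatting also drops redundant backslash escapes inside single-quoted f-strings (for example in the resource_info fixtures). The snippet below is a quick illustrative check, not part of the patch, that the rewrite is behaviour-preserving; RESOURCE_ID is a placeholder for the constant defined in the test module.

RESOURCE_ID = 5  # placeholder; the real constant lives in the test module

with_escapes = f'{{\"id\": {RESOURCE_ID}}}'
without_escapes = f'{{"id": {RESOURCE_ID}}}'

# Inside a single-quoted string, \" and " denote the same character, so the
# two payloads are identical.
assert with_escapes == without_escapes == '{"id": 5}'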
@@ -46,7 +46,7 @@ class BaseTestIdentitySyncThread(OrchestratorTestCase, mixins.BaseMixin):
|
||||
)
|
||||
|
||||
self.method = lambda *args: None
|
||||
self.resource_name = ''
|
||||
self.resource_name = ""
|
||||
self.resource_ref = None
|
||||
self.resource_ref_name = None
|
||||
self.resource_add = lambda: None
|
||||
@@ -57,7 +57,7 @@ class BaseTestIdentitySyncThread(OrchestratorTestCase, mixins.BaseMixin):
|
||||
|
||||
def _create_request_and_resource_mocks(self):
|
||||
self.request = mock.MagicMock()
|
||||
self.request.orch_job.resource_info = f'{{\"id\": {RESOURCE_ID}}}'
|
||||
self.request.orch_job.resource_info = f'{{"id": {RESOURCE_ID}}}'
|
||||
self.request.orch_job.source_resource_id = SOURCE_RESOURCE_ID
|
||||
|
||||
self.rsrc = mock.MagicMock
|
||||
@@ -66,17 +66,19 @@ class BaseTestIdentitySyncThread(OrchestratorTestCase, mixins.BaseMixin):
|
||||
|
||||
def _create_subcloud_and_subcloud_resource(self):
|
||||
values = {
|
||||
'software_version': '10.04',
|
||||
'management_state': dccommon_consts.MANAGEMENT_MANAGED,
|
||||
'availability_status': dccommon_consts.AVAILABILITY_ONLINE,
|
||||
'initial_sync_state': '',
|
||||
'capabilities': {},
|
||||
'management_ip': '192.168.0.1'
|
||||
"software_version": "10.04",
|
||||
"management_state": dccommon_consts.MANAGEMENT_MANAGED,
|
||||
"availability_status": dccommon_consts.AVAILABILITY_ONLINE,
|
||||
"initial_sync_state": "",
|
||||
"capabilities": {},
|
||||
"management_ip": "192.168.0.1",
|
||||
}
|
||||
self.subcloud = db_api.subcloud_create(self.ctx, 'subcloud', values)
|
||||
self.subcloud = db_api.subcloud_create(self.ctx, "subcloud", values)
|
||||
self.subcloud_resource = subcloud_resource.SubcloudResource(
|
||||
self.ctx, subcloud_resource_id=self.rsrc.master_id,
|
||||
resource_id=self.rsrc.id, subcloud_id=self.subcloud.id
|
||||
self.ctx,
|
||||
subcloud_resource_id=self.rsrc.master_id,
|
||||
resource_id=self.rsrc.id,
|
||||
subcloud_id=self.subcloud.id,
|
||||
)
|
||||
self.subcloud_resource.create()
|
||||
|
||||
@@ -123,19 +125,14 @@ class BaseTestIdentitySyncThread(OrchestratorTestCase, mixins.BaseMixin):
|
||||
self.method(self.request, self.rsrc)
|
||||
|
||||
def _execute_and_assert_exception(self, exception):
|
||||
self.assertRaises(
|
||||
exception,
|
||||
self.method,
|
||||
self.request,
|
||||
self.rsrc
|
||||
)
|
||||
self.assertRaises(exception, self.method, self.request, self.rsrc)
|
||||
|
||||
def _assert_log(self, level, message, extra=mock.ANY):
|
||||
if level == 'info':
|
||||
if level == "info":
|
||||
self.log.info.assert_called_with(message, extra=extra)
|
||||
elif level == 'error':
|
||||
elif level == "error":
|
||||
self.log.error.assert_called_with(message, extra=extra)
|
||||
elif level == 'debug':
|
||||
elif level == "debug":
|
||||
self.log.debug.assert_called_with(message, extra=extra)
|
||||
|
||||
|
||||
@@ -145,14 +142,14 @@ class BaseTestIdentitySyncThreadUsers(BaseTestIdentitySyncThread):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
|
||||
self.resource_name = 'user'
|
||||
self.resource_name = "user"
|
||||
self.resource_ref = {
|
||||
self.resource_name: {'id': RESOURCE_ID},
|
||||
'local_user': {'name': 'fake value'}
|
||||
self.resource_name: {"id": RESOURCE_ID},
|
||||
"local_user": {"name": "fake value"},
|
||||
}
|
||||
self.resource_ref_name = self.resource_ref.get('local_user').get('name')
|
||||
self.resource_detail = self.identity_sync_thread.get_master_dbs_client().\
|
||||
identity_user_manager.user_detail
|
||||
self.resource_ref_name = self.resource_ref.get("local_user").get("name")
|
||||
self.dbs_client = self.identity_sync_thread.get_master_dbs_client()
|
||||
self.resource_detail = self.dbs_client.identity_user_manager.user_detail
|
||||
|
||||
|
||||
class TestIdentitySyncThreadUsersPost(
|
||||
@@ -164,8 +161,8 @@ class TestIdentitySyncThreadUsersPost(
|
||||
super().setUp()
|
||||
|
||||
self.method = self.identity_sync_thread.post_users
|
||||
self.resource_add = self.identity_sync_thread.get_sc_dbs_client().\
|
||||
identity_user_manager.add_user
|
||||
self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
|
||||
self.resource_add = self.sc_dbs_client.identity_user_manager.add_user
|
||||
|
||||
|
||||
class TestIdentitySyncThreadUsersPut(
|
||||
@@ -177,8 +174,8 @@ class TestIdentitySyncThreadUsersPut(
|
||||
super().setUp()
|
||||
|
||||
self.method = self.identity_sync_thread.put_users
|
||||
self.resource_update = self.identity_sync_thread.get_sc_dbs_client().\
|
||||
identity_user_manager.update_user
|
||||
self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
|
||||
self.resource_update = self.sc_dbs_client.identity_user_manager.update_user
|
||||
|
||||
|
||||
class TestIdentitySyncThreadUsersPatch(
|
||||
@@ -191,8 +188,9 @@ class TestIdentitySyncThreadUsersPatch(
|
||||
|
||||
self.method = self.identity_sync_thread.patch_users
|
||||
self.request.orch_job.resource_info = f'{{"{self.resource_name}": {{}}}}'
|
||||
self.resource_keystone_update = self.identity_sync_thread.\
|
||||
get_sc_ks_client().users.update
|
||||
self.resource_keystone_update = (
|
||||
self.identity_sync_thread.get_sc_ks_client().users.update
|
||||
)
|
||||
|
||||
|
||||
class TestIdentitySyncThreadUsersDelete(
|
||||
@@ -204,8 +202,9 @@ class TestIdentitySyncThreadUsersDelete(
|
||||
super().setUp()
|
||||
|
||||
self.method = self.identity_sync_thread.delete_users
|
||||
self.resource_keystone_delete = self.identity_sync_thread.\
|
||||
get_sc_ks_client().users.delete
|
||||
self.resource_keystone_delete = (
|
||||
self.identity_sync_thread.get_sc_ks_client().users.delete
|
||||
)
|
||||
|
||||
|
||||
class BaseTestIdentitySyncThreadGroups(BaseTestIdentitySyncThread):
|
||||
@@ -214,13 +213,13 @@ class BaseTestIdentitySyncThreadGroups(BaseTestIdentitySyncThread):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
|
||||
self.resource_name = 'group'
|
||||
self.resource_ref = \
|
||||
{self.resource_name: {'id': RESOURCE_ID, 'name': 'fake value'}}
|
||||
self.resource_ref_name = \
|
||||
self.resource_ref.get(self.resource_name).get('name')
|
||||
self.resource_detail = self.identity_sync_thread.get_master_dbs_client().\
|
||||
identity_group_manager.group_detail
|
||||
self.resource_name = "group"
|
||||
self.resource_ref = {
|
||||
self.resource_name: {"id": RESOURCE_ID, "name": "fake value"}
|
||||
}
|
||||
self.resource_ref_name = self.resource_ref.get(self.resource_name).get("name")
|
||||
self.dbs_client = self.identity_sync_thread.get_master_dbs_client()
|
||||
self.resource_detail = self.dbs_client.identity_group_manager.group_detail
|
||||
|
||||
|
||||
class TestIdentitySyncThreadGroupsPost(
|
||||
@@ -232,8 +231,8 @@ class TestIdentitySyncThreadGroupsPost(
|
||||
super().setUp()
|
||||
|
||||
self.method = self.identity_sync_thread.post_groups
|
||||
self.resource_add = self.identity_sync_thread.get_sc_dbs_client().\
|
||||
identity_group_manager.add_group
|
||||
self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
|
||||
self.resource_add = self.sc_dbs_client.identity_group_manager.add_group
|
||||
|
||||
|
||||
class TestIdentitySyncThreadGroupsPut(
|
||||
@@ -245,8 +244,8 @@ class TestIdentitySyncThreadGroupsPut(
|
||||
super().setUp()
|
||||
|
||||
self.method = self.identity_sync_thread.put_groups
|
||||
self.resource_update = self.identity_sync_thread.get_sc_dbs_client().\
|
||||
identity_group_manager.update_group
|
||||
self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
|
||||
self.resource_update = self.sc_dbs_client.identity_group_manager.update_group
|
||||
|
||||
|
||||
class TestIdentitySyncThreadGroupsPatch(
|
||||
@@ -259,8 +258,9 @@ class TestIdentitySyncThreadGroupsPatch(
|
||||
|
||||
self.method = self.identity_sync_thread.patch_groups
|
||||
self.request.orch_job.resource_info = f'{{"{self.resource_name}": {{}}}}'
|
||||
self.resource_keystone_update = self.identity_sync_thread.\
|
||||
get_sc_ks_client().groups.update
|
||||
self.resource_keystone_update = (
|
||||
self.identity_sync_thread.get_sc_ks_client().groups.update
|
||||
)
|
||||
|
||||
|
||||
class TestIdentitySyncThreadGroupsDelete(
|
||||
@@ -272,8 +272,9 @@ class TestIdentitySyncThreadGroupsDelete(
|
||||
super().setUp()
|
||||
|
||||
self.method = self.identity_sync_thread.delete_groups
|
||||
self.resource_keystone_delete = self.identity_sync_thread.\
|
||||
get_sc_ks_client().groups.delete
|
||||
self.resource_keystone_delete = (
|
||||
self.identity_sync_thread.get_sc_ks_client().groups.delete
|
||||
)
|
||||
|
||||
|
||||
class BaseTestIdentitySyncThreadProjects(BaseTestIdentitySyncThread):
|
||||
@@ -282,14 +283,13 @@ class BaseTestIdentitySyncThreadProjects(BaseTestIdentitySyncThread):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
|
||||
self.resource_name = 'project'
|
||||
self.resource_name = "project"
|
||||
self.resource_ref = {
|
||||
self.resource_name: {'id': RESOURCE_ID, 'name': 'fake value'}
|
||||
self.resource_name: {"id": RESOURCE_ID, "name": "fake value"}
|
||||
}
|
||||
self.resource_ref_name = \
|
||||
self.resource_ref.get(self.resource_name).get('name')
|
||||
self.resource_detail = self.identity_sync_thread.get_master_dbs_client().\
|
||||
project_manager.project_detail
|
||||
self.resource_ref_name = self.resource_ref.get(self.resource_name).get("name")
|
||||
self.dbs_client = self.identity_sync_thread.get_master_dbs_client()
|
||||
self.resource_detail = self.dbs_client.project_manager.project_detail
|
||||
|
||||
|
||||
class TestIdentitySyncThreadProjectsPost(
|
||||
@@ -301,8 +301,8 @@ class TestIdentitySyncThreadProjectsPost(
|
||||
super().setUp()
|
||||
        self.method = self.identity_sync_thread.post_projects
        self.resource_add = self.identity_sync_thread.get_sc_dbs_client().\
            project_manager.add_project
        self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
        self.resource_add = self.sc_dbs_client.project_manager.add_project


class TestIdentitySyncThreadProjectsPut(
@@ -314,8 +314,8 @@ class TestIdentitySyncThreadProjectsPut(
        super().setUp()

        self.method = self.identity_sync_thread.put_projects
        self.resource_update = self.identity_sync_thread.get_sc_dbs_client().\
            project_manager.update_project
        self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
        self.resource_update = self.sc_dbs_client.project_manager.update_project


class TestIdentitySyncThreadProjectsPatch(
@@ -328,8 +328,9 @@ class TestIdentitySyncThreadProjectsPatch(

        self.method = self.identity_sync_thread.patch_projects
        self.request.orch_job.resource_info = f'{{"{self.resource_name}": {{}}}}'
        self.resource_keystone_update = self.identity_sync_thread.\
            get_sc_ks_client().projects.update
        self.resource_keystone_update = (
            self.identity_sync_thread.get_sc_ks_client().projects.update
        )


class TestIdentitySyncThreadProjectsDelete(
@@ -341,8 +342,9 @@ class TestIdentitySyncThreadProjectsDelete(
        super().setUp()

        self.method = self.identity_sync_thread.delete_projects
        self.resource_keystone_delete = self.identity_sync_thread.\
            get_sc_ks_client().projects.delete
        self.resource_keystone_delete = (
            self.identity_sync_thread.get_sc_ks_client().projects.delete
        )


class BaseTestIdentitySyncThreadRoles(BaseTestIdentitySyncThread):
@@ -351,14 +353,13 @@ class BaseTestIdentitySyncThreadRoles(BaseTestIdentitySyncThread):
    def setUp(self):
        super().setUp()

        self.resource_name = 'role'
        self.resource_name = "role"
        self.resource_ref = {
            self.resource_name: {'id': RESOURCE_ID, 'name': 'fake value'}
            self.resource_name: {"id": RESOURCE_ID, "name": "fake value"}
        }
        self.resource_ref_name = \
            self.resource_ref.get(self.resource_name).get('name')
        self.resource_detail = self.identity_sync_thread.get_master_dbs_client().\
            role_manager.role_detail
        self.resource_ref_name = self.resource_ref.get(self.resource_name).get("name")
        self.dbs_client = self.identity_sync_thread.get_master_dbs_client()
        self.resource_detail = self.dbs_client.role_manager.role_detail


class TestIdentitySyncThreadRolesPost(
@@ -370,8 +371,8 @@ class TestIdentitySyncThreadRolesPost(
        super().setUp()

        self.method = self.identity_sync_thread.post_roles
        self.resource_add = self.identity_sync_thread.get_sc_dbs_client().\
            role_manager.add_role
        self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
        self.resource_add = self.sc_dbs_client.role_manager.add_role


class TestIdentitySyncThreadRolesPut(
@@ -383,8 +384,8 @@ class TestIdentitySyncThreadRolesPut(
        super().setUp()

        self.method = self.identity_sync_thread.put_roles
        self.resource_update = self.identity_sync_thread.get_sc_dbs_client().\
            role_manager.update_role
        self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
        self.resource_update = self.sc_dbs_client.role_manager.update_role


class TestIdentitySyncThreadRolesPatch(
@@ -397,8 +398,9 @@ class TestIdentitySyncThreadRolesPatch(

        self.method = self.identity_sync_thread.patch_roles
        self.request.orch_job.resource_info = f'{{"{self.resource_name}": {{}}}}'
        self.resource_keystone_update = self.identity_sync_thread.\
            get_sc_ks_client().roles.update
        self.resource_keystone_update = (
            self.identity_sync_thread.get_sc_ks_client().roles.update
        )


class TestIdentitySyncThreadRolesDelete(
@@ -410,8 +412,9 @@ class TestIdentitySyncThreadRolesDelete(
        super().setUp()

        self.method = self.identity_sync_thread.delete_roles
        self.resource_keystone_delete = self.identity_sync_thread.\
            get_sc_ks_client().roles.delete
        self.resource_keystone_delete = (
            self.identity_sync_thread.get_sc_ks_client().roles.delete
        )


class BaseTestIdentitySyncThreadProjectRoleAssignments(BaseTestIdentitySyncThread):
@@ -425,7 +428,7 @@ class BaseTestIdentitySyncThreadProjectRoleAssignments(BaseTestIdentitySyncThrea
        self.role_id = 12
        self.domain = 13

        self.resource_tags = f'{self.project_id}_{self.actor_id}_{self.role_id}'
        self.resource_tags = f"{self.project_id}_{self.actor_id}_{self.role_id}"


class TestIdentitySyncThreadProjectRoleAssignmentsPost(
@@ -440,14 +443,18 @@ class TestIdentitySyncThreadProjectRoleAssignmentsPost(
        self.rsrc.master_id = self.resource_tags

        self.mock_sc_role = self._create_mock_object(self.role_id)
        self.identity_sync_thread.get_sc_ks_client().\
            roles.list.return_value = [self.mock_sc_role]
        self.identity_sync_thread.get_sc_ks_client().\
            projects.list.return_value = [self._create_mock_object(self.project_id)]
        self.identity_sync_thread.get_sc_ks_client().\
            domains.list.return_value = [self._create_mock_object(self.project_id)]
        self.identity_sync_thread.get_sc_ks_client().\
            users.list.return_value = [self._create_mock_object(self.actor_id)]
        self.identity_sync_thread.get_sc_ks_client().roles.list.return_value = [
            self.mock_sc_role
        ]
        self.identity_sync_thread.get_sc_ks_client().projects.list.return_value = [
            self._create_mock_object(self.project_id)
        ]
        self.identity_sync_thread.get_sc_ks_client().domains.list.return_value = [
            self._create_mock_object(self.project_id)
        ]
        self.identity_sync_thread.get_sc_ks_client().users.list.return_value = [
            self._create_mock_object(self.actor_id)
        ]

    def _create_mock_object(self, id):
        mock_object = mock.MagicMock()
@@ -460,84 +467,88 @@ class TestIdentitySyncThreadProjectRoleAssignmentsPost(

        self._execute()
        self._assert_log(
            'info', f"Created Keystone role assignment {self.rsrc.id}:"
            f"{self.rsrc.master_id} [{self.rsrc.master_id}]"
            "info",
            f"Created Keystone role assignment {self.rsrc.id}:"
            f"{self.rsrc.master_id} [{self.rsrc.master_id}]",
        )

    def test_post_succeeds_with_sc_group(self):
        """Test post succeeds with sc group"""

        self.identity_sync_thread.get_sc_ks_client().\
            users.list.return_value = []
        self.identity_sync_thread.get_sc_ks_client().\
            groups.list.return_value = [self._create_mock_object(self.actor_id)]
        self.identity_sync_thread.get_sc_ks_client().users.list.return_value = []
        self.identity_sync_thread.get_sc_ks_client().groups.list.return_value = [
            self._create_mock_object(self.actor_id)
        ]

        self._execute()
        self._assert_log(
            'info', f"Created Keystone role assignment {self.rsrc.id}:"
            f"{self.rsrc.master_id} [{self.rsrc.master_id}]"
            "info",
            f"Created Keystone role assignment {self.rsrc.id}:"
            f"{self.rsrc.master_id} [{self.rsrc.master_id}]",
        )

    def test_post_fails_with_invalid_resource_tags(self):
        """Test post fails with invalid resource tags"""

        self.rsrc.master_id = f'{self.project_id}_{self.actor_id}'
        self.rsrc.master_id = f"{self.project_id}_{self.actor_id}"

        self._execute_and_assert_exception(exceptions.SyncRequestFailed)
        self._assert_log(
            'error', f"Malformed resource tag {self.rsrc.id} expected to be in "
            "format: ProjectID_UserID_RoleID."
            "error",
            f"Malformed resource tag {self.rsrc.id} expected to be in "
            "format: ProjectID_UserID_RoleID.",
        )

    def test_post_fails_without_sc_role(self):
        """Test post fails without sc role"""

        self.identity_sync_thread.get_sc_ks_client().\
            roles.list.return_value = []
        self.identity_sync_thread.get_sc_ks_client().roles.list.return_value = []

        self._execute_and_assert_exception(exceptions.SyncRequestFailed)
        self._assert_log(
            'error', "Unable to assign role to user on project reference "
            "error",
            "Unable to assign role to user on project reference "
            f"{self.rsrc}:{self.role_id}, cannot "
            "find equivalent Keystone Role in subcloud."
            "find equivalent Keystone Role in subcloud.",
        )

    def test_post_fails_without_sc_proj(self):
        """Test post fails without sc proj"""

        self.identity_sync_thread.get_sc_ks_client().\
            projects.list.return_value = []
        self.identity_sync_thread.get_sc_ks_client().projects.list.return_value = []

        self._execute_and_assert_exception(exceptions.SyncRequestFailed)
        self._assert_log(
            'error', "Unable to assign role to user on project reference "
            "error",
            "Unable to assign role to user on project reference "
            f"{self.rsrc}:{self.project_id}, cannot "
            "find equivalent Keystone Project in subcloud"
            "find equivalent Keystone Project in subcloud",
        )

    def test_post_fails_wihtout_sc_user_and_sc_group(self):
        """Test post fails without sc user and sc group"""

        self.identity_sync_thread.get_sc_ks_client().\
            users.list.return_value = []
        self.identity_sync_thread.get_sc_ks_client().users.list.return_value = []

        self._execute_and_assert_exception(exceptions.SyncRequestFailed)
        self._assert_log(
            'error', "Unable to assign role to user/group on project "
            "error",
            "Unable to assign role to user/group on project "
            f"reference {self.rsrc}:{self.actor_id}, cannot find "
            "equivalent Keystone User/Group in subcloud."
            "equivalent Keystone User/Group in subcloud.",
        )

    def test_post_fails_without_role_ref(self):
        """Test post fails without role ref"""

        self.identity_sync_thread.get_sc_ks_client().\
            role_assignments.list.return_value = []
        sc_ks_client = self.identity_sync_thread.get_sc_ks_client()
        sc_ks_client.role_assignments.list.return_value = []

        self._execute_and_assert_exception(exceptions.SyncRequestFailed)
        self._assert_log(
            'error', "Unable to update Keystone role assignment "
            f"{self.rsrc.id}:{self.mock_sc_role} "
            "error",
            "Unable to update Keystone role assignment "
            f"{self.rsrc.id}:{self.mock_sc_role}",
        )


@@ -561,7 +572,7 @@ class TestIdentitySyncThreadProjectRoleAssignmentsPut(

        self._execute()

        self._assert_log('info', 'IdentitySyncThread initialized')
        self._assert_log("info", "IdentitySyncThread initialized")
        self.log.error.assert_not_called()


@@ -581,14 +592,15 @@ class TestIdentitySyncThreadProjectRoleAssignmentsDelete(
    def test_delete_succeeds(self):
        """Test delete succeeds"""

        self.identity_sync_thread.get_sc_ks_client().\
            role_assignments.list.return_value = []
        sc_ks_client = self.identity_sync_thread.get_sc_ks_client()
        sc_ks_client.role_assignments.list.return_value = []

        self._execute()

        self._assert_log(
            'info', "Deleted Keystone role assignment: "
            f"{self.rsrc.id}:{self.subcloud_resource}"
            "info",
            "Deleted Keystone role assignment: "
            f"{self.rsrc.id}:{self.subcloud_resource}",
        )

    def test_delete_succeeds_without_assignment_subcloud_rsrc(self):
@@ -599,8 +611,9 @@ class TestIdentitySyncThreadProjectRoleAssignmentsDelete(
        self._execute()

        self._assert_log(
            'error', f"Unable to delete assignment {self.rsrc}, "
            "cannot find Keystone Role Assignment in subcloud."
            "error",
            f"Unable to delete assignment {self.rsrc}, "
            "cannot find Keystone Role Assignment in subcloud.",
        )

    def test_delete_succeeds_with_invalid_resource_tags(self):
@@ -612,55 +625,67 @@ class TestIdentitySyncThreadProjectRoleAssignmentsDelete(
        self._execute()

        self._assert_log(
            'error', f"Malformed subcloud resource tag {self.subcloud_resource}, "
            "error",
            f"Malformed subcloud resource tag {self.subcloud_resource}, "
            "expected to be in format: ProjectID_UserID_RoleID or "
            "ProjectID_GroupID_RoleID."
            "ProjectID_GroupID_RoleID.",
        )

    def test_delete_for_user_succeeds_with_keystone_not_found_exception(self):
        """Test delete fails for user with keystone not found exception"""

        self.identity_sync_thread.get_sc_ks_client().\
            roles.revoke.side_effect = [keystone_exceptions.NotFound, None]
        self.identity_sync_thread.get_sc_ks_client().\
            role_assignments.list.return_value = []
        sc_ks_client = self.identity_sync_thread.get_sc_ks_client()
        sc_ks_client.roles.revoke.side_effect = [
            keystone_exceptions.NotFound,
            None,
        ]
        sc_ks_client.role_assignments.list.return_value = []

        self._execute()

        self.log.assert_has_calls([
            mock.call.info(
                f"Revoke role assignment: (role {self.role_id}, "
                f"user {self.actor_id}, project {self.project_id}) "
                f"not found in {self.subcloud.region_name}, "
                "considered as deleted.", extra=mock.ANY
            ),
            mock.call.info(
                f"Deleted Keystone role assignment: {self.rsrc.id}:"
                f"{self.subcloud_resource}", extra=mock.ANY
            )],
            any_order=False
        self.log.assert_has_calls(
            [
                mock.call.info(
                    f"Revoke role assignment: (role {self.role_id}, "
                    f"user {self.actor_id}, project {self.project_id}) "
                    f"not found in {self.subcloud.region_name}, "
                    "considered as deleted.",
                    extra=mock.ANY,
                ),
                mock.call.info(
                    f"Deleted Keystone role assignment: {self.rsrc.id}:"
                    f"{self.subcloud_resource}",
                    extra=mock.ANY,
                ),
            ],
            any_order=False,
        )

    def test_delete_for_group_succeeds_with_keystone_not_found_exception(self):
        """Test delete fails for group with keystone not found exception"""

        self.identity_sync_thread.get_sc_ks_client().\
            roles.revoke.side_effect = keystone_exceptions.NotFound
        self.identity_sync_thread.get_sc_ks_client().roles.revoke.side_effect = (
            keystone_exceptions.NotFound
        )

        self._execute()

        self.log.assert_has_calls([
            mock.call.info(
                f"Revoke role assignment: (role {self.role_id}, "
                f"group {self.actor_id}, project {self.project_id}) "
                f"not found in {self.subcloud.region_name}, "
                "considered as deleted.", extra=mock.ANY
            ),
            mock.call.info(
                f"Deleted Keystone role assignment: {self.rsrc.id}:"
                f"{self.subcloud_resource}", extra=mock.ANY
            )],
            any_order=False
        self.log.assert_has_calls(
            [
                mock.call.info(
                    f"Revoke role assignment: (role {self.role_id}, "
                    f"group {self.actor_id}, project {self.project_id}) "
                    f"not found in {self.subcloud.region_name}, "
                    "considered as deleted.",
                    extra=mock.ANY,
                ),
                mock.call.info(
                    f"Deleted Keystone role assignment: {self.rsrc.id}:"
                    f"{self.subcloud_resource}",
                    extra=mock.ANY,
                ),
            ],
            any_order=False,
        )

    def test_delete_fails_without_role_ref(self):
@@ -669,8 +694,9 @@ class TestIdentitySyncThreadProjectRoleAssignmentsDelete(
        self._execute_and_assert_exception(exceptions.SyncRequestFailed)

        self._assert_log(
            'error', "Unable to delete Keystone role assignment "
            f"{self.rsrc.id}:{self.role_id} "
            "error",
            "Unable to delete Keystone role assignment "
            f"{self.rsrc.id}:{self.role_id} ",
        )


@@ -680,14 +706,13 @@ class BaseTestIdentitySyncThreadRevokeEvents(BaseTestIdentitySyncThread):
    def setUp(self):
        super().setUp()

        self.resource_name = 'token revocation event'
        self.resource_name = "token revocation event"
        self.resource_ref = {
            'revocation_event': {'audit_id': RESOURCE_ID, 'name': 'fake value'}
            "revocation_event": {"audit_id": RESOURCE_ID, "name": "fake value"}
        }
        self.resource_ref_name = \
            self.resource_ref.get('revocation_event').get('name')
        self.resource_detail = self.identity_sync_thread.get_master_dbs_client().\
            revoke_event_manager.revoke_event_detail
        self.resource_ref_name = self.resource_ref.get("revocation_event").get("name")
        self.dbs_client = self.identity_sync_thread.get_master_dbs_client()
        self.resource_detail = self.dbs_client.revoke_event_manager.revoke_event_detail


class BaseTestIdentitySyncThreadRevokeEventsPost(
@@ -701,8 +726,8 @@ class BaseTestIdentitySyncThreadRevokeEventsPost(
        self.resource_info = {"token_revoke_event": {"audit_id": RESOURCE_ID}}
        self.request.orch_job.resource_info = jsonutils.dumps(self.resource_info)
        self.method = self.identity_sync_thread.post_revoke_events
        self.resource_add = self.identity_sync_thread.get_sc_dbs_client().\
            revoke_event_manager.add_revoke_event
        self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
        self.resource_add = self.sc_dbs_client.revoke_event_manager.add_revoke_event

    def test_post_succeeds(self):
        """Test post succeeds"""
@@ -715,9 +740,10 @@ class BaseTestIdentitySyncThreadRevokeEventsPost(
        self._resource_add().assert_called_once()

        self._assert_log(
            'info', f"Created Keystone {self._get_resource_name()} "
            "info",
            f"Created Keystone {self._get_resource_name()} "
            f"{self._get_rsrc().id}:"
            f"{self.resource_info.get('token_revoke_event').get('audit_id')}"
            f"{self.resource_info.get('token_revoke_event').get('audit_id')}",
        )

    def test_post_fails_without_source_resource_id(self):
@@ -727,8 +753,9 @@ class BaseTestIdentitySyncThreadRevokeEventsPost(

        self._execute_and_assert_exception(exceptions.SyncRequestFailed)
        self._assert_log(
            'error', f"Received {self._get_resource_name()} create request "
            "without required subcloud resource id"
            "error",
            f"Received {self._get_resource_name()} create request "
            "without required subcloud resource id",
        )

    def test_post_fails_with_empty_resource_ref(self):
@@ -738,10 +765,11 @@ class BaseTestIdentitySyncThreadRevokeEventsPost(

        self._execute_and_assert_exception(exceptions.SyncRequestFailed)
        self._assert_log(
            'error', f"No {self._get_resource_name()} data returned when creating "
            "error",
            f"No {self._get_resource_name()} data returned when creating "
            f"{self._get_resource_name()} with audit_id "
            f"{self.resource_info.get('token_revoke_event').get('audit_id')} "
            "in subcloud."
            "in subcloud.",
        )

    def test_post_fails_without_resource_records(self):
@@ -751,10 +779,11 @@ class BaseTestIdentitySyncThreadRevokeEventsPost(

        self._execute_and_assert_exception(exceptions.SyncRequestFailed)
        self._assert_log(
            'error', "No data retrieved from master cloud for "
            "error",
            "No data retrieved from master cloud for "
            f"{self._get_resource_name()} with audit_id "
            f"{self.resource_info.get('token_revoke_event').get('audit_id')} "
            "to create its equivalent in subcloud."
            "to create its equivalent in subcloud.",
        )


@@ -767,8 +796,10 @@ class BaseTestIdentitySyncThreadRevokeEventsDelete(
        super().setUp()

        self.method = self.identity_sync_thread.delete_revoke_events
        self.resource_keystone_delete = self.identity_sync_thread.\
            get_sc_dbs_client().revoke_event_manager.delete_revoke_event
        self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
        self.resource_keystone_delete = (
            self.sc_dbs_client.revoke_event_manager.delete_revoke_event
        )

    def test_delete_succeeds_with_keystone_not_found_exception(self):
        """Test delete succeeds with keystone's not found exception
@@ -786,20 +817,23 @@ class BaseTestIdentitySyncThreadRevokeEventsDelete(
        self._execute()

        self._resource_keystone_delete().assert_called_once()
        self._get_log().assert_has_calls([
            mock.call.info(
                f"Delete {self._get_resource_name()}: event "
                f"{self._get_subcloud_resource().subcloud_resource_id} "
                f"not found in {self._get_subcloud().region_name}, "
                "considered as deleted.", extra=mock.ANY
            ),
            mock.call.info(
                f"Keystone {self._get_resource_name()} {self._get_rsrc().id}:"
                f"{self._get_subcloud().id} "
                f"[{self._get_subcloud_resource().subcloud_resource_id}] deleted",
                extra=mock.ANY
            )],
            any_order=False
        self._get_log().assert_has_calls(
            [
                mock.call.info(
                    f"Delete {self._get_resource_name()}: event "
                    f"{self._get_subcloud_resource().subcloud_resource_id} "
                    f"not found in {self._get_subcloud().region_name}, "
                    "considered as deleted.",
                    extra=mock.ANY,
                ),
                mock.call.info(
                    f"Keystone {self._get_resource_name()} {self._get_rsrc().id}:"
                    f"{self._get_subcloud().id} "
                    f"[{self._get_subcloud_resource().subcloud_resource_id}] deleted",
                    extra=mock.ANY,
                ),
            ],
            any_order=False,
        )


@@ -809,14 +843,13 @@ class BaseTestIdentitySyncThreadRevokeEventsForUser(BaseTestIdentitySyncThread):
    def setUp(self):
        super().setUp()

        self.resource_name = 'token revocation event'
        self.resource_name = "token revocation event"
        self.resource_ref = {
            'revocation_event': {'audit_id': RESOURCE_ID, 'name': 'fake value'}
            "revocation_event": {"audit_id": RESOURCE_ID, "name": "fake value"}
        }
        self.resource_ref_name = \
            self.resource_ref.get('revocation_event').get('name')
        self.resource_detail = self.identity_sync_thread.get_master_dbs_client().\
            revoke_event_manager.revoke_event_detail
        self.resource_ref_name = self.resource_ref.get("revocation_event").get("name")
        self.dbs_client = self.identity_sync_thread.get_master_dbs_client()
        self.resource_detail = self.dbs_client.revoke_event_manager.revoke_event_detail


class TestIdentitySyncThreadRevokeEventsForUserPost(
@@ -828,8 +861,8 @@ class TestIdentitySyncThreadRevokeEventsForUserPost(
        super().setUp()

        self.method = self.identity_sync_thread.post_revoke_events_for_user
        self.resource_add = self.identity_sync_thread.get_sc_dbs_client().\
            revoke_event_manager.add_revoke_event
        self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
        self.resource_add = self.sc_dbs_client.revoke_event_manager.add_revoke_event

    def test_post_succeeds(self):
        """Test post succeeds"""
@@ -842,9 +875,10 @@ class TestIdentitySyncThreadRevokeEventsForUserPost(
        self._resource_add().assert_called_once()

        self._assert_log(
            'info', f"Created Keystone {self._get_resource_name()} "
            "info",
            f"Created Keystone {self._get_resource_name()} "
            f"{self._get_rsrc().id}:"
            f"{self._get_request().orch_job.source_resource_id}"
            f"{self._get_request().orch_job.source_resource_id}",
        )

    def test_post_fails_without_source_resource_id(self):
@@ -854,8 +888,9 @@ class TestIdentitySyncThreadRevokeEventsForUserPost(

        self._execute_and_assert_exception(exceptions.SyncRequestFailed)
        self._assert_log(
            'error', f"Received {self._get_resource_name()} create request "
            "without required subcloud resource id"
            "error",
            f"Received {self._get_resource_name()} create request "
            "without required subcloud resource id",
        )

    def test_post_fails_with_empty_resource_ref(self):
@@ -865,9 +900,10 @@ class TestIdentitySyncThreadRevokeEventsForUserPost(

        self._execute_and_assert_exception(exceptions.SyncRequestFailed)
        self._assert_log(
            'error', f"No {self._get_resource_name()} data returned when creating "
            "error",
            f"No {self._get_resource_name()} data returned when creating "
            f"{self._get_resource_name()} with event_id "
            f"{self._get_request().orch_job.source_resource_id} in subcloud."
            f"{self._get_request().orch_job.source_resource_id} in subcloud.",
        )

    def test_post_fails_without_resource_records(self):
@@ -877,10 +913,11 @@ class TestIdentitySyncThreadRevokeEventsForUserPost(

        self._execute_and_assert_exception(exceptions.SyncRequestFailed)
        self._assert_log(
            'error', "No data retrieved from master cloud for "
            "error",
            "No data retrieved from master cloud for "
            f"{self._get_resource_name()} with event_id "
            f"{self._get_request().orch_job.source_resource_id} to create its "
            "equivalent in subcloud."
            "equivalent in subcloud.",
        )


@@ -893,8 +930,10 @@ class TestIdentitySyncThreadRevokeEventsForUserDelete(
        super().setUp()

        self.method = self.identity_sync_thread.delete_revoke_events_for_user
        self.resource_keystone_delete = self.identity_sync_thread.\
            get_sc_dbs_client().revoke_event_manager.delete_revoke_event
        self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
        self.resource_keystone_delete = (
            self.sc_dbs_client.revoke_event_manager.delete_revoke_event
        )

    def test_delete_succeeds_with_keystone_not_found_exception(self):
        """Test delete succeeds with keystone's not found exception
@@ -912,18 +951,21 @@ class TestIdentitySyncThreadRevokeEventsForUserDelete(
        self._execute()

        self._resource_keystone_delete().assert_called_once()
        self._get_log().assert_has_calls([
            mock.call.info(
                f"Delete {self._get_resource_name()}: event "
                f"{self._get_subcloud_resource().subcloud_resource_id} "
                f"not found in {self._get_subcloud().region_name}, "
                "considered as deleted.", extra=mock.ANY
            ),
            mock.call.info(
                f"Keystone {self._get_resource_name()} {self._get_rsrc().id}:"
                f"{self._get_subcloud().id} "
                f"[{self._get_subcloud_resource().subcloud_resource_id}] deleted",
                extra=mock.ANY
            )],
            any_order=False
        self._get_log().assert_has_calls(
            [
                mock.call.info(
                    f"Delete {self._get_resource_name()}: event "
                    f"{self._get_subcloud_resource().subcloud_resource_id} "
                    f"not found in {self._get_subcloud().region_name}, "
                    "considered as deleted.",
                    extra=mock.ANY,
                ),
                mock.call.info(
                    f"Keystone {self._get_resource_name()} {self._get_rsrc().id}:"
                    f"{self._get_subcloud().id} "
                    f"[{self._get_subcloud_resource().subcloud_resource_id}] deleted",
                    extra=mock.ANY,
                ),
            ],
            any_order=False,
        )

@@ -28,6 +28,7 @@ formatted_modules = [
    "dcorch/api",
    "dcorch/common",
    "dcorch/db",
    "dcorch/engine",
]
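A minimal sketch of how a formatted_modules list like the one above is typically consumed by a formatting check; the use of subprocess and the check-only Black invocation are assumptions for illustration, not part of this change or of the project's actual tox job.

# Illustrative only: run Black in check mode over the configured modules.
# The module list mirrors the formatted_modules entries above; invoking
# Black via subprocess here is an assumption, not the project's tooling.
import subprocess
import sys

formatted_modules = [
    "dcorch/api",
    "dcorch/common",
    "dcorch/db",
    "dcorch/engine",
]

# Black exits non-zero when any file would be reformatted.
result = subprocess.run(["black", "--check", "--diff"] + formatted_modules)
sys.exit(result.returncode)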