Apply black formatter to dcorch

This commit applies the Black formatter to the `dcorch` files
to ensure they adhere to the Black code style guidelines.

Test Plan:
PASS: Success in stx-distcloud-tox-black

Story: 2011149
Task: 50445

Change-Id: I0f6298293e6a86237723b53164abe892fb54dab0
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
Hugo Brito
2024-06-26 18:35:55 -03:00
committed by Hugo Nicodemos
parent 51b6e19a2c
commit 70fd84b263
33 changed files with 1319 additions and 1156 deletions
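As a quick illustration of the kind of rewrites shown in the hunks below (quote normalization and wrapping at Black's default 88-character line length), here is a minimal sketch that runs Black's public format_str API on one of the lines touched by this change. It is not part of the commit; the sample string is copied from the dcorch.api diff, and the assumption that the repo exposes a matching local tox environment (e.g. `tox -e black`, implied by the stx-distcloud-tox-black job) is mine.

    # Minimal sketch (not part of this commit): demonstrates the rewrite Black
    # applies to the dcorch sources, e.g. normalizing single quotes to double quotes.
    import black

    src = "LOG = logging.getLogger('dcorch.api')\n"
    formatted = black.format_str(src, mode=black.Mode())  # default line length is 88
    print(formatted)  # -> LOG = logging.getLogger("dcorch.api")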


@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -15,5 +14,4 @@
import pbr.version
__version__ = pbr.version.VersionInfo(
'distributedcloud').version_string()
__version__ = pbr.version.VersionInfo("distributedcloud").version_string()


@@ -22,6 +22,7 @@ import logging as std_logging
import sys
import eventlet
eventlet.monkey_patch(os=False)
# pylint: disable=wrong-import-position
@@ -34,11 +35,12 @@ from dcorch.api import api_config # noqa: E402
from dcorch.api import app # noqa: E402
from dcorch.common import config # noqa: E402
from dcorch.common import messaging # noqa: E402
# pylint: enable=wrong-import-position
CONF = cfg.CONF
config.register_options()
LOG = logging.getLogger('dcorch.api')
LOG = logging.getLogger("dcorch.api")
def main():
@@ -54,8 +56,10 @@ def main():
LOG.warning("Wrong worker number, worker = %(workers)s", workers)
workers = 1
LOG.info("Server on http://%(host)s:%(port)s with %(workers)s",
{'host': host, 'port': port, 'workers': workers})
LOG.info(
"Server on http://%(host)s:%(port)s with %(workers)s",
{"host": host, "port": port, "workers": workers},
)
messaging.setup()
systemd.notify_once()
service = wsgi.Server(CONF, "OrchEngine", application, host, port)
@@ -69,5 +73,5 @@ def main():
app.wait()
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -22,6 +22,7 @@ import os
import sys
import eventlet
eventlet.monkey_patch(os=False)
# pylint: disable=wrong-import-position
@@ -38,24 +39,23 @@ from dcorch.api.proxy.common import constants # noqa: E402
from dcorch.api.proxy.common import utils # noqa: E402
from dcorch.common import config # noqa: E402
from dcorch.common import messaging # noqa: E402
# pylint: enable=wrong-import-position
proxy_opts = [
cfg.StrOpt('bind_host',
default="0.0.0.0",
help='IP address for api proxy to listen'),
cfg.IntOpt('bind_port',
default=28774,
help='listen port for api proxy'),
cfg.StrOpt('sync_endpoint',
default=None,
help='The endpoint type for the enqueued sync work'),
cfg.StrOpt(
"bind_host", default="0.0.0.0", help="IP address for api proxy to listen"
),
cfg.IntOpt("bind_port", default=28774, help="listen port for api proxy"),
cfg.StrOpt(
"sync_endpoint",
default=None,
help="The endpoint type for the enqueued sync work",
),
]
proxy_cli_opts = [
cfg.StrOpt('type',
default="compute",
help='Type of the proxy service'),
cfg.StrOpt("type", default="compute", help="Type of the proxy service"),
]
CONF = cfg.CONF
@@ -63,13 +63,13 @@ CONF = cfg.CONF
config.register_options()
CONF.register_cli_opts(proxy_cli_opts)
LOG = logging.getLogger('dcorch.api.proxy')
LOG = logging.getLogger("dcorch.api.proxy")
def make_tempdir(tempdir):
if not os.path.isdir(tempdir):
os.makedirs(tempdir)
os.environ['TMPDIR'] = tempdir
os.environ["TMPDIR"] = tempdir
def main():
@@ -94,8 +94,10 @@ def main():
LOG.warning("Wrong worker number, worker = %(workers)s", workers)
workers = 1
LOG.info("Server on http://%(host)s:%(port)s with %(workers)s",
{'host': host, 'port': port, 'workers': workers})
LOG.info(
"Server on http://%(host)s:%(port)s with %(workers)s",
{"host": host, "port": port, "workers": workers},
)
systemd.notify_once()
# For patching and platform, create a temp directory under /scratch
@@ -117,5 +119,5 @@ def main():
app.wait()
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -19,6 +19,7 @@ DC Orchestrators Engine Server.
"""
import eventlet
eventlet.monkey_patch()
# pylint: disable=wrong-import-position
@@ -30,17 +31,18 @@ from oslo_service import service # noqa: E402
from dcorch.common import config # noqa: E402
from dcorch.common import messaging # noqa: E402
from dcorch.engine import service as engine # noqa: E402
# pylint: enable=wrong-import-position
_lazy.enable_lazy()
config.register_options()
LOG = logging.getLogger('dcorch.engine')
LOG = logging.getLogger("dcorch.engine")
def main():
logging.register_options(cfg.CONF)
cfg.CONF(project='dcorch', prog='dcorch-engine')
logging.setup(cfg.CONF, 'dcorch-engine')
cfg.CONF(project="dcorch", prog="dcorch-engine")
logging.setup(cfg.CONF, "dcorch-engine")
logging.set_defaults()
messaging.setup()
@@ -53,5 +55,5 @@ def main():
launcher.wait()
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -10,6 +10,7 @@ DC Orchestrators Engine Server.
"""
import eventlet
eventlet.monkey_patch()
# pylint: disable=wrong-import-position
@@ -22,28 +23,32 @@ from dcmanager.common import messaging as dmanager_messaging # noqa: E402
from dcorch.common import config # noqa: E402
from dcorch.common import messaging # noqa: E402
from dcorch.engine import service as engine # noqa: E402
# pylint: enable=wrong-import-position
_lazy.enable_lazy()
config.register_options()
LOG = logging.getLogger('dcorch.engine-worker')
LOG = logging.getLogger("dcorch.engine-worker")
def main():
logging.register_options(cfg.CONF)
cfg.CONF(project='dcorch', prog='dcorch-engine-worker')
logging.setup(cfg.CONF, 'dcorch-engine-worker')
cfg.CONF(project="dcorch", prog="dcorch-engine-worker")
logging.setup(cfg.CONF, "dcorch-engine-worker")
logging.set_defaults()
messaging.setup()
dmanager_messaging.setup()
LOG.info("Launching dcorch-engine-worker, host=%s, workers=%s ...",
cfg.CONF.host, cfg.CONF.workers)
LOG.info(
"Launching dcorch-engine-worker, host=%s, workers=%s ...",
cfg.CONF.host,
cfg.CONF.workers,
)
srv = engine.EngineWorkerService()
launcher = service.launch(cfg.CONF, srv, workers=cfg.CONF.workers)
launcher.wait()
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -56,37 +56,41 @@ def do_db_clean():
def add_command_parsers(subparsers):
parser = subparsers.add_parser('db_version')
parser = subparsers.add_parser("db_version")
parser.set_defaults(func=do_db_version)
parser = subparsers.add_parser('db_sync')
parser = subparsers.add_parser("db_sync")
parser.set_defaults(func=do_db_sync)
parser.add_argument('version', nargs='?')
parser.add_argument('current_version', nargs='?')
parser.add_argument("version", nargs="?")
parser.add_argument("current_version", nargs="?")
parser = subparsers.add_parser('db_clean')
parser = subparsers.add_parser("db_clean")
parser.set_defaults(func=do_db_clean)
parser.add_argument('age_in_days', type=int,
default=1)
parser.add_argument("age_in_days", type=int, default=1)
command_opt = cfg.SubCommandOpt('command',
title='Commands',
help='Show available commands.',
handler=add_command_parsers)
command_opt = cfg.SubCommandOpt(
"command",
title="Commands",
help="Show available commands.",
handler=add_command_parsers,
)
def main():
logging.register_options(CONF)
logging.setup(CONF, 'dcorch-manage')
logging.setup(CONF, "dcorch-manage")
CONF.register_cli_opt(command_opt)
try:
default_config_files = cfg.find_config_files('dcorch',
'dcorch-engine')
CONF(sys.argv[1:], project='dcorch', prog='dcorch-manage',
version=version.version_info.version_string(),
default_config_files=default_config_files)
default_config_files = cfg.find_config_files("dcorch", "dcorch-engine")
CONF(
sys.argv[1:],
project="dcorch",
prog="dcorch-manage",
version=version.version_info.version_string(),
default_config_files=default_config_files,
)
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
@@ -96,5 +100,5 @@ def main():
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
if __name__ == "__main__":
main()


@@ -1,3 +1,4 @@
# Copyright (c) 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -19,7 +20,7 @@ from dcorch.common import exceptions
from dcorch.drivers import base
LOG = log.getLogger(__name__)
API_VERSION = '2'
API_VERSION = "2"
class CinderClient(base.DriverBase):
@@ -27,11 +28,13 @@ class CinderClient(base.DriverBase):
def __init__(self, region, disabled_quotas, session, endpoint_type):
try:
self.cinder = client.Client(API_VERSION,
session=session,
region_name=region,
endpoint_type=endpoint_type)
self.no_volumes = True if 'volumes' in disabled_quotas else False
self.cinder = client.Client(
API_VERSION,
session=session,
region_name=region,
endpoint_type=endpoint_type,
)
self.no_volumes = True if "volumes" in disabled_quotas else False
except exceptions.ServiceUnavailable:
raise
@@ -45,17 +48,17 @@ class CinderClient(base.DriverBase):
"""
if not self.no_volumes:
try:
quota_usage = self.cinder.quotas.get(
project_id, usage=True)
quota_usage = self.cinder.quotas.get(project_id, usage=True)
quota_usage_dict = quota_usage.to_dict()
del quota_usage_dict['id']
del quota_usage_dict["id"]
resource_usage = defaultdict(dict)
for resource in quota_usage_dict:
# NOTE: May be able to remove "reserved" if
# cinder will never set it. Need to check.
resource_usage[resource] = (
quota_usage_dict[resource]['in_use'] +
quota_usage_dict[resource]['reserved'])
quota_usage_dict[resource]["in_use"]
+ quota_usage_dict[resource]["reserved"]
)
return resource_usage
except exceptions.InternalError:
raise
@@ -63,10 +66,9 @@ class CinderClient(base.DriverBase):
def get_quota_limits(self, project_id):
"""Get the resource limits"""
try:
quotas = self.cinder.quotas.get(
project_id, usage=False)
quotas = self.cinder.quotas.get(project_id, usage=False)
quotas_dict = quotas.to_dict()
del quotas_dict['id']
del quotas_dict["id"]
return quotas_dict
except exceptions.InternalError:
raise


@@ -22,24 +22,24 @@ from dcorch.common import exceptions
from dcorch.drivers import base
LOG = log.getLogger(__name__)
API_VERSION = '2.0'
API_VERSION = "2.0"
class NeutronClient(base.DriverBase):
"""Neutron V2 driver."""
def __init__(self, region, disabled_quotas, session, endpoint_type):
try:
self.neutron = client.Client(
API_VERSION, session=session,
API_VERSION,
session=session,
region_name=region,
endpoint_type=endpoint_type,
)
self.extension_list = self.neutron.list_extensions()
self.disabled_quotas = disabled_quotas
self.no_network = True if 'floatingip' in self.disabled_quotas \
else False
self.is_sec_group_enabled = self.is_extension_supported(
'security-group')
self.no_network = True if "floatingip" in self.disabled_quotas else False
self.is_sec_group_enabled = self.is_extension_supported("security-group")
except exceptions.ServiceUnavailable:
raise
@@ -53,12 +53,13 @@ class NeutronClient(base.DriverBase):
try:
usages = defaultdict(dict)
limits = self.neutron.show_quota_details(project_id)
limits = limits['quota']
limits = limits["quota"]
for resource in limits:
# NOTE: May be able to remove "reserved" if
# neutron will never set it. Need to check.
usages[resource] = (limits[resource]['used'] +
limits[resource]['reserved'])
usages[resource] = (
limits[resource]["used"] + limits[resource]["reserved"]
)
return usages
except exceptions.InternalError:
raise
@@ -68,30 +69,29 @@ class NeutronClient(base.DriverBase):
try:
usages = defaultdict(dict)
opts = {'tenant_id': project_id}
opts = {"tenant_id": project_id}
networks = self.neutron.list_networks(**opts)['networks']
subnets = self.neutron.list_subnets(**opts)['subnets']
ports = self.neutron.list_ports(**opts)['ports']
routers = self.neutron.list_routers(**opts)['routers']
floatingips = self.neutron.list_floatingips(
**opts)['floatingips']
networks = self.neutron.list_networks(**opts)["networks"]
subnets = self.neutron.list_subnets(**opts)["subnets"]
ports = self.neutron.list_ports(**opts)["ports"]
routers = self.neutron.list_routers(**opts)["routers"]
floatingips = self.neutron.list_floatingips(**opts)["floatingips"]
usages['network'] = len(networks)
usages['subnet'] = len(subnets)
usages['port'] = len(ports)
usages['router'] = len(routers)
usages['floatingip'] = len(floatingips)
usages["network"] = len(networks)
usages["subnet"] = len(subnets)
usages["port"] = len(ports)
usages["router"] = len(routers)
usages["floatingip"] = len(floatingips)
if self.is_sec_group_enabled:
security_group_rules = \
self.neutron.list_security_group_rules(
**opts)['security_group_rules']
security_groups = self.neutron.list_security_groups(
**opts)['security_groups']
usages['security_group_rule'] = len(
security_group_rules)
usages['security_group'] = len(security_groups)
security_group_rules = self.neutron.list_security_group_rules(
**opts
)["security_group_rules"]
security_groups = self.neutron.list_security_groups(**opts)[
"security_groups"
]
usages["security_group_rule"] = len(security_group_rules)
usages["security_group"] = len(security_groups)
return usages
except exceptions.InternalError:
raise
@@ -105,7 +105,7 @@ class NeutronClient(base.DriverBase):
resource_limit = {}
if not self.no_network:
limits = self.neutron.show_quota(project_id)
resource_limit = limits['quota']
resource_limit = limits["quota"]
return resource_limit
except exceptions.InternalError:
raise
@@ -114,8 +114,7 @@ class NeutronClient(base.DriverBase):
"""Update the limits"""
try:
if not self.no_network:
return self.neutron.update_quota(project_id,
{"quota": new_quota})
return self.neutron.update_quota(project_id, {"quota": new_quota})
except exceptions.InternalError:
raise
@@ -128,7 +127,7 @@ class NeutronClient(base.DriverBase):
raise
def is_extension_supported(self, extension):
for current_extension in self.extension_list['extensions']:
if extension in current_extension['alias']:
for current_extension in self.extension_list["extensions"]:
if extension in current_extension["alias"]:
return True
return False


@@ -22,7 +22,7 @@ from dcorch.common import exceptions
from dcorch.drivers import base
LOG = log.getLogger(__name__)
API_VERSION = '2.37'
API_VERSION = "2.37"
class NovaClient(base.DriverBase):
@@ -30,15 +30,22 @@ class NovaClient(base.DriverBase):
def __init__(self, region, session, endpoint_type, disabled_quotas=None):
try:
self.nova_client = client.Client(API_VERSION,
session=session,
region_name=region,
endpoint_type=endpoint_type)
self.nova_client = client.Client(
API_VERSION,
session=session,
region_name=region,
endpoint_type=endpoint_type,
)
if disabled_quotas:
self.enabled_quotas = list(set(consts.NOVA_QUOTA_FIELDS) -
set(disabled_quotas))
self.no_neutron = True if 'floatingips' in self.enabled_quotas \
or 'fixedips' in self.enabled_quotas else False
self.enabled_quotas = list(
set(consts.NOVA_QUOTA_FIELDS) - set(disabled_quotas)
)
self.no_neutron = (
True
if "floatingips" in self.enabled_quotas
or "fixedips" in self.enabled_quotas
else False
)
except exceptions.ServiceUnavailable:
raise
@@ -53,19 +60,21 @@ class NovaClient(base.DriverBase):
# The API call does not give usage for keypair, fixed ips &
# metadata items. Have raised a bug for that.
quota_usage = self.nova_client.quotas.get(
project_id, user_id=user_id, detail=True)
project_id, user_id=user_id, detail=True
)
quota_usage_dict = quota_usage.to_dict()
del quota_usage_dict['id']
del quota_usage_dict["id"]
resource_usage = collections.defaultdict(dict)
for resource in quota_usage_dict:
# Don't need to add in "reserved" here, it will always be zero.
resource_usage[resource] = quota_usage_dict[resource]['in_use']
resource_usage[resource] = quota_usage_dict[resource]["in_use"]
# For time being, keypair is calculated in below manner.
# This is actually not correct for projects, as keypair quotas
# apply to users only, and across all projects.
resource_usage['key_pairs'] = \
len(self.nova_client.keypairs.list(user_id=user_id))
resource_usage["key_pairs"] = len(
self.nova_client.keypairs.list(user_id=user_id)
)
return resource_usage
except exceptions.InternalError:
raise
@@ -80,9 +89,10 @@ class NovaClient(base.DriverBase):
"""
try:
quotas = self.nova_client.quotas.get(
project_id, user_id=user_id, detail=False)
project_id, user_id=user_id, detail=False
)
quotas_dict = quotas.to_dict()
del quotas_dict['id']
del quotas_dict["id"]
return quotas_dict
except exceptions.InternalError:
raise
@@ -96,14 +106,15 @@ class NovaClient(base.DriverBase):
"""
try:
if not self.no_neutron:
if 'floating_ips' in new_quota:
del new_quota['floating_ips']
if 'fixed_ips' in new_quota:
del new_quota['fixed_ips']
if 'security_groups' in new_quota:
del new_quota['security_groups']
return self.nova_client.quotas.update(project_id, user_id=user_id,
**new_quota)
if "floating_ips" in new_quota:
del new_quota["floating_ips"]
if "fixed_ips" in new_quota:
del new_quota["fixed_ips"]
if "security_groups" in new_quota:
del new_quota["security_groups"]
return self.nova_client.quotas.update(
project_id, user_id=user_id, **new_quota
)
except exceptions.InternalError:
raise
@@ -130,7 +141,7 @@ class NovaClient(base.DriverBase):
return keypair
except Exception as exception:
LOG.error('Exception Occurred: %s', str(exception))
LOG.error("Exception Occurred: %s", str(exception))
pass
def create_keypairs(self, force, keypair):
@@ -144,9 +155,9 @@ class NovaClient(base.DriverBase):
self.nova_client.keypairs.delete(keypair)
LOG.info("Deleted Keypair: %s", keypair.name)
except Exception as exception:
LOG.error('Exception Occurred: %s', str(exception))
LOG.error("Exception Occurred: %s", str(exception))
pass
LOG.info("Created Keypair: %s", keypair.name)
return self.nova_client.keypairs. \
create(keypair.name,
public_key=keypair.public_key)
return self.nova_client.keypairs.create(
keypair.name, public_key=keypair.public_key
)


@@ -38,68 +38,73 @@ class OpenStackDriver(object):
os_clients_dict = collections.defaultdict(dict)
_identity_tokens = {}
@lockutils.synchronized('dcorch-openstackdriver')
def __init__(self, region_name=dccommon_consts.VIRTUAL_MASTER_CLOUD,
auth_url=None):
@lockutils.synchronized("dcorch-openstackdriver")
def __init__(self, region_name=dccommon_consts.VIRTUAL_MASTER_CLOUD, auth_url=None):
# Check if objects are cached and try to use those
self.region_name = region_name
if (region_name in OpenStackDriver._identity_tokens and
(region_name in OpenStackDriver.os_clients_dict) and
('keystone' in OpenStackDriver.os_clients_dict[region_name])
and self._is_token_valid(self.region_name)):
self.keystone_client = \
OpenStackDriver.os_clients_dict[region_name]['keystone']
if (
region_name in OpenStackDriver._identity_tokens
and (region_name in OpenStackDriver.os_clients_dict)
and ("keystone" in OpenStackDriver.os_clients_dict[region_name])
and self._is_token_valid(self.region_name)
):
self.keystone_client = OpenStackDriver.os_clients_dict[region_name][
"keystone"
]
else:
LOG.info("get new keystone client for %s" % region_name)
self.keystone_client = KeystoneClient(region_name, auth_url)
OpenStackDriver.os_clients_dict[region_name]['keystone'] = \
self.keystone_client
OpenStackDriver.os_clients_dict[region_name][
"keystone"
] = self.keystone_client
# self.disabled_quotas = self._get_disabled_quotas(region_name)
if region_name in OpenStackDriver.os_clients_dict and \
self._is_token_valid(region_name):
LOG.info('Using cached OS client objects %s' % region_name)
self.sysinv_client = OpenStackDriver.os_clients_dict[
region_name]['sysinv']
self.fm_client = OpenStackDriver.os_clients_dict[
region_name]['fm']
if region_name in OpenStackDriver.os_clients_dict and self._is_token_valid(
region_name
):
LOG.info("Using cached OS client objects %s" % region_name)
self.sysinv_client = OpenStackDriver.os_clients_dict[region_name]["sysinv"]
self.fm_client = OpenStackDriver.os_clients_dict[region_name]["fm"]
else:
# Create new objects and cache them
LOG.info("Creating fresh OS Clients objects %s" % region_name)
OpenStackDriver.os_clients_dict[
region_name] = collections.defaultdict(dict)
OpenStackDriver.os_clients_dict[region_name] = collections.defaultdict(dict)
try:
sysinv_endpoint = self.keystone_client.endpoint_cache.get_endpoint(
'sysinv')
self.sysinv_client = SysinvClient(region_name,
self.keystone_client.session,
endpoint=sysinv_endpoint)
"sysinv"
)
self.sysinv_client = SysinvClient(
region_name, self.keystone_client.session, endpoint=sysinv_endpoint
)
OpenStackDriver.os_clients_dict[region_name][
'sysinv'] = self.sysinv_client
"sysinv"
] = self.sysinv_client
except Exception as exception:
LOG.error('sysinv_client region %s error: %s' %
(region_name, str(exception)))
LOG.error(
"sysinv_client region %s error: %s" % (region_name, str(exception))
)
try:
self.fm_client = FmClient(
region_name,
self.keystone_client.session,
endpoint_type=dccommon_consts.KS_ENDPOINT_DEFAULT,
endpoint=self.keystone_client.endpoint_cache.get_endpoint("fm")
endpoint=self.keystone_client.endpoint_cache.get_endpoint("fm"),
)
OpenStackDriver.os_clients_dict[region_name][
'fm'] = self.fm_client
OpenStackDriver.os_clients_dict[region_name]["fm"] = self.fm_client
except Exception as exception:
LOG.error('fm_client region %s error: %s' %
(region_name, str(exception)))
LOG.error(
"fm_client region %s error: %s" % (region_name, str(exception))
)
@classmethod
@lockutils.synchronized('dcorch-openstackdriver')
@lockutils.synchronized("dcorch-openstackdriver")
def delete_region_clients(cls, region_name, clear_token=False):
LOG.warn("delete_region_clients=%s, clear_token=%s" %
(region_name, clear_token))
LOG.warn(
"delete_region_clients=%s, clear_token=%s" % (region_name, clear_token)
)
if region_name in cls.os_clients_dict:
del cls.os_clients_dict[region_name]
if clear_token:
@@ -109,37 +114,37 @@ class OpenStackDriver(object):
try:
return self.keystone_client.get_enabled_projects(id_only)
except Exception as exception:
LOG.error('Error Occurred: %s', str(exception))
LOG.error("Error Occurred: %s", str(exception))
def get_project_by_name(self, projectname):
try:
return self.keystone_client.get_project_by_name(projectname)
except Exception as exception:
LOG.error('Error Occurred : %s', str(exception))
LOG.error("Error Occurred : %s", str(exception))
def get_project_by_id(self, projectid):
try:
return self.keystone_client.get_project_by_id(projectid)
except Exception as exception:
LOG.error('Error Occurred : %s', str(exception))
LOG.error("Error Occurred : %s", str(exception))
def get_enabled_users(self, id_only=True):
try:
return self.keystone_client.get_enabled_users(id_only)
except Exception as exception:
LOG.error('Error Occurred : %s', str(exception))
LOG.error("Error Occurred : %s", str(exception))
def get_user_by_name(self, username):
try:
return self.keystone_client.get_user_by_name(username)
except Exception as exception:
LOG.error('Error Occurred : %s', str(exception))
LOG.error("Error Occurred : %s", str(exception))
def get_user_by_id(self, userid):
try:
return self.keystone_client.get_user_by_id(userid)
except Exception as exception:
LOG.error('Error Occurred : %s', str(exception))
LOG.error("Error Occurred : %s", str(exception))
def get_resource_usages(self, project_id, user_id):
raise NotImplementedError
@@ -256,7 +261,7 @@ class OpenStackDriver(object):
region_lists.remove(dccommon_consts.CLOUD_0)
return region_lists
except Exception as exception:
LOG.error('Error Occurred: %s', str(exception))
LOG.error("Error Occurred: %s", str(exception))
raise
def _get_filtered_regions(self, project_id):
@@ -264,40 +269,42 @@ class OpenStackDriver(object):
def _is_token_valid(self, region_name):
try:
keystone = \
self.os_clients_dict[region_name]['keystone'].keystone_client
if (not OpenStackDriver._identity_tokens
or region_name not in OpenStackDriver._identity_tokens
or not OpenStackDriver._identity_tokens[region_name]):
identity_token = \
keystone.tokens.validate(keystone.session.get_token())
keystone = self.os_clients_dict[region_name]["keystone"].keystone_client
if (
not OpenStackDriver._identity_tokens
or region_name not in OpenStackDriver._identity_tokens
or not OpenStackDriver._identity_tokens[region_name]
):
identity_token = keystone.tokens.validate(keystone.session.get_token())
OpenStackDriver._identity_tokens[region_name] = identity_token
LOG.info("Got new token for subcloud %s, expires_at=%s" %
(region_name, identity_token['expires_at']))
LOG.info(
"Got new token for subcloud %s, expires_at=%s"
% (region_name, identity_token["expires_at"])
)
# Reset the cached dictionary
OpenStackDriver.os_clients_dict[region_name] = \
collections.defaultdict(dict)
OpenStackDriver.os_clients_dict[region_name] = collections.defaultdict(
dict
)
return False
keystone.tokens.validate(
OpenStackDriver._identity_tokens[region_name])
keystone.tokens.validate(OpenStackDriver._identity_tokens[region_name])
except Exception as exception:
LOG.info('_is_token_valid handle: %s', str(exception))
LOG.info("_is_token_valid handle: %s", str(exception))
# Reset the cached dictionary
OpenStackDriver.os_clients_dict[region_name] = \
collections.defaultdict(dict)
OpenStackDriver.os_clients_dict[region_name] = collections.defaultdict(dict)
OpenStackDriver._identity_tokens[region_name] = None
return False
identity_token = OpenStackDriver._identity_tokens[region_name]
expiry_time = timeutils.normalize_time(timeutils.parse_isotime(
identity_token['expires_at']))
expiry_time = timeutils.normalize_time(
timeutils.parse_isotime(identity_token["expires_at"])
)
if timeutils.is_soon(expiry_time, STALE_TOKEN_DURATION):
LOG.info("The cached keystone token for subcloud %s will "
"expire soon %s" %
(region_name, identity_token['expires_at']))
LOG.info(
"The cached keystone token for subcloud %s will "
"expire soon %s" % (region_name, identity_token["expires_at"])
)
# Reset the cached dictionary
OpenStackDriver.os_clients_dict[region_name] = \
collections.defaultdict(dict)
OpenStackDriver.os_clients_dict[region_name] = collections.defaultdict(dict)
OpenStackDriver._identity_tokens[region_name] = None
return False
else:


@@ -1,5 +1,5 @@
# Copyright (c) 2015 Ericsson AB.
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -35,16 +35,16 @@ class OrchestratorObject(base.VersionedObject):
"save" object method.
"""
OBJ_PROJECT_NAMESPACE = 'dcorch'
VERSION = '1.0'
OBJ_PROJECT_NAMESPACE = "dcorch"
VERSION = "1.0"
@staticmethod
def _from_db_object(context, obj, db_obj):
if db_obj is None:
return None
for field in obj.fields:
if field == 'metadata':
obj['metadata'] = db_obj['meta_data']
if field == "metadata":
obj["metadata"] = db_obj["meta_data"]
else:
obj[field] = db_obj[field]
@@ -67,6 +67,7 @@ class OrchestratorObjectRegistry(base.VersionedObjectRegistry):
setattr(objects, cls.obj_name(), cls)
else:
curr_version = versionutils.convert_version_to_tuple(
getattr(objects, cls.obj_name()).VERSION)
getattr(objects, cls.obj_name()).VERSION
)
if version >= curr_version:
setattr(objects, cls.obj_name(), cls)


@@ -28,51 +28,53 @@ class OrchJob(base.OrchestratorObject, base.VersionedObjectDictCompat):
"""DC Orchestrator orchestration job object."""
fields = {
'id': ovo_fields.IntegerField(),
'uuid': ovo_fields.UUIDField(),
'user_id': ovo_fields.StringField(),
'project_id': ovo_fields.StringField(),
'endpoint_type': ovo_fields.StringField(),
'source_resource_id': ovo_fields.StringField(), # resource master_id
'operation_type': ovo_fields.StringField(),
'resource_id': ovo_fields.IntegerField(),
'resource_info': ovo_fields.StringField(nullable=True),
"id": ovo_fields.IntegerField(),
"uuid": ovo_fields.UUIDField(),
"user_id": ovo_fields.StringField(),
"project_id": ovo_fields.StringField(),
"endpoint_type": ovo_fields.StringField(),
"source_resource_id": ovo_fields.StringField(), # resource master_id
"operation_type": ovo_fields.StringField(),
"resource_id": ovo_fields.IntegerField(),
"resource_info": ovo_fields.StringField(nullable=True),
}
def create(self):
if self.obj_attr_is_set('id'):
raise exceptions.ObjectActionError(action='create',
reason='already created')
if self.obj_attr_is_set("id"):
raise exceptions.ObjectActionError(
action="create", reason="already created"
)
updates = self.obj_get_changes()
try:
resource_id = updates.pop('resource_id')
resource_id = updates.pop("resource_id")
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a "
"resource_id set")
reason="cannot create a Subcloud object without a " "resource_id set",
)
updates = self.obj_get_changes()
try:
endpoint_type = updates.pop('endpoint_type')
endpoint_type = updates.pop("endpoint_type")
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a "
"endpoint_type set")
reason="cannot create a Subcloud object without a " "endpoint_type set",
)
updates = self.obj_get_changes()
try:
operation_type = updates.pop('operation_type')
operation_type = updates.pop("operation_type")
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a "
"operation_type set")
"operation_type set",
)
db_orch_job = db_api.orch_job_create(
self._context, resource_id, endpoint_type,
operation_type, updates)
self._context, resource_id, endpoint_type, operation_type, updates
)
return self._from_db_object(self._context, self, db_orch_job)
@classmethod
@@ -82,14 +84,13 @@ class OrchJob(base.OrchestratorObject, base.VersionedObjectDictCompat):
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
updates.pop('uuid', None)
db_orch_job = db_api.orch_job_update(self._context,
self.id, # pylint: disable=E1101
updates)
updates.pop("id", None)
updates.pop("uuid", None)
db_orch_job = db_api.orch_job_update(
self._context, self.id, updates # pylint: disable=E1101
)
self._from_db_object(self._context, self, db_orch_job)
self.obj_reset_changes()
def delete(self):
db_api.orch_job_delete(self._context,
self.id) # pylint: disable=E1101
db_api.orch_job_delete(self._context, self.id) # pylint: disable=E1101


@@ -30,43 +30,46 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
"""DC Orchestrator orchestration request object."""
fields = {
'id': ovo_fields.IntegerField(),
'uuid': ovo_fields.UUIDField(),
'state': ovo_fields.StringField(),
'try_count': ovo_fields.IntegerField(),
'api_version': ovo_fields.StringField(nullable=True),
'target_region_name': ovo_fields.StringField(),
'orch_job_id': ovo_fields.IntegerField(),
'orch_job': ovo_fields.ObjectField('OrchJob'),
'updated_at': ovo_fields.DateTimeField(nullable=True),
'deleted_at': ovo_fields.DateTimeField(nullable=True),
'deleted': ovo_fields.IntegerField()
"id": ovo_fields.IntegerField(),
"uuid": ovo_fields.UUIDField(),
"state": ovo_fields.StringField(),
"try_count": ovo_fields.IntegerField(),
"api_version": ovo_fields.StringField(nullable=True),
"target_region_name": ovo_fields.StringField(),
"orch_job_id": ovo_fields.IntegerField(),
"orch_job": ovo_fields.ObjectField("OrchJob"),
"updated_at": ovo_fields.DateTimeField(nullable=True),
"deleted_at": ovo_fields.DateTimeField(nullable=True),
"deleted": ovo_fields.IntegerField(),
}
def create(self):
if self.obj_attr_is_set('id'):
raise exceptions.ObjectActionError(action='create',
reason='already created')
if self.obj_attr_is_set("id"):
raise exceptions.ObjectActionError(
action="create", reason="already created"
)
updates = self.obj_get_changes()
try:
orch_job_id = updates.pop('orch_job_id')
orch_job_id = updates.pop("orch_job_id")
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a "
"orch_job_id set")
reason="cannot create a Subcloud object without a " "orch_job_id set",
)
updates = self.obj_get_changes()
try:
target_region_name = updates.pop('target_region_name')
target_region_name = updates.pop("target_region_name")
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a "
"target_region_name set")
"target_region_name set",
)
db_orch_request = db_api.orch_request_create(
self._context, orch_job_id, target_region_name, updates)
self._context, orch_job_id, target_region_name, updates
)
return self._from_db_object(self._context, self, db_orch_request)
@staticmethod
@@ -76,15 +79,16 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
db_orch_request = db_obj._as_dict()
# When first creating the request, the db_obj won't have
# orch_job set.
if 'orch_job' in db_orch_request:
if "orch_job" in db_orch_request:
orch_job = orchjob.OrchJob._from_db_object(
context, orchjob.OrchJob(), db_orch_request['orch_job'])
context, orchjob.OrchJob(), db_orch_request["orch_job"]
)
else:
orch_job = orchjob.OrchJob.get_by_id(
context, db_orch_request['orch_job_id'])
db_orch_request['orch_job'] = orch_job
return super(obj.__class__, obj)._from_db_object(context, obj,
db_orch_request)
context, db_orch_request["orch_job_id"]
)
db_orch_request["orch_job"] = orch_job
return super(obj.__class__, obj)._from_db_object(context, obj, db_orch_request)
@classmethod
def get_by_id(cls, context, id):
@@ -93,8 +97,7 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
@classmethod
def get_most_recent_failed_request(cls, context):
db_orch_request = \
db_api.orch_request_get_most_recent_failed_request(context)
db_orch_request = db_api.orch_request_get_most_recent_failed_request(context)
if db_orch_request:
return cls._from_db_object(context, cls(), db_orch_request)
else:
@@ -102,39 +105,46 @@ class OrchRequest(base.OrchestratorObject, base.VersionedObjectDictCompat):
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
updates.pop('uuid', None)
updates.pop("id", None)
updates.pop("uuid", None)
db_orch_request = db_api.orch_request_update(
self._context,
self.id, # pylint: disable=E1101
updates)
self._context, self.id, updates # pylint: disable=E1101
)
self._from_db_object(self._context, self, db_orch_request)
self.obj_reset_changes()
def delete(self):
db_api.orch_request_destroy(self._context,
self.id) # pylint: disable=E1101
db_api.orch_request_destroy(self._context, self.id) # pylint: disable=E1101
@classmethod
def delete_previous_failed_requests(cls, context, delete_time):
db_api.orch_request_delete_previous_failed_requests(
context, delete_time)
db_api.orch_request_delete_previous_failed_requests(context, delete_time)
@base.OrchestratorObjectRegistry.register
class OrchRequestList(ovo_base.ObjectListBase, base.OrchestratorObject):
"""DC Orchestrator orchestration request list object."""
VERSION = '1.1'
VERSION = "1.1"
fields = {
'objects': ovo_fields.ListOfObjectsField('OrchRequest'),
"objects": ovo_fields.ListOfObjectsField("OrchRequest"),
}
@classmethod
def get_by_attrs(cls, context, endpoint_type, resource_type=None,
target_region_name=None, states=None):
def get_by_attrs(
cls,
context,
endpoint_type,
resource_type=None,
target_region_name=None,
states=None,
):
orch_reqs = db_api.orch_request_get_by_attrs(
context, endpoint_type, resource_type=resource_type,
target_region_name=target_region_name, states=states)
return ovo_base.obj_make_list(
context, cls(context), OrchRequest, orch_reqs)
context,
endpoint_type,
resource_type=resource_type,
target_region_name=target_region_name,
states=states,
)
return ovo_base.obj_make_list(context, cls(context), OrchRequest, orch_reqs)


@@ -29,33 +29,34 @@ class Resource(base.OrchestratorObject, base.VersionedObjectDictCompat):
"""DC Orchestrator subcloud object."""
fields = {
'id': ovo_fields.IntegerField(),
'uuid': ovo_fields.UUIDField(),
'resource_type': ovo_fields.StringField(),
'master_id': ovo_fields.StringField(),
"id": ovo_fields.IntegerField(),
"uuid": ovo_fields.UUIDField(),
"resource_type": ovo_fields.StringField(),
"master_id": ovo_fields.StringField(),
}
def create(self):
if self.obj_attr_is_set('id'):
raise exceptions.ObjectActionError(action='create',
reason='already created')
if self.obj_attr_is_set("id"):
raise exceptions.ObjectActionError(
action="create", reason="already created"
)
updates = self.obj_get_changes()
try:
resource_type = updates.pop('resource_type')
resource_type = updates.pop("resource_type")
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Resource object without a "
"resource_type set")
reason="cannot create a Resource object without a " "resource_type set",
)
db_resource = db_api.resource_create(
self._context, resource_type, updates)
db_resource = db_api.resource_create(self._context, resource_type, updates)
return self._from_db_object(self._context, self, db_resource)
@classmethod
def get_by_type_and_master_id(cls, context, resource_type, master_id):
db_resource = db_api.resource_get_by_type_and_master_id(
context, resource_type, master_id)
context, resource_type, master_id
)
return cls._from_db_object(context, cls(), db_resource)
@classmethod
@@ -65,17 +66,16 @@ class Resource(base.OrchestratorObject, base.VersionedObjectDictCompat):
def delete(self):
db_api.resource_delete(
self._context,
self.resource_type, # pylint: disable=E1101
self.master_id) # pylint: disable=E1101
self._context, self.resource_type, self.master_id # pylint: disable=E1101
) # pylint: disable=E1101
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
updates.pop('uuid', None)
db_resource = db_api.resource_update(self._context,
self.id, # pylint: disable=E1101
updates)
updates.pop("id", None)
updates.pop("uuid", None)
db_resource = db_api.resource_update(
self._context, self.id, updates # pylint: disable=E1101
)
self._from_db_object(self._context, self, db_resource)
self.obj_reset_changes()
@@ -83,15 +83,14 @@ class Resource(base.OrchestratorObject, base.VersionedObjectDictCompat):
@base.OrchestratorObjectRegistry.register
class ResourceList(ovo_base.ObjectListBase, base.OrchestratorObject):
"""DC Orchestrator resource list object."""
VERSION = '1.1'
VERSION = "1.1"
fields = {
'objects': ovo_fields.ListOfObjectsField('Resource'),
"objects": ovo_fields.ListOfObjectsField("Resource"),
}
@classmethod
def get_all(cls, context, resource_type=None):
resources = db_api.resource_get_all(
context, resource_type)
return ovo_base.obj_make_list(
context, cls(context), Resource, resources)
resources = db_api.resource_get_all(context, resource_type)
return ovo_base.obj_make_list(context, cls(context), Resource, resources)


@@ -27,22 +27,23 @@ class Service(base.OrchestratorObject, base.VersionedObjectDictCompat):
"""DC Orchestrator service object."""
fields = {
'id': ovo_fields.UUIDField(),
'host': ovo_fields.StringField(),
'binary': ovo_fields.StringField(),
'topic': ovo_fields.StringField(),
'disabled': ovo_fields.BooleanField(),
'disabled_reason': ovo_fields.StringField(nullable=True),
'created_at': ovo_fields.DateTimeField(),
'updated_at': ovo_fields.DateTimeField(),
'deleted_at': ovo_fields.DateTimeField(nullable=True),
'deleted': ovo_fields.IntegerField(nullable=True),
"id": ovo_fields.UUIDField(),
"host": ovo_fields.StringField(),
"binary": ovo_fields.StringField(),
"topic": ovo_fields.StringField(),
"disabled": ovo_fields.BooleanField(),
"disabled_reason": ovo_fields.StringField(nullable=True),
"created_at": ovo_fields.DateTimeField(),
"updated_at": ovo_fields.DateTimeField(),
"deleted_at": ovo_fields.DateTimeField(nullable=True),
"deleted": ovo_fields.IntegerField(nullable=True),
}
@classmethod
def create(cls, context, service_id, host=None, binary=None, topic=None):
obj = db_api.service_create(context, service_id=service_id, host=host,
binary=binary, topic=topic)
obj = db_api.service_create(
context, service_id=service_id, host=host, binary=binary, topic=topic
)
return cls._from_db_object(context, cls(context), obj)
@classmethod


@@ -34,37 +34,38 @@ class Subcloud(base.OrchestratorObject, base.VersionedObjectDictCompat):
"""DC Orchestrator subcloud object."""
fields = {
'id': ovo_fields.IntegerField(),
'uuid': ovo_fields.UUIDField(),
'region_name': ovo_fields.StringField(),
'software_version': ovo_fields.StringField(),
'management_state': ovo_fields.StringField(nullable=True),
'availability_status': ovo_fields.StringField(),
'capabilities': ovo_fields.DictOfListOfStringsField(),
'initial_sync_state': ovo_fields.StringField(),
'management_ip': ovo_fields.StringField()
"id": ovo_fields.IntegerField(),
"uuid": ovo_fields.UUIDField(),
"region_name": ovo_fields.StringField(),
"software_version": ovo_fields.StringField(),
"management_state": ovo_fields.StringField(nullable=True),
"availability_status": ovo_fields.StringField(),
"capabilities": ovo_fields.DictOfListOfStringsField(),
"initial_sync_state": ovo_fields.StringField(),
"management_ip": ovo_fields.StringField(),
}
def create(self):
if self.obj_attr_is_set('id'):
raise exceptions.ObjectActionError(action='create',
reason='already created')
if self.obj_attr_is_set("id"):
raise exceptions.ObjectActionError(
action="create", reason="already created"
)
updates = self.obj_get_changes()
try:
region_name = updates.pop('region_name')
region_name = updates.pop("region_name")
except KeyError:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a Subcloud object without a "
"region_name set")
reason="cannot create a Subcloud object without a " "region_name set",
)
try:
db_subcloud = db_api.subcloud_create(
self._context, region_name, updates)
db_subcloud = db_api.subcloud_create(self._context, region_name, updates)
return self._from_db_object(self._context, self, db_subcloud)
except Exception as e:
LOG.error("Failed to create subcloud %s: %s" % (
self.region_name, # pylint: disable=E1101
str(e)))
LOG.error(
"Failed to create subcloud %s: %s"
% (self.region_name, str(e)) # pylint: disable=no-member
)
raise e
@classmethod
@@ -77,17 +78,15 @@ class Subcloud(base.OrchestratorObject, base.VersionedObjectDictCompat):
try:
db_api.subcloud_delete(context, subcloud_name)
except Exception as e:
LOG.error("Failed to delete subcloud entry for %s: %s"
% (subcloud_name, e))
LOG.error("Failed to delete subcloud entry for %s: %s" % (subcloud_name, e))
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
updates.pop('uuid', None)
updates.pop("id", None)
updates.pop("uuid", None)
db_subcloud = db_api.subcloud_update(
self._context,
self.region_name, # pylint: disable=E1101
updates)
self._context, self.region_name, updates # pylint: disable=no-member
)
self._from_db_object(self._context, self, db_subcloud)
self.obj_reset_changes()
@@ -96,32 +95,35 @@ class Subcloud(base.OrchestratorObject, base.VersionedObjectDictCompat):
# delete the associated sync requests
try:
db_api.orch_request_delete_by_subcloud(
self._context,
self.region_name) # pylint: disable=E1101
self._context, self.region_name # pylint: disable=no-member
)
except Exception as e:
LOG.error("Failed to delete orchestration request for %s: %s"
% (self.region_name, # pylint: disable=E1101
str(e)))
LOG.error(
"Failed to delete orchestration request for %s: %s"
% (self.region_name, str(e)) # pylint: disable=no-member
)
try:
db_api.subcloud_delete(self._context,
self.region_name) # pylint: disable=E1101
db_api.subcloud_delete(
self._context, self.region_name # pylint: disable=no-member
)
except Exception as e:
LOG.error("Failed to delete subcloud entry for %s: %s"
% (self.region_name, # pylint: disable=E1101
str(e)))
LOG.error(
"Failed to delete subcloud entry for %s: %s"
% (self.region_name, str(e)) # pylint: disable=no-member
)
@base.OrchestratorObjectRegistry.register
class SubcloudList(ovo_base.ObjectListBase, base.OrchestratorObject):
"""DC Orchestrator subcloud list object."""
VERSION = '1.1'
VERSION = "1.1"
fields = {
'objects': ovo_fields.ListOfObjectsField('Subcloud'),
"objects": ovo_fields.ListOfObjectsField("Subcloud"),
}
@classmethod
def get_all(cls, context):
subclouds = db_api.subcloud_get_all(context)
return ovo_base.obj_make_list(
context, cls(context), Subcloud, subclouds)
return ovo_base.obj_make_list(context, cls(context), Subcloud, subclouds)


@@ -27,35 +27,37 @@ from dcorch.objects import base
# pylint: disable=no-member
@base.OrchestratorObjectRegistry.register
class SubcloudResource(base.OrchestratorObject,
base.VersionedObjectDictCompat):
class SubcloudResource(base.OrchestratorObject, base.VersionedObjectDictCompat):
"""DC Orchestrator subcloud object."""
fields = {
'id': ovo_fields.IntegerField(),
'uuid': ovo_fields.UUIDField(),
'shared_config_state': ovo_fields.StringField(),
'subcloud_resource_id': ovo_fields.StringField(),
'resource_id': ovo_fields.IntegerField(),
'subcloud_id': ovo_fields.IntegerField(),
"id": ovo_fields.IntegerField(),
"uuid": ovo_fields.UUIDField(),
"shared_config_state": ovo_fields.StringField(),
"subcloud_resource_id": ovo_fields.StringField(),
"resource_id": ovo_fields.IntegerField(),
"subcloud_id": ovo_fields.IntegerField(),
}
def create(self):
if self.obj_attr_is_set('id'):
raise exceptions.ObjectActionError(action='create',
reason='already created')
if self.obj_attr_is_set("id"):
raise exceptions.ObjectActionError(
action="create", reason="already created"
)
updates = self.obj_get_changes()
if 'subcloud_resource_id' not in updates:
if "subcloud_resource_id" not in updates:
raise exceptions.ObjectActionError(
action="create",
reason="cannot create a SubcloudResource object without a "
"subcloud_resource_id")
"subcloud_resource_id",
)
resource_id = updates.pop('resource_id')
subcloud_id = updates.pop('subcloud_id')
resource_id = updates.pop("resource_id")
subcloud_id = updates.pop("subcloud_id")
db_subcloud_resource = db_api.subcloud_resource_create(
self._context, subcloud_id, resource_id, updates)
self._context, subcloud_id, resource_id, updates
)
return self._from_db_object(self._context, self, db_subcloud_resource)
def is_managed(self):
@@ -68,41 +70,42 @@ class SubcloudResource(base.OrchestratorObject,
@classmethod
def get_by_resource_and_subcloud(cls, context, res_id, subcloud_id):
db_subcloud_resource = \
db_api.subcloud_resource_get_by_resource_and_subcloud(
context, res_id, subcloud_id)
db_subcloud_resource = db_api.subcloud_resource_get_by_resource_and_subcloud(
context, res_id, subcloud_id
)
return cls._from_db_object(context, cls(), db_subcloud_resource)
def save(self):
updates = self.obj_get_changes()
updates.pop('id', None)
updates.pop('uuid', None)
updates.pop('resource', None)
updates.pop('subcloud', None)
updates.pop("id", None)
updates.pop("uuid", None)
updates.pop("resource", None)
updates.pop("subcloud", None)
db_subcloud = db_api.subcloud_resource_update(
self._context,
self.id, # pylint: disable=E1101
updates)
self._context, self.id, updates # pylint: disable=E1101
)
self._from_db_object(self._context, self, db_subcloud)
self.obj_reset_changes()
def delete(self):
db_api.subcloud_resource_delete(self._context,
self.id) # pylint: disable=E1101
db_api.subcloud_resource_delete(self._context, self.id) # pylint: disable=E1101
@base.OrchestratorObjectRegistry.register
class SubcloudResourceList(ovo_base.ObjectListBase, base.OrchestratorObject):
"""DC Orchestrator subcloud list object."""
VERSION = '1.1'
VERSION = "1.1"
fields = {
'objects': ovo_fields.ListOfObjectsField('SubcloudResource'),
"objects": ovo_fields.ListOfObjectsField("SubcloudResource"),
}
@classmethod
def get_by_resource_id(cls, context, resource_id):
subcloud_resources = db_api.subcloud_resources_get_by_resource(
context, resource_id)
context, resource_id
)
return ovo_base.obj_make_list(
context, cls(context), SubcloudResource, subcloud_resources)
context, cls(context), SubcloudResource, subcloud_resources
)


@@ -26,12 +26,12 @@ class EngineClient(object):
1.0 - Initial version
"""
BASE_RPC_API_VERSION = '1.0'
BASE_RPC_API_VERSION = "1.0"
def __init__(self):
self._client = messaging.get_rpc_client(
topic=consts.TOPIC_ORCH_ENGINE,
version=self.BASE_RPC_API_VERSION)
topic=consts.TOPIC_ORCH_ENGINE, version=self.BASE_RPC_API_VERSION
)
@staticmethod
def make_msg(method, **kwargs):
@@ -53,17 +53,26 @@ class EngineClient(object):
client = self._client
return client.cast(ctxt, method, **kwargs)
def get_usage_for_project_and_user(self, ctxt, endpoint_type,
project_id, user_id=None):
return self.call(ctxt, self.make_msg('get_usage_for_project_and_user',
endpoint_type=endpoint_type,
project_id=project_id,
user_id=user_id))
def get_usage_for_project_and_user(
self, ctxt, endpoint_type, project_id, user_id=None
):
return self.call(
ctxt,
self.make_msg(
"get_usage_for_project_and_user",
endpoint_type=endpoint_type,
project_id=project_id,
user_id=user_id,
),
)
def quota_sync_for_project(self, ctxt, project_id, user_id):
return self.cast(ctxt, self.make_msg('quota_sync_for_project',
project_id=project_id,
user_id=user_id))
return self.cast(
ctxt,
self.make_msg(
"quota_sync_for_project", project_id=project_id, user_id=user_id
),
)
class EngineWorkerClient(object):
@@ -73,12 +82,12 @@ class EngineWorkerClient(object):
1.0 - Initial version
"""
BASE_RPC_API_VERSION = '1.0'
BASE_RPC_API_VERSION = "1.0"
def __init__(self):
self._client = messaging.get_rpc_client(
topic=consts.TOPIC_ORCH_ENGINE_WORKER,
version=self.BASE_RPC_API_VERSION)
topic=consts.TOPIC_ORCH_ENGINE_WORKER, version=self.BASE_RPC_API_VERSION
)
@staticmethod
def make_msg(method, **kwargs):
@@ -102,74 +111,92 @@ class EngineWorkerClient(object):
def keypair_sync_for_user(self, ctxt, job_id, payload):
return self.cast(
ctxt,
self.make_msg('keypair_sync_for_user', job_id=job_id,
payload=payload))
ctxt, self.make_msg("keypair_sync_for_user", job_id=job_id, payload=payload)
)
def image_sync(self, ctxt, job_id, payload):
return self.cast(
ctxt,
self.make_msg('image_sync', job_id=job_id, payload=payload))
ctxt, self.make_msg("image_sync", job_id=job_id, payload=payload)
)
def add_subcloud(self, ctxt, subcloud_name, sw_version, management_ip):
return self.call(
ctxt,
self.make_msg('add_subcloud', subcloud_name=subcloud_name,
sw_version=sw_version, management_ip=management_ip))
self.make_msg(
"add_subcloud",
subcloud_name=subcloud_name,
sw_version=sw_version,
management_ip=management_ip,
),
)
def del_subcloud(self, ctxt, subcloud_name):
return self.call(
ctxt,
self.make_msg('del_subcloud', subcloud_name=subcloud_name))
ctxt, self.make_msg("del_subcloud", subcloud_name=subcloud_name)
)
def update_subcloud_states(self, ctxt, subcloud_name, management_state,
availability_status):
def update_subcloud_states(
self, ctxt, subcloud_name, management_state, availability_status
):
return self.call(
ctxt,
self.make_msg('update_subcloud_states',
subcloud_name=subcloud_name,
management_state=management_state,
availability_status=availability_status))
self.make_msg(
"update_subcloud_states",
subcloud_name=subcloud_name,
management_state=management_state,
availability_status=availability_status,
),
)
def add_subcloud_sync_endpoint_type(self, ctxt, subcloud_name,
endpoint_type_list):
def add_subcloud_sync_endpoint_type(self, ctxt, subcloud_name, endpoint_type_list):
return self.cast(
ctxt,
self.make_msg('add_subcloud_sync_endpoint_type',
subcloud_name=subcloud_name,
endpoint_type_list=endpoint_type_list))
self.make_msg(
"add_subcloud_sync_endpoint_type",
subcloud_name=subcloud_name,
endpoint_type_list=endpoint_type_list,
),
)
def remove_subcloud_sync_endpoint_type(self, ctxt, subcloud_name,
endpoint_type_list):
def remove_subcloud_sync_endpoint_type(
self, ctxt, subcloud_name, endpoint_type_list
):
return self.cast(
ctxt,
self.make_msg('remove_subcloud_sync_endpoint_type',
subcloud_name=subcloud_name,
endpoint_type_list=endpoint_type_list))
self.make_msg(
"remove_subcloud_sync_endpoint_type",
subcloud_name=subcloud_name,
endpoint_type_list=endpoint_type_list,
),
)
def sync_subclouds(self, ctxt, subcloud_sync_list):
return self.cast(
ctxt,
self.make_msg('sync_subclouds',
subcloud_sync_list=subcloud_sync_list))
ctxt, self.make_msg("sync_subclouds", subcloud_sync_list=subcloud_sync_list)
)
def run_sync_audit(self, ctxt, subcloud_sync_list):
return self.cast(
ctxt,
self.make_msg('run_sync_audit',
subcloud_sync_list=subcloud_sync_list))
ctxt, self.make_msg("run_sync_audit", subcloud_sync_list=subcloud_sync_list)
)
def initial_sync_subclouds(self, ctxt, subcloud_capabilities):
return self.cast(
ctxt,
self.make_msg('initial_sync_subclouds',
subcloud_capabilities=subcloud_capabilities))
self.make_msg(
"initial_sync_subclouds", subcloud_capabilities=subcloud_capabilities
),
)
def update_subcloud_version(self, ctxt, subcloud_name, sw_version):
return self.call(
ctxt,
self.make_msg('update_subcloud_version',
subcloud_name=subcloud_name, sw_version=sw_version))
self.make_msg(
"update_subcloud_version",
subcloud_name=subcloud_name,
sw_version=sw_version,
),
)
def update_subcloud_management_ip(self, ctxt, subcloud_name, management_ip):
return self.call(
@@ -185,4 +212,5 @@ class EngineWorkerClient(object):
# that there is work to do.
def sync_request(self, ctxt, endpoint_type):
return self.cast(
ctxt, self.make_msg('sync_request', endpoint_type=endpoint_type))
ctxt, self.make_msg("sync_request", endpoint_type=endpoint_type)
)


@@ -36,9 +36,11 @@ get_engine = api.get_engine
CAPABILITES = {
'endpoint_types':
[dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY]}
"endpoint_types": [
dccommon_consts.ENDPOINT_TYPE_PLATFORM,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
]
}
class FakeException(Exception):
@@ -53,8 +55,7 @@ class OrchestratorTestCase(base.BaseTestCase):
"""Test case base class for all unit tests."""
def setup_dummy_db(self):
options.cfg.set_defaults(options.database_opts,
sqlite_synchronous=False)
options.cfg.set_defaults(options.database_opts, sqlite_synchronous=False)
options.set_defaults(cfg.CONF, connection="sqlite://")
engine = get_engine()
db_api.db_sync(engine)
@@ -66,7 +67,7 @@ class OrchestratorTestCase(base.BaseTestCase):
meta.reflect(bind=engine)
for table in reversed(meta.sorted_tables):
if table.name == 'migrate_version':
if table.name == "migrate_version":
continue
engine.execute(table.delete())
@@ -81,74 +82,75 @@ class OrchestratorTestCase(base.BaseTestCase):
def _mock_pecan(self):
"""Mock pecan's abort"""
mock_patch = mock.patch.object(pecan, 'abort', wraps=pecan.abort)
mock_patch = mock.patch.object(pecan, "abort", wraps=pecan.abort)
self.mock_pecan_abort = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_rpc_client(self):
"""Mock rpc's manager client"""
mock_patch = mock.patch.object(rpc_client, 'EngineWorkerClient')
mock_patch = mock.patch.object(rpc_client, "EngineWorkerClient")
self.mock_rpc_client = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_rpc_client_subcloud_state_client(self):
mock_patch = mock.patch.object(dcmanager_rpc_client, 'SubcloudStateClient')
mock_patch = mock.patch.object(dcmanager_rpc_client, "SubcloudStateClient")
self.rpc_client_subcloud_state_client = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_rpc_client_manager(self):
mock_patch = mock.patch.object(dcmanager_rpc_client, 'ManagerClient')
mock_patch = mock.patch.object(dcmanager_rpc_client, "ManagerClient")
self.rpc_client_manager = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_log(self, target):
mock_patch = mock.patch.object(target, 'LOG')
mock_patch = mock.patch.object(target, "LOG")
self.log = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_openstack_driver(self):
mock_patch = mock.patch(
'dccommon.drivers.openstack.sdk_platform.OptimizedOpenStackDriver')
"dccommon.drivers.openstack.sdk_platform.OptimizedOpenStackDriver"
)
self.mock_openstack_driver = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_keystone_client(self):
mock_patch = mock.patch('keystoneclient.client.Client')
mock_patch = mock.patch("keystoneclient.client.Client")
self.mock_keystone_client = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_endpoint_cache_from_keystone(self):
mock_patch = mock.patch(
'dccommon.drivers.openstack.keystone_v3.OptimizedEndpointCache')
"dccommon.drivers.openstack.keystone_v3.OptimizedEndpointCache"
)
self.mock_endpoint_cache_from_keystone = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_endpoint_cache(self):
mock_patch = mock.patch(
'dccommon.endpoint_cache.OptimizedEndpointCache')
mock_patch = mock.patch("dccommon.endpoint_cache.OptimizedEndpointCache")
self.mock_endpoint_cache = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_m_dbs_client(self):
mock_patch = mock.patch('dcorch.engine.sync_thread.dbsyncclient.Client')
mock_patch = mock.patch("dcorch.engine.sync_thread.dbsyncclient.Client")
self.mock_m_dbs_client = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_sc_dbs_client(self):
mock_patch = mock.patch('dcorch.engine.sync_services.identity.Client')
mock_patch = mock.patch("dcorch.engine.sync_services.identity.Client")
self.mock_sc_dbs_client = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_sysinv_client(self, target):
mock_patch = mock.patch.object(target, 'SysinvClient')
mock_patch = mock.patch.object(target, "SysinvClient")
self.mock_sysinv_client = mock_patch.start()
self.addCleanup(mock_patch.stop)
def _mock_builtins_open(self):
"""Mock builtins' open"""
mock_patch = mock.patch.object(builtins, 'open')
mock_patch = mock.patch.object(builtins, "open")
self.mock_builtins_open = mock_patch.start()
self.addCleanup(mock_patch.stop)


@@ -21,7 +21,7 @@ from dcorch.tests.unit.common import constants as test_consts
config.register_options()
OPT_GROUP_NAME = 'keystone_authtoken'
OPT_GROUP_NAME = "keystone_authtoken"
cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token")
@@ -38,31 +38,30 @@ class DCOrchApiTest(base.OrchestratorTestCase):
self.CONF = self.useFixture(config_fixture).conf
config_fixture.set_config_dirs([])
self.CONF.set_override('auth_strategy', 'noauth')
self.CONF.set_override("auth_strategy", "noauth")
self.app = self._make_app()
self.url = '/'
self.url = "/"
# The put method is used as a default value, leading to the generic
# implementation on controllers in case the method is not specified
self.method = self.app.put
self.params = {}
self.verb = None
self.headers = {
'X-Tenant-Id': str(uuid.uuid4()), 'X_ROLE': 'admin,member,reader',
'X-Identity-Status': 'Confirmed', 'X-Project-Name': 'admin'
"X-Tenant-Id": str(uuid.uuid4()),
"X_ROLE": "admin,member,reader",
"X-Identity-Status": "Confirmed",
"X-Project-Name": "admin",
}
def _make_app(self, enable_acl=False):
self.config_fixture = {
'app': {
'root': 'dcorch.api.controllers.root.RootController',
'modules': ['dcorch.api'],
'enable_acl': enable_acl,
'errors': {
400: '/error',
'__force_dict__': True
}
"app": {
"root": "dcorch.api.controllers.root.RootController",
"modules": ["dcorch.api"],
"enable_acl": enable_acl,
"errors": {400: "/error", "__force_dict__": True},
},
}
@@ -76,8 +75,10 @@ class DCOrchApiTest(base.OrchestratorTestCase):
)
def _assert_response(
self, response, status_code=http.client.OK,
content_type=test_consts.APPLICATION_JSON
self,
response,
status_code=http.client.OK,
content_type=test_consts.APPLICATION_JSON,
):
"""Assert the response for a request"""
@@ -85,8 +86,12 @@ class DCOrchApiTest(base.OrchestratorTestCase):
self.assertEqual(response.content_type, content_type)
def _assert_pecan_and_response(
self, response, http_status, content=None, call_count=1,
content_type=test_consts.TEXT_PLAIN
self,
response,
http_status,
content=None,
call_count=1,
content_type=test_consts.TEXT_PLAIN,
):
"""Assert the response and pecan abort for a failed request"""
@@ -104,7 +109,7 @@ class TestRootController(DCOrchApiTest):
def setUp(self):
super(TestRootController, self).setUp()
self.url = '/'
self.url = "/"
self.method = self.app.get
def _test_method_returns_405(self, method, content_type=test_consts.TEXT_PLAIN):
@@ -123,7 +128,7 @@ class TestRootController(DCOrchApiTest):
self._assert_response(response)
json_body = jsonutils.loads(response.body)
versions = json_body.get('versions')
versions = json_body.get("versions")
self.assertEqual(1, len(versions))
def test_request_id(self):
@@ -132,11 +137,9 @@ class TestRootController(DCOrchApiTest):
response = self._send_request()
self._assert_response(response)
self.assertIn('x-openstack-request-id', response.headers)
self.assertTrue(
response.headers['x-openstack-request-id'].startswith('req-')
)
id_part = response.headers['x-openstack-request-id'].split('req-')[1]
self.assertIn("x-openstack-request-id", response.headers)
self.assertTrue(response.headers["x-openstack-request-id"].startswith("req-"))
id_part = response.headers["x-openstack-request-id"].split("req-")[1]
self.assertTrue(uuidutils.is_uuid_like(id_part))
def test_post(self):
@@ -162,19 +165,17 @@ class TestRootController(DCOrchApiTest):
def test_head(self):
"""Test head request is not allowed on root"""
self._test_method_returns_405(
self.app.head, content_type=test_consts.TEXT_HTML
)
self._test_method_returns_405(self.app.head, content_type=test_consts.TEXT_HTML)
class TestErrors(DCOrchApiTest):
def setUp(self):
super(TestErrors, self).setUp()
cfg.CONF.set_override('admin_tenant', 'fake_tenant_id', group='cache')
cfg.CONF.set_override("admin_tenant", "fake_tenant_id", group="cache")
def test_404(self):
self.url = '/assert_called_once'
self.url = "/assert_called_once"
self.method = self.app.get
response = self._send_request()
@@ -183,7 +184,7 @@ class TestErrors(DCOrchApiTest):
)
def test_version_1_root_controller(self):
self.url = f'/v1.0/{uuidutils.generate_uuid()}/bad_method'
self.url = f"/v1.0/{uuidutils.generate_uuid()}/bad_method"
self.method = self.app.patch
response = self._send_request()
@@ -197,7 +198,7 @@ class TestKeystoneAuth(DCOrchApiTest):
def setUp(self):
super(TestKeystoneAuth, self).setUp()
cfg.CONF.set_override('auth_strategy', 'keystone')
cfg.CONF.set_override("auth_strategy", "keystone")
self.method = self.app.get


@@ -4,6 +4,6 @@
#
# Content-type
TEXT_HTML = 'text/html'
TEXT_PLAIN = 'text/plain'
APPLICATION_JSON = 'application/json'
TEXT_HTML = "text/html"
TEXT_PLAIN = "text/plain"
APPLICATION_JSON = "application/json"


@@ -39,8 +39,7 @@ SUBCLOUD_NAME_REGION_ONE = "RegionOne"
class DBAPIOrchRequestTest(base.OrchestratorTestCase):
def setup_dummy_db(self):
options.cfg.set_defaults(options.database_opts,
sqlite_synchronous=False)
options.cfg.set_defaults(options.database_opts, sqlite_synchronous=False)
options.set_defaults(cfg.CONF, connection="sqlite://")
engine = get_engine()
db_api.db_sync(engine)
@@ -53,27 +52,26 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
meta.reflect(bind=engine)
for table in reversed(meta.sorted_tables):
if table.name == 'migrate_version':
if table.name == "migrate_version":
continue
engine.execute(table.delete())
@staticmethod
def create_subcloud(ctxt, region_name, **kwargs):
values = {
'management_state': None,
'management_ip': '192.168.0.1'
}
values = {"management_state": None, "management_ip": "192.168.0.1"}
values.update(kwargs)
return db_api.subcloud_create(ctxt, region_name, values)
def create_default_subcloud(self, ctxt):
region_name = 'RegionOne'
software_version = '17.07'
availability_status = 'online'
region_name = "RegionOne"
software_version = "17.07"
availability_status = "online"
subcloud = self.create_subcloud(
ctxt, region_name,
ctxt,
region_name,
software_version=software_version,
availability_status=availability_status)
availability_status=availability_status,
)
self.assertIsNotNone(subcloud)
return subcloud
@@ -84,25 +82,19 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
return db_api.resource_create(ctxt, resource_type, values)
@staticmethod
def create_orch_job(ctxt, resource_id, endpoint_type,
operation_type, values=None):
def create_orch_job(ctxt, resource_id, endpoint_type, operation_type, values=None):
if values is None:
values = {}
orch_job = db_api.orch_job_create(ctxt,
resource_id,
endpoint_type,
operation_type,
values)
orch_job = db_api.orch_job_create(
ctxt, resource_id, endpoint_type, operation_type, values
)
return orch_job
@staticmethod
def create_subcloud_resource(ctxt, subcloud_id, resource_id, **kwargs):
values = {}
values.update(kwargs)
return db_api.subcloud_resource_create(ctxt,
subcloud_id,
resource_id,
values)
return db_api.subcloud_resource_create(ctxt, subcloud_id, resource_id, values)
def setUp(self):
super(DBAPIOrchRequestTest, self).setUp()
@@ -112,160 +104,135 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
self.ctx = utils.dummy_context()
def test_create_orch_job(self):
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_resource(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER)
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
orch_job = self.create_orch_job(self.ctx,
resource.id,
endpoint_type,
operation_type)
orch_job = self.create_orch_job(
self.ctx, resource.id, endpoint_type, operation_type
)
self.assertIsNotNone(orch_job)
self.assertEqual(dccommon_consts.ENDPOINT_TYPE_PLATFORM,
orch_job.endpoint_type)
self.assertEqual(dccommon_consts.ENDPOINT_TYPE_PLATFORM, orch_job.endpoint_type)
created_orch_jobs = db_api.orch_job_get_all(
self.ctx,
resource_id=resource.id)
self.assertEqual(resource.id,
created_orch_jobs[0].get('resource_id'))
created_orch_jobs = db_api.orch_job_get_all(self.ctx, resource_id=resource.id)
self.assertEqual(resource.id, created_orch_jobs[0].get("resource_id"))
def test_primary_key_subcloud(self):
self.create_subcloud(self.ctx, SUBCLOUD_NAME_REGION_ONE)
self.assertRaises(oslo_db.exception.DBDuplicateEntry,
self.create_subcloud, self.ctx,
SUBCLOUD_NAME_REGION_ONE)
self.assertRaises(
oslo_db.exception.DBDuplicateEntry,
self.create_subcloud,
self.ctx,
SUBCLOUD_NAME_REGION_ONE,
)
def no_test_unique_key_orch_job_uuid(self):
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_resource(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER)
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
orch_job = self.create_orch_job(self.ctx,
resource.id,
endpoint_type,
operation_type)
orch_job = self.create_orch_job(
self.ctx, resource.id, endpoint_type, operation_type
)
self.assertIsNotNone(orch_job)
self.assertRaises(oslo_db.exception.DBDuplicateEntry,
self.create_orch_job, self.ctx,
resource.id,
endpoint_type,
operation_type)
self.assertRaises(
oslo_db.exception.DBDuplicateEntry,
self.create_orch_job,
self.ctx,
resource.id,
endpoint_type,
operation_type,
)
def create_default_resource(self, resource_type):
resource = self.create_resource(self.ctx,
resource_type)
resource = self.create_resource(self.ctx, resource_type)
return resource
def create_default_orch_request(self, orch_job_id, target_region_name):
api_version = 1.0
values = {'api_version': api_version,
'target_region_name': target_region_name}
orch_request = db_api.orch_request_create(self.ctx,
orch_job_id,
target_region_name,
values)
values = {"api_version": api_version, "target_region_name": target_region_name}
orch_request = db_api.orch_request_create(
self.ctx, orch_job_id, target_region_name, values
)
return orch_request
def test_orch_request_update(self):
resource = self.create_default_resource(
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_default_resource(consts.RESOURCE_TYPE_SYSINV_USER)
target_region_name = "RegionOne"
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
values = {}
orch_job = self.create_orch_job(self.ctx,
resource.id,
endpoint_type,
operation_type,
values)
orch_request = self.create_default_orch_request(orch_job.id,
target_region_name)
orch_job = self.create_orch_job(
self.ctx, resource.id, endpoint_type, operation_type, values
)
orch_request = self.create_default_orch_request(orch_job.id, target_region_name)
self.assertIsNotNone(orch_request)
orch_job_state = consts.ORCH_REQUEST_STATE_COMPLETED
try_count = 6
api_version = "1.0"
values = {'state': orch_job_state,
'try_count': try_count,
'api_version': api_version}
values = {
"state": orch_job_state,
"try_count": try_count,
"api_version": api_version,
}
db_api.orch_request_update(self.ctx,
orch_request.uuid,
values)
gorch_request = db_api.orch_request_get(self.ctx,
orch_request.uuid)
self.assertEqual(orch_job_state,
gorch_request.state)
self.assertEqual(try_count,
gorch_request.try_count)
self.assertEqual(api_version,
gorch_request.api_version)
self.assertEqual(target_region_name,
gorch_request.target_region_name)
db_api.orch_request_update(self.ctx, orch_request.uuid, values)
gorch_request = db_api.orch_request_get(self.ctx, orch_request.uuid)
self.assertEqual(orch_job_state, gorch_request.state)
self.assertEqual(try_count, gorch_request.try_count)
self.assertEqual(api_version, gorch_request.api_version)
self.assertEqual(target_region_name, gorch_request.target_region_name)
def test_orch_request_get_all(self):
resource = self.create_default_resource(
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_default_resource(consts.RESOURCE_TYPE_SYSINV_USER)
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
values = {}
orch_job = self.create_orch_job(self.ctx,
resource.id,
endpoint_type,
operation_type,
values)
orch_job = self.create_orch_job(
self.ctx, resource.id, endpoint_type, operation_type, values
)
target_region_name = "RegionOne"
self.create_default_orch_request(orch_job.id,
target_region_name)
self.create_default_orch_request(orch_job.id, target_region_name)
target_region_name = "RegionTwo"
self.create_default_orch_request(orch_job.id,
target_region_name)
self.create_default_orch_request(orch_job.id, target_region_name)
orch_requests = db_api.orch_request_get_all(self.ctx)
self.assertEqual(2, len(orch_requests))
def test_orch_request_get_by_orch_job(self):
resource_sysinv = self.create_default_resource(
consts.RESOURCE_TYPE_SYSINV_USER)
resource_sysinv = self.create_default_resource(consts.RESOURCE_TYPE_SYSINV_USER)
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
values = {}
orch_job_sysinv = self.create_orch_job(self.ctx,
resource_sysinv.id,
endpoint_type,
operation_type,
values)
orch_job_sysinv = self.create_orch_job(
self.ctx, resource_sysinv.id, endpoint_type, operation_type, values
)
target_region_name = "RegionOne"
self.create_default_orch_request(orch_job_sysinv.id,
target_region_name)
self.create_default_orch_request(orch_job_sysinv.id, target_region_name)
resource_flavor = self.create_default_resource(
consts.RESOURCE_TYPE_COMPUTE_FLAVOR)
consts.RESOURCE_TYPE_COMPUTE_FLAVOR
)
endpoint_type = consts.ENDPOINT_TYPE_COMPUTE
operation_type = consts.OPERATION_TYPE_POST
values = {}
orch_job_flavor = self.create_orch_job(self.ctx,
resource_flavor.id,
endpoint_type,
operation_type,
values)
self.create_default_orch_request(orch_job_flavor.id,
target_region_name)
orch_job_flavor = self.create_orch_job(
self.ctx, resource_flavor.id, endpoint_type, operation_type, values
)
self.create_default_orch_request(orch_job_flavor.id, target_region_name)
orch_requests_sysinv = db_api.orch_request_get_all(
self.ctx,
orch_job_id=orch_job_sysinv.id)
self.ctx, orch_job_id=orch_job_sysinv.id
)
orch_requests_flavor = db_api.orch_request_get_all(
self.ctx,
orch_job_id=orch_job_flavor.id)
self.ctx, orch_job_id=orch_job_flavor.id
)
self.assertEqual(1, len(orch_requests_sysinv))
self.assertEqual(1, len(orch_requests_flavor))
@@ -278,11 +245,9 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
if request.updated_at > orts:
orid = request.id
most_recent = \
db_api.orch_request_get_most_recent_failed_request(self.ctx)
most_recent = db_api.orch_request_get_most_recent_failed_request(self.ctx)
self.assertIsNotNone(most_recent)
self.assertEqual(orid,
most_recent.id)
self.assertEqual(orid, most_recent.id)
def test_orch_request_delete_previous_failed_requests(self):
orch_requests = self.create_some_orch_requests()
@@ -294,7 +259,8 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
expected_count = total_count - failed_count
db_api.orch_request_delete_previous_failed_requests(
self.ctx, timeutils.utcnow())
self.ctx, timeutils.utcnow()
)
orch_requests = db_api.orch_request_get_all(self.ctx)
self.assertEqual(expected_count, len(orch_requests))
@@ -304,16 +270,11 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
orch_request1 = self.create_default_sysinv_orch_job()
orch_request2 = self.create_default_sysinv_orch_job()
values = {'state': consts.ORCH_REQUEST_STATE_FAILED,
'try_count': 2}
values = {"state": consts.ORCH_REQUEST_STATE_FAILED, "try_count": 2}
db_api.orch_request_update(self.ctx,
orch_request1.uuid,
values)
db_api.orch_request_update(self.ctx, orch_request1.uuid, values)
db_api.orch_request_update(self.ctx,
orch_request2.uuid,
values)
db_api.orch_request_update(self.ctx, orch_request2.uuid, values)
orch_requests = db_api.orch_request_get_all(self.ctx)
return orch_requests
@@ -324,21 +285,18 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
return orch_requests
def create_default_sysinv_orch_job(self):
resource_sysinv = self.create_default_resource(
consts.RESOURCE_TYPE_SYSINV_USER)
resource_sysinv = self.create_default_resource(consts.RESOURCE_TYPE_SYSINV_USER)
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
values = {}
orch_job_sysinv = self.create_orch_job(self.ctx,
resource_sysinv.id,
endpoint_type,
operation_type,
values)
orch_job_sysinv = self.create_orch_job(
self.ctx, resource_sysinv.id, endpoint_type, operation_type, values
)
target_region_name = "RegionOne"
orch_request = self.create_default_orch_request(
orch_job_sysinv.id,
target_region_name)
orch_job_sysinv.id, target_region_name
)
return orch_request
def test_orch_request_update_invalid(self):
@@ -348,25 +306,29 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
values = {}
test_uuid = uuidutils.generate_uuid()
if orch_request.uuid != test_uuid:
self.assertRaises(exceptions.OrchRequestNotFound,
db_api.orch_request_update,
self.ctx,
test_uuid,
values)
self.assertRaises(
exceptions.OrchRequestNotFound,
db_api.orch_request_update,
self.ctx,
test_uuid,
values,
)
def test_foreign_keys(self):
subcloud = self.create_subcloud(self.ctx, SUBCLOUD_NAME_REGION_ONE)
self.assertIsNotNone(subcloud)
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_resource(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER)
self.assertIsNotNone(resource)
subcloud_resource_uuid = uuidutils.generate_uuid()
shared_config_state = consts.SHARED_CONFIG_STATE_UNMANAGED
subcloud_resource_create = self.create_subcloud_resource(
self.ctx, subcloud.id, resource.id,
self.ctx,
subcloud.id,
resource.id,
shared_config_state=shared_config_state,
subcloud_resource_uuid=subcloud_resource_uuid)
subcloud_resource_uuid=subcloud_resource_uuid,
)
self.assertIsNotNone(subcloud_resource_create)
self.assertEqual(subcloud.id, subcloud_resource_create.subcloud_id)
@@ -377,57 +339,57 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
self.assertIsNotNone(orch_request)
db_api.orch_request_destroy(self.ctx, orch_request.uuid)
self.assertRaises(exceptions.OrchRequestNotFound,
db_api.orch_request_get,
self.ctx,
orch_request.uuid)
self.assertRaises(
exceptions.OrchRequestNotFound,
db_api.orch_request_get,
self.ctx,
orch_request.uuid,
)
def skip_test_orch_request_get_by_attrs(self):
resource_sysinv = self.create_default_resource(
consts.RESOURCE_TYPE_SYSINV_USER)
resource_sysinv = self.create_default_resource(consts.RESOURCE_TYPE_SYSINV_USER)
endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
operation_type = consts.OPERATION_TYPE_PATCH
values = {}
orch_job_sysinv = self.create_orch_job(self.ctx,
resource_sysinv.id,
endpoint_type,
operation_type,
values)
orch_job_sysinv = self.create_orch_job(
self.ctx, resource_sysinv.id, endpoint_type, operation_type, values
)
target_region_name = "RegionOne"
orch_request_sysinv_1 = self.create_default_orch_request(
orch_job_sysinv.id,
target_region_name)
orch_job_sysinv.id, target_region_name
)
db_api.orch_request_update(self.ctx,
orch_request_sysinv_1.uuid,
{'state':
consts.ORCH_REQUEST_STATE_COMPLETED})
db_api.orch_request_update(
self.ctx,
orch_request_sysinv_1.uuid,
{"state": consts.ORCH_REQUEST_STATE_COMPLETED},
)
orch_request_sysinv_2 = self.create_default_orch_request(
orch_job_sysinv.id,
target_region_name)
orch_job_sysinv.id, target_region_name
)
db_api.orch_request_update(self.ctx,
orch_request_sysinv_2.uuid,
{'state':
consts.ORCH_REQUEST_STATE_IN_PROGRESS})
db_api.orch_request_update(
self.ctx,
orch_request_sysinv_2.uuid,
{"state": consts.ORCH_REQUEST_STATE_IN_PROGRESS},
)
resource_flavor = self.create_default_resource(
consts.RESOURCE_TYPE_COMPUTE_FLAVOR)
consts.RESOURCE_TYPE_COMPUTE_FLAVOR
)
endpoint_type = consts.ENDPOINT_TYPE_COMPUTE
operation_type = consts.OPERATION_TYPE_POST
values = {}
orch_job_flavor = self.create_orch_job(self.ctx,
resource_flavor.id,
endpoint_type,
operation_type,
values)
orch_job_flavor = self.create_orch_job(
self.ctx, resource_flavor.id, endpoint_type, operation_type, values
)
orch_request_compute = self.create_default_orch_request(
orch_job_flavor.id,
target_region_name)
orch_job_flavor.id, target_region_name
)
attrs_endpoint_type = dccommon_consts.ENDPOINT_TYPE_PLATFORM
attrs_resource_type = consts.RESOURCE_TYPE_SYSINV_USER
@@ -436,7 +398,8 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
attrs_endpoint_type,
attrs_resource_type,
target_region_name=target_region_name,
states=[consts.ORCH_REQUEST_STATE_IN_PROGRESS])
states=[consts.ORCH_REQUEST_STATE_IN_PROGRESS],
)
self.assertEqual(1, len(orch_requests_attrs_1))
@@ -445,8 +408,11 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
attrs_endpoint_type,
attrs_resource_type,
target_region_name=target_region_name,
states=[consts.ORCH_REQUEST_STATE_IN_PROGRESS,
consts.ORCH_REQUEST_STATE_COMPLETED])
states=[
consts.ORCH_REQUEST_STATE_IN_PROGRESS,
consts.ORCH_REQUEST_STATE_COMPLETED,
],
)
self.assertEqual(2, len(orch_requests_attrs_2))
@@ -455,7 +421,8 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
consts.ENDPOINT_TYPE_COMPUTE,
consts.RESOURCE_TYPE_COMPUTE_FLAVOR,
target_region_name=target_region_name,
states=consts.ORCH_REQUEST_STATE_NONE)
states=consts.ORCH_REQUEST_STATE_NONE,
)
self.assertEqual(1, len(orch_requests_attrs))
self.assertEqual(orch_request_compute.id, orch_requests_attrs[0].id)
@@ -466,8 +433,7 @@ class DBAPIOrchRequestTest(base.OrchestratorTestCase):
soft_deleted_count = 0
delete_time = timeutils.utcnow() - datetime.timedelta(days=2)
values = {'deleted': 1,
'deleted_at': delete_time}
values = {"deleted": 1, "deleted_at": delete_time}
for request in orch_requests:
if request == consts.ORCH_REQUEST_STATE_FAILED:
db_api.orch_request_update(self.ctx, request.uuid, values)


@@ -34,8 +34,7 @@ get_engine = api.get_engine
class DBAPISubcloudTest(base.OrchestratorTestCase):
def setup_dummy_db(self):
options.cfg.set_defaults(options.database_opts,
sqlite_synchronous=False)
options.cfg.set_defaults(options.database_opts, sqlite_synchronous=False)
options.set_defaults(cfg.CONF, connection="sqlite://")
engine = get_engine()
db_api.db_sync(engine)
@@ -48,24 +47,26 @@ class DBAPISubcloudTest(base.OrchestratorTestCase):
meta.reflect(bind=engine)
for table in reversed(meta.sorted_tables):
if table.name == 'migrate_version':
if table.name == "migrate_version":
continue
engine.execute(table.delete())
@staticmethod
def create_subcloud(ctxt, region_name, **kwargs):
values = {'management_ip': '192.168.0.1'}
values = {"management_ip": "192.168.0.1"}
values.update(kwargs)
return db_api.subcloud_create(ctxt, region_name, values)
def create_default_subcloud(self, ctxt):
region_name = 'RegionOne'
software_version = '17.07'
region_name = "RegionOne"
software_version = "17.07"
availability_status = dccommon_consts.AVAILABILITY_ONLINE
subcloud = self.create_subcloud(
ctxt, region_name,
ctxt,
region_name,
software_version=software_version,
availability_status=availability_status)
availability_status=availability_status,
)
self.assertIsNotNone(subcloud)
return subcloud
@@ -87,53 +88,59 @@ class DBAPISubcloudTest(base.OrchestratorTestCase):
subcloud = self.create_default_subcloud(self.ctx)
availability_status_update = dccommon_consts.AVAILABILITY_OFFLINE
software_version_update = subcloud.software_version + '1'
values = {'availability_status': availability_status_update,
'software_version': software_version_update}
updated = db_api.subcloud_update(self.ctx, subcloud.region_name,
values)
software_version_update = subcloud.software_version + "1"
values = {
"availability_status": availability_status_update,
"software_version": software_version_update,
}
updated = db_api.subcloud_update(self.ctx, subcloud.region_name, values)
self.assertIsNotNone(updated)
updated_subcloud = db_api.subcloud_get(self.ctx, subcloud.region_name)
self.assertEqual(availability_status_update,
updated_subcloud.availability_status)
self.assertEqual(software_version_update,
updated_subcloud.software_version)
self.assertEqual(
availability_status_update, updated_subcloud.availability_status
)
self.assertEqual(software_version_update, updated_subcloud.software_version)
def test_delete_subcloud(self):
subcloud = self.create_default_subcloud(self.ctx)
db_api.subcloud_delete(self.ctx, subcloud.region_name)
self.assertRaises(exceptions.SubcloudNotFound,
db_api.subcloud_get,
self.ctx, subcloud.region_name)
self.assertRaises(
exceptions.SubcloudNotFound,
db_api.subcloud_get,
self.ctx,
subcloud.region_name,
)
def test_delete_all_subcloud(self):
region_names = ['RegionOne', 'RegionTwo']
software_version = '17.07'
region_names = ["RegionOne", "RegionTwo"]
software_version = "17.07"
availability_status = dccommon_consts.AVAILABILITY_ONLINE
for region_name in region_names:
subcloud = self.create_subcloud(
self.ctx, region_name,
self.ctx,
region_name,
software_version=software_version,
availability_status=availability_status)
availability_status=availability_status,
)
self.assertIsNotNone(subcloud)
db_api.subcloud_delete(self.ctx, region_name)
for region_name in region_names:
self.assertRaises(exceptions.SubcloudNotFound,
db_api.subcloud_get,
self.ctx, region_name)
self.assertRaises(
exceptions.SubcloudNotFound, db_api.subcloud_get, self.ctx, region_name
)
def test_subcloud_get_by_region_name(self):
subcloud = self.create_default_subcloud(self.ctx)
by_region_names = db_api.subcloud_get_all(
self.ctx,
region_name=subcloud.region_name)
self.ctx, region_name=subcloud.region_name
)
self.assertIsNotNone(by_region_names)
for by_region_name in by_region_names:
self.assertEqual(subcloud.region_name, by_region_name.region_name)
@@ -144,57 +151,67 @@ class DBAPISubcloudTest(base.OrchestratorTestCase):
by_statuses = db_api.subcloud_get_all(
self.ctx,
management_state=subcloud.management_state,
availability_status=subcloud.availability_status)
availability_status=subcloud.availability_status,
)
self.assertIsNotNone(by_statuses)
for by_status in by_statuses:
self.assertEqual(subcloud.management_state,
by_status.management_state)
self.assertEqual(subcloud.availability_status,
by_status.availability_status)
self.assertEqual(subcloud.management_state, by_status.management_state)
self.assertEqual(
subcloud.availability_status, by_status.availability_status
)
def test_subcloud_get_by_availability_status(self):
region_names = ['RegionOne', 'RegionTwo']
software_version = '17.07'
region_names = ["RegionOne", "RegionTwo"]
software_version = "17.07"
availability_status = dccommon_consts.AVAILABILITY_ONLINE
for region_name in region_names:
subcloud = self.create_subcloud(
self.ctx, region_name,
self.ctx,
region_name,
software_version=software_version,
availability_status=availability_status)
availability_status=availability_status,
)
self.assertIsNotNone(subcloud)
region_names = ['RegionThree', 'RegionFour']
software_version = '17.07'
region_names = ["RegionThree", "RegionFour"]
software_version = "17.07"
availability_status = dccommon_consts.AVAILABILITY_OFFLINE
for region_name in region_names:
subcloud = self.create_subcloud(
self.ctx, region_name,
self.ctx,
region_name,
software_version=software_version,
availability_status=availability_status)
availability_status=availability_status,
)
self.assertIsNotNone(subcloud)
by_statuses = db_api.subcloud_get_all(
self.ctx,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
self.ctx, availability_status=dccommon_consts.AVAILABILITY_ONLINE
)
self.assertIsNotNone(by_statuses)
for by_status in by_statuses:
self.assertEqual(dccommon_consts.AVAILABILITY_ONLINE,
by_status.availability_status)
self.assertEqual(
dccommon_consts.AVAILABILITY_ONLINE, by_status.availability_status
)
by_statuses = db_api.subcloud_get_all(
self.ctx,
availability_status=dccommon_consts.AVAILABILITY_OFFLINE)
self.ctx, availability_status=dccommon_consts.AVAILABILITY_OFFLINE
)
self.assertIsNotNone(by_statuses)
for by_status in by_statuses:
self.assertEqual(dccommon_consts.AVAILABILITY_OFFLINE,
by_status.availability_status)
self.assertEqual(
dccommon_consts.AVAILABILITY_OFFLINE, by_status.availability_status
)
def test_subcloud_duplicate_region_names(self):
region_name = 'RegionOne'
region_name = "RegionOne"
subcloud = self.create_subcloud(self.ctx, region_name)
self.assertRaises(db_exc.DBDuplicateEntry,
self.create_subcloud,
self.ctx, subcloud.region_name)
self.assertRaises(
db_exc.DBDuplicateEntry,
self.create_subcloud,
self.ctx,
subcloud.region_name,
)


@@ -37,8 +37,7 @@ SUBCLOUD_NAME_REGION_ONE = "RegionOne"
class DBAPISubcloudResourceTest(base.OrchestratorTestCase):
def setup_dummy_db(self):
options.cfg.set_defaults(options.database_opts,
sqlite_synchronous=False)
options.cfg.set_defaults(options.database_opts, sqlite_synchronous=False)
options.set_defaults(cfg.CONF, connection="sqlite://")
engine = get_engine()
db_api.db_sync(engine)
@@ -51,27 +50,26 @@ class DBAPISubcloudResourceTest(base.OrchestratorTestCase):
meta.reflect(bind=engine)
for table in reversed(meta.sorted_tables):
if table.name == 'migrate_version':
if table.name == "migrate_version":
continue
engine.execute(table.delete())
@staticmethod
def create_subcloud(ctxt, region_name, **kwargs):
values = {
'management_state': None,
'management_ip': '192.168.0.1'
}
values = {"management_state": None, "management_ip": "192.168.0.1"}
values.update(kwargs)
return db_api.subcloud_create(ctxt, region_name, values)
def create_default_subcloud(self, ctxt):
region_name = 'RegionOne'
software_version = '17.07'
availability_status = 'online'
region_name = "RegionOne"
software_version = "17.07"
availability_status = "online"
subcloud = self.create_subcloud(
ctxt, region_name,
ctxt,
region_name,
software_version=software_version,
availability_status=availability_status)
availability_status=availability_status,
)
self.assertIsNotNone(subcloud)
return subcloud
@@ -85,10 +83,7 @@ class DBAPISubcloudResourceTest(base.OrchestratorTestCase):
def create_subcloud_resource(ctxt, subcloud_id, resource_id, **kwargs):
values = {}
values.update(kwargs)
return db_api.subcloud_resource_create(ctxt,
subcloud_id,
resource_id,
values)
return db_api.subcloud_resource_create(ctxt, subcloud_id, resource_id, values)
def setUp(self):
super(DBAPISubcloudResourceTest, self).setUp()
@@ -98,116 +93,125 @@ class DBAPISubcloudResourceTest(base.OrchestratorTestCase):
self.ctx = utils.dummy_context()
def test_create_resource(self):
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_resource(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER)
self.assertIsNotNone(resource)
self.assertEqual(consts.RESOURCE_TYPE_SYSINV_USER,
resource.resource_type)
self.assertEqual(consts.RESOURCE_TYPE_SYSINV_USER, resource.resource_type)
created_resource = db_api.resource_get_all(
self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
self.assertEqual(consts.RESOURCE_TYPE_SYSINV_USER,
created_resource[0].get('resource_type'))
self.ctx, consts.RESOURCE_TYPE_SYSINV_USER
)
self.assertEqual(
consts.RESOURCE_TYPE_SYSINV_USER, created_resource[0].get("resource_type")
)
def test_primary_key_subcloud(self):
self.create_subcloud(self.ctx, SUBCLOUD_NAME_REGION_ONE)
self.assertRaises(oslo_db.exception.DBDuplicateEntry,
self.create_subcloud, self.ctx,
SUBCLOUD_NAME_REGION_ONE)
self.assertRaises(
oslo_db.exception.DBDuplicateEntry,
self.create_subcloud,
self.ctx,
SUBCLOUD_NAME_REGION_ONE,
)
def test_unique_key_resource_uuid(self):
created_resource = self.create_resource(
self.ctx, consts.RESOURCE_TYPE_SYSINV_USER
)
self.assertRaises(
oslo_db.exception.DBDuplicateEntry,
self.create_resource,
self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
self.assertRaises(oslo_db.exception.DBDuplicateEntry,
self.create_resource, self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER,
uuid=created_resource.uuid)
consts.RESOURCE_TYPE_SYSINV_USER,
uuid=created_resource.uuid,
)
def skip_test_resource_update(self):
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_resource(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER)
self.assertIsNotNone(resource)
master_id = resource.master_id
values = {'master_id': master_id}
db_api.resource_update(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER,
values)
values = {"master_id": master_id}
db_api.resource_update(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER, values)
gresource = db_api.resource_get_by_id(self.ctx, resource.id)
self.assertEqual(master_id,
gresource.get('master_id'))
self.assertEqual(master_id, gresource.get("master_id"))
def test_resource_get_all(self):
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_resource(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER)
self.assertIsNotNone(resource)
query = db_api.resource_get_all(self.ctx)
self.assertEqual(query[0].get('resource_type'), resource.resource_type)
self.assertEqual(query[0].get("resource_type"), resource.resource_type)
def skip_test_update_invalid_resource(self):
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_resource(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER)
self.assertIsNotNone(resource)
# master_uuid = uuidutils.generate_uuid()
master_id = resource.master_id
values = {'master_id': master_id}
self.assertRaises(exceptions.ResourceNotFound,
db_api.resource_update,
self.ctx,
'fake_resource_type',
master_id,
values)
values = {"master_id": master_id}
self.assertRaises(
exceptions.ResourceNotFound,
db_api.resource_update,
self.ctx,
"fake_resource_type",
master_id,
values,
)
def test_subcloud_resource_create(self):
subcloud = self.create_subcloud(self.ctx, SUBCLOUD_NAME_REGION_ONE)
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_resource(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER)
subcloud_resource_uuid = uuidutils.generate_uuid()
shared_config_state = consts.SHARED_CONFIG_STATE_UNMANAGED
subcloud_resource_create = self.create_subcloud_resource(
self.ctx, subcloud.id, resource.id,
self.ctx,
subcloud.id,
resource.id,
shared_config_state=shared_config_state,
subcloud_resource_uuid=subcloud_resource_uuid)
subcloud_resource_uuid=subcloud_resource_uuid,
)
self.assertIsNotNone(subcloud_resource_create)
self.assertEqual(consts.SHARED_CONFIG_STATE_UNMANAGED,
subcloud_resource_create.shared_config_state)
self.assertEqual(
consts.SHARED_CONFIG_STATE_UNMANAGED,
subcloud_resource_create.shared_config_state,
)
def test_subcloud_resource_update(self):
subcloud = self.create_subcloud(self.ctx, SUBCLOUD_NAME_REGION_ONE)
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_resource(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER)
subcloud_resource_uuid = uuidutils.generate_uuid()
shared_config_state = consts.SHARED_CONFIG_STATE_UNMANAGED
subcloud_resource_create = self.create_subcloud_resource(
self.ctx, subcloud.id, resource.id,
self.ctx,
subcloud.id,
resource.id,
shared_config_state=shared_config_state,
subcloud_resource_uuid=subcloud_resource_uuid)
subcloud_resource_uuid=subcloud_resource_uuid,
)
self.assertIsNotNone(subcloud_resource_create)
values = {'shared_config_state': consts.SHARED_CONFIG_STATE_MANAGED}
db_api.subcloud_resource_update(
self.ctx, subcloud_resource_create.id,
values)
values = {"shared_config_state": consts.SHARED_CONFIG_STATE_MANAGED}
db_api.subcloud_resource_update(self.ctx, subcloud_resource_create.id, values)
subcloud_resources = db_api.subcloud_resources_get_by_resource(
self.ctx,
resource.uuid)
self.assertEqual(consts.SHARED_CONFIG_STATE_MANAGED,
subcloud_resources[0].get('shared_config_state'))
self.ctx, resource.uuid
)
self.assertEqual(
consts.SHARED_CONFIG_STATE_MANAGED,
subcloud_resources[0].get("shared_config_state"),
)
def test_foreign_keys(self):
subcloud = self.create_subcloud(self.ctx, SUBCLOUD_NAME_REGION_ONE)
self.assertIsNotNone(subcloud)
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_resource(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER)
self.assertIsNotNone(resource)
subcloud_resource_uuid = uuidutils.generate_uuid()
shared_config_state = consts.SHARED_CONFIG_STATE_UNMANAGED
subcloud_resource_create = self.create_subcloud_resource(
self.ctx, subcloud.id, resource.id,
self.ctx,
subcloud.id,
resource.id,
shared_config_state=shared_config_state,
subcloud_resource_uuid=subcloud_resource_uuid)
subcloud_resource_uuid=subcloud_resource_uuid,
)
self.assertIsNotNone(subcloud_resource_create)
self.assertEqual(subcloud.id, subcloud_resource_create.subcloud_id)
@@ -216,16 +220,18 @@ class DBAPISubcloudResourceTest(base.OrchestratorTestCase):
def test_delete_subcloud_resource(self):
subcloud = self.create_subcloud(self.ctx, SUBCLOUD_NAME_REGION_ONE)
self.assertIsNotNone(subcloud)
resource = self.create_resource(self.ctx,
consts.RESOURCE_TYPE_SYSINV_USER)
resource = self.create_resource(self.ctx, consts.RESOURCE_TYPE_SYSINV_USER)
self.assertIsNotNone(resource)
subcloud_resource_uuid = uuidutils.generate_uuid()
shared_config_state = consts.SHARED_CONFIG_STATE_UNMANAGED
subcloud_resource = self.create_subcloud_resource(
self.ctx, subcloud.id, resource.id,
self.ctx,
subcloud.id,
resource.id,
shared_config_state=shared_config_state,
subcloud_resource_uuid=subcloud_resource_uuid)
subcloud_resource_uuid=subcloud_resource_uuid,
)
db_api.subcloud_resource_delete(self.ctx, subcloud_resource.uuid)
subcloud_resources = db_api.subcloud_resources_get_all(self.ctx)


@@ -112,10 +112,11 @@ class PostResourceMixin(BaseMixin):
self._resource_add().assert_called_once()
self._assert_log(
'info', f"Created Keystone {self._get_resource_name()} "
"info",
f"Created Keystone {self._get_resource_name()} "
f"{self._get_rsrc().id}:"
f"{self._get_resource_ref().get(self._get_resource_name()).get('id')} "
f"[{self._get_resource_ref_name()}]"
f"[{self._get_resource_ref_name()}]",
)
def test_post_fails_without_source_resource_id(self):
@@ -125,8 +126,9 @@ class PostResourceMixin(BaseMixin):
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
'error', f"Received {self._get_resource_name()} create request "
"without required 'source_resource_id' field"
"error",
f"Received {self._get_resource_name()} create request "
"without required 'source_resource_id' field",
)
def test_post_fails_with_dbsync_unauthorized_exception(self):
@@ -146,9 +148,10 @@ class PostResourceMixin(BaseMixin):
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
'error', f"No {self._get_resource_name()} data returned when creating "
"error",
f"No {self._get_resource_name()} data returned when creating "
f"{self._get_resource_name()} "
f"{self._get_request().orch_job.source_resource_id} in subcloud."
f"{self._get_request().orch_job.source_resource_id} in subcloud.",
)
def test_post_fails_without_resource_records(self):
@@ -158,10 +161,11 @@ class PostResourceMixin(BaseMixin):
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
'error', "No data retrieved from master cloud for "
"error",
"No data retrieved from master cloud for "
f"{self._get_resource_name()} "
f"{self._get_request().orch_job.source_resource_id} to create its "
"equivalent in subcloud."
"equivalent in subcloud.",
)
@@ -178,9 +182,10 @@ class PutResourceMixin(BaseMixin):
self._resource_detail().assert_called_once()
self._resource_update().assert_called_once()
self._assert_log(
'info', f"Updated Keystone {self._get_resource_name()} {self.rsrc.id}:"
"info",
f"Updated Keystone {self._get_resource_name()} {self.rsrc.id}:"
f"{self._get_resource_ref().get(self._get_resource_name()).get('id')} "
f"[{self._get_resource_ref_name()}]"
f"[{self._get_resource_ref_name()}]",
)
def test_put_fails_without_source_resource_id(self):
@@ -190,21 +195,24 @@ class PutResourceMixin(BaseMixin):
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
'error', f"Received {self._get_resource_name()} update request "
"without required source resource id"
"error",
f"Received {self._get_resource_name()} update request "
"without required source resource id",
)
def test_put_fails_without_id_in_resource_info(self):
"""Test put fails without id in resource info"""
print(f"{{{self._get_resource_name()}: {{}}}}")
self._get_request().orch_job.resource_info = \
self._get_request().orch_job.resource_info = (
f'{{"{self._get_resource_name()}": {{}}}}'
)
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
'error', f"Received {self._get_resource_name()} update request "
"without required subcloud resource id"
"error",
f"Received {self._get_resource_name()} update request "
"without required subcloud resource id",
)
def test_put_fails_with_dbsync_unauthorized_exception(self):
@@ -224,10 +232,11 @@ class PutResourceMixin(BaseMixin):
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
'error', "No data retrieved from master cloud for "
"error",
"No data retrieved from master cloud for "
f"{self._get_resource_name()} "
f"{self._get_request().orch_job.source_resource_id} "
"to update its equivalent in subcloud."
"to update its equivalent in subcloud.",
)
def test_put_fails_without_resource_ref(self):
@@ -237,10 +246,11 @@ class PutResourceMixin(BaseMixin):
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
'error', f"No {self._get_resource_name()} data returned when updating "
"error",
f"No {self._get_resource_name()} data returned when updating "
f"{self._get_resource_name()} "
f"{self._get_resource_ref().get(self._get_resource_name()).get('id')} "
"in subcloud."
"in subcloud.",
)
@@ -258,8 +268,9 @@ class PatchResourceMixin(BaseMixin):
self._resource_keystone_update().assert_called_once()
self._assert_log(
'info', f"Updated Keystone {self._get_resource_name()}: "
f"{self._get_rsrc().id}:{mock_update.id}"
"info",
f"Updated Keystone {self._get_resource_name()}: "
f"{self._get_rsrc().id}:{mock_update.id}",
)
def test_patch_fails_with_empty_resource_update_dict(self):
@@ -269,8 +280,9 @@ class PatchResourceMixin(BaseMixin):
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
'error', f"Received {self._get_resource_name()} update request "
"without any update fields"
"error",
f"Received {self._get_resource_name()} update request "
"without any update fields",
)
def test_patch_fails_without_resource_subcloud_rsrc(self):
@@ -280,17 +292,19 @@ class PatchResourceMixin(BaseMixin):
the resource subcloud rsrc is not found
"""
loaded_resource_info = \
jsonutils.loads(self._get_request().orch_job.resource_info)
loaded_resource_info = jsonutils.loads(
self._get_request().orch_job.resource_info
)
self._get_rsrc().id = 9999
self._execute()
self._assert_log(
'error', f"Unable to update {self._get_resource_name()} reference "
"error",
f"Unable to update {self._get_resource_name()} reference "
f"{self.rsrc}:{loaded_resource_info[self._get_resource_name()]}, cannot "
f"find equivalent Keystone {self._get_resource_name()} in subcloud."
f"find equivalent Keystone {self._get_resource_name()} in subcloud.",
)
def test_patch_fails_with_resource_ref_id_not_equal_resource_id(self):
@@ -303,9 +317,10 @@ class PatchResourceMixin(BaseMixin):
self._execute()
self._assert_log(
'error', f"Unable to update Keystone {self._get_resource_name()} "
"error",
f"Unable to update Keystone {self._get_resource_name()} "
f"{self._get_rsrc().id}:"
f"{self._get_subcloud_resource().subcloud_resource_id} for subcloud"
f"{self._get_subcloud_resource().subcloud_resource_id} for subcloud",
)
@@ -319,9 +334,10 @@ class DeleteResourceMixin(BaseMixin):
self._resource_keystone_delete().assert_called_once()
self._assert_log(
'info', f"Keystone {self._get_resource_name()} {self._get_rsrc().id}:"
"info",
f"Keystone {self._get_resource_name()} {self._get_rsrc().id}:"
f"{self._get_subcloud_resource().id} "
f"[{self._get_subcloud_resource().subcloud_resource_id}] deleted"
f"[{self._get_subcloud_resource().subcloud_resource_id}] deleted",
)
def test_delete_succeeds_with_keystone_not_found_exception(self):
@@ -332,20 +348,23 @@ class DeleteResourceMixin(BaseMixin):
self._execute()
self._resource_keystone_delete().assert_called_once()
self._get_log().assert_has_calls([
mock.call.info(
f"Delete {self._get_resource_name()}: {self._get_resource_name()} "
f"{self._get_subcloud_resource().subcloud_resource_id} "
f"not found in {self._get_subcloud().region_name}, "
"considered as deleted.", extra=mock.ANY
),
mock.call.info(
f"Keystone {self._get_resource_name()} {self._get_rsrc().id}:"
f"{self._get_subcloud_resource().id} "
f"[{self._get_subcloud_resource().subcloud_resource_id}] deleted",
extra=mock.ANY
)],
any_order=False
self._get_log().assert_has_calls(
[
mock.call.info(
f"Delete {self._get_resource_name()}: {self._get_resource_name()} "
f"{self._get_subcloud_resource().subcloud_resource_id} "
f"not found in {self._get_subcloud().region_name}, "
"considered as deleted.",
extra=mock.ANY,
),
mock.call.info(
f"Keystone {self._get_resource_name()} {self._get_rsrc().id}:"
f"{self._get_subcloud_resource().id} "
f"[{self._get_subcloud_resource().subcloud_resource_id}] deleted",
extra=mock.ANY,
),
],
any_order=False,
)
def test_delete_fails_without_resource_subcloud_rsrc(self):
@@ -360,7 +379,8 @@ class DeleteResourceMixin(BaseMixin):
self._execute()
self._assert_log(
'error', f"Unable to delete {self._get_resource_name()} reference "
"error",
f"Unable to delete {self._get_resource_name()} reference "
f"{self._get_rsrc()}, cannot find equivalent Keystone "
f"{self._get_resource_name()} in subcloud."
f"{self._get_resource_name()} in subcloud.",
)


@@ -301,8 +301,9 @@ class TestIdentitySyncThreadProjectsPost(
super().setUp()
self.method = self.identity_sync_thread.post_projects
self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
self.resource_add = self.sc_dbs_client.project_manager.add_project
self.resource_add = (
self.identity_sync_thread.get_sc_dbs_client().project_manager.add_project
)
class TestIdentitySyncThreadProjectsPut(
@@ -314,8 +315,9 @@ class TestIdentitySyncThreadProjectsPut(
super().setUp()
self.method = self.identity_sync_thread.put_projects
self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
self.resource_update = self.sc_dbs_client.project_manager.update_project
self.resource_update = (
self.identity_sync_thread.get_sc_dbs_client().project_manager.update_project
)
class TestIdentitySyncThreadProjectsPatch(
@@ -358,8 +360,9 @@ class BaseTestIdentitySyncThreadRoles(BaseTestIdentitySyncThread):
self.resource_name: {"id": RESOURCE_ID, "name": "fake value"}
}
self.resource_ref_name = self.resource_ref.get(self.resource_name).get("name")
self.dbs_client = self.identity_sync_thread.get_master_dbs_client()
self.resource_detail = self.dbs_client.role_manager.role_detail
self.resource_detail = (
self.identity_sync_thread.get_master_dbs_client().role_manager.role_detail
)
class TestIdentitySyncThreadRolesPost(
@@ -371,8 +374,9 @@ class TestIdentitySyncThreadRolesPost(
super().setUp()
self.method = self.identity_sync_thread.post_roles
self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
self.resource_add = self.sc_dbs_client.role_manager.add_role
self.resource_add = (
self.identity_sync_thread.get_sc_dbs_client().role_manager.add_role
)
class TestIdentitySyncThreadRolesPut(
@@ -384,8 +388,9 @@ class TestIdentitySyncThreadRolesPut(
super().setUp()
self.method = self.identity_sync_thread.put_roles
self.sc_dbs_client = self.identity_sync_thread.get_sc_dbs_client()
self.resource_update = self.sc_dbs_client.role_manager.update_role
self.resource_update = (
self.identity_sync_thread.get_sc_dbs_client().role_manager.update_role
)
class TestIdentitySyncThreadRolesPatch(
@@ -443,16 +448,15 @@ class TestIdentitySyncThreadProjectRoleAssignmentsPost(
self.rsrc.master_id = self.resource_tags
self.mock_sc_role = self._create_mock_object(self.role_id)
self.identity_sync_thread.get_sc_ks_client().roles.list.return_value = [
self.mock_sc_role
]
self.identity_sync_thread.get_sc_ks_client().projects.list.return_value = [
self.sc_ks_client = self.identity_sync_thread.get_sc_ks_client()
self.sc_ks_client.roles.list.return_value = [self.mock_sc_role]
self.sc_ks_client.projects.list.return_value = [
self._create_mock_object(self.project_id)
]
self.identity_sync_thread.get_sc_ks_client().domains.list.return_value = [
self.sc_ks_client.domains.list.return_value = [
self._create_mock_object(self.project_id)
]
self.identity_sync_thread.get_sc_ks_client().users.list.return_value = [
self.sc_ks_client.users.list.return_value = [
self._create_mock_object(self.actor_id)
]
@@ -475,8 +479,8 @@ class TestIdentitySyncThreadProjectRoleAssignmentsPost(
def test_post_succeeds_with_sc_group(self):
"""Test post succeeds with sc group"""
self.identity_sync_thread.get_sc_ks_client().users.list.return_value = []
self.identity_sync_thread.get_sc_ks_client().groups.list.return_value = [
self.sc_ks_client.users.list.return_value = []
self.sc_ks_client.groups.list.return_value = [
self._create_mock_object(self.actor_id)
]
@@ -502,7 +506,7 @@ class TestIdentitySyncThreadProjectRoleAssignmentsPost(
def test_post_fails_without_sc_role(self):
"""Test post fails without sc role"""
self.identity_sync_thread.get_sc_ks_client().roles.list.return_value = []
self.sc_ks_client.roles.list.return_value = []
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
@@ -515,7 +519,7 @@ class TestIdentitySyncThreadProjectRoleAssignmentsPost(
def test_post_fails_without_sc_proj(self):
"""Test post fails without sc proj"""
self.identity_sync_thread.get_sc_ks_client().projects.list.return_value = []
self.sc_ks_client.projects.list.return_value = []
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
@@ -528,7 +532,7 @@ class TestIdentitySyncThreadProjectRoleAssignmentsPost(
def test_post_fails_wihtout_sc_user_and_sc_group(self):
"""Test post fails without sc user and sc group"""
self.identity_sync_thread.get_sc_ks_client().users.list.return_value = []
self.sc_ks_client.users.list.return_value = []
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
@@ -541,8 +545,7 @@ class TestIdentitySyncThreadProjectRoleAssignmentsPost(
def test_post_fails_without_role_ref(self):
"""Test post fails without role ref"""
sc_ks_client = self.identity_sync_thread.get_sc_ks_client()
sc_ks_client.role_assignments.list.return_value = []
self.sc_ks_client.role_assignments.list.return_value = []
self._execute_and_assert_exception(exceptions.SyncRequestFailed)
self._assert_log(
@@ -586,14 +589,14 @@ class TestIdentitySyncThreadProjectRoleAssignmentsDelete(
self.method = self.identity_sync_thread.delete_project_role_assignments
self.sc_ks_client = self.identity_sync_thread.get_sc_ks_client()
self.subcloud_resource.subcloud_resource_id = self.resource_tags
self.subcloud_resource.save()
def test_delete_succeeds(self):
"""Test delete succeeds"""
sc_ks_client = self.identity_sync_thread.get_sc_ks_client()
sc_ks_client.role_assignments.list.return_value = []
self.sc_ks_client.role_assignments.list.return_value = []
self._execute()
@@ -634,12 +637,11 @@ class TestIdentitySyncThreadProjectRoleAssignmentsDelete(
def test_delete_for_user_succeeds_with_keystone_not_found_exception(self):
"""Test delete fails for user with keystone not found exception"""
sc_ks_client = self.identity_sync_thread.get_sc_ks_client()
sc_ks_client.roles.revoke.side_effect = [
self.sc_ks_client.roles.revoke.side_effect = [
keystone_exceptions.NotFound,
None,
]
sc_ks_client.role_assignments.list.return_value = []
self.sc_ks_client.role_assignments.list.return_value = []
self._execute()
@@ -664,9 +666,7 @@ class TestIdentitySyncThreadProjectRoleAssignmentsDelete(
def test_delete_for_group_succeeds_with_keystone_not_found_exception(self):
"""Test delete fails for group with keystone not found exception"""
self.identity_sync_thread.get_sc_ks_client().roles.revoke.side_effect = (
keystone_exceptions.NotFound
)
self.sc_ks_client.roles.revoke.side_effect = keystone_exceptions.NotFound
self._execute()


@@ -33,7 +33,7 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
super(TestGenericSyncManager, self).setUp()
# Mock the DCorch engine-worker API client
p = mock.patch('dcorch.rpc.client.EngineWorkerClient')
p = mock.patch("dcorch.rpc.client.EngineWorkerClient")
self.mock_dcorch_api = p.start()
self.addCleanup(p.stop)
@@ -48,9 +48,11 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
chunks = list()
chunk_num = -1
for i in range(1, 23):
region_name = 'subcloud' + str(i)
subcloud_sync_identity = \
(region_name, dccommon_consts.ENDPOINT_TYPE_IDENTITY)
region_name = "subcloud" + str(i)
subcloud_sync_identity = (
region_name,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
)
subcloud_sync_list.append(subcloud_sync_identity)
if (i - 1) % CONF.workers == 0:
chunk_num += 1
@@ -60,12 +62,11 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
gsm = generic_sync_manager.GenericSyncManager()
rpc_method = mock.MagicMock()
rpc_method.__name__ = 'mock_rpc_method'
rpc_method.__name__ = "mock_rpc_method"
gsm._process_subclouds(rpc_method, subcloud_sync_list)
# Verify the number of chunks
self.assertEqual(math.ceil(len(subcloud_sync_list) / CONF.workers),
len(chunks))
self.assertEqual(math.ceil(len(subcloud_sync_list) / CONF.workers), len(chunks))
# Verify rpc call for each chunk of subclouds
for chunk in chunks:
rpc_method.assert_any_call(mock.ANY, chunk)
@@ -74,169 +75,206 @@ class TestGenericSyncManager(base.OrchestratorTestCase):
# Create subcloud1 not eligible for sync due to initial_sync_state
utils.create_subcloud_static(
self.ctx,
name='subcloud1',
name="subcloud1",
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED)
initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED,
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud1',
name="subcloud1",
endpoint_type=dccommon_consts.ENDPOINT_TYPE_IDENTITY,
sync_request=consts.SYNC_STATUS_REQUESTED)
sync_request=consts.SYNC_STATUS_REQUESTED,
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud1',
name="subcloud1",
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PLATFORM,
sync_request=consts.SYNC_STATUS_REQUESTED)
sync_request=consts.SYNC_STATUS_REQUESTED,
)
# Create subcloud2 not eligible for sync due to sync_request
utils.create_subcloud_static(
self.ctx,
name='subcloud2',
name="subcloud2",
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED)
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED,
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud2',
name="subcloud2",
endpoint_type=dccommon_consts.ENDPOINT_TYPE_IDENTITY,
sync_request=consts.SYNC_STATUS_IN_PROGRESS)
sync_request=consts.SYNC_STATUS_IN_PROGRESS,
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud2',
name="subcloud2",
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PLATFORM,
sync_request=consts.SYNC_STATUS_IN_PROGRESS)
sync_request=consts.SYNC_STATUS_IN_PROGRESS,
)
# Create 22 eligible subclouds
subcloud_sync_list = []
for i in range(3, 25):
subcloud = utils.create_subcloud_static(
self.ctx,
name='subcloud' + str(i),
name="subcloud" + str(i),
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED,
management_ip='10.10.10.' + str(i))
management_ip="10.10.10." + str(i),
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud' + str(i),
name="subcloud" + str(i),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_IDENTITY,
sync_request='requested')
subcloud_sync_list.append((subcloud.region_name,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
subcloud.management_ip))
sync_request="requested",
)
subcloud_sync_list.append(
(
subcloud.region_name,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
subcloud.management_ip,
)
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud' + str(i),
name="subcloud" + str(i),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PLATFORM,
sync_request='requested')
subcloud_sync_list.append((subcloud.region_name,
dccommon_consts.ENDPOINT_TYPE_PLATFORM,
subcloud.management_ip))
sync_request="requested",
)
subcloud_sync_list.append(
(
subcloud.region_name,
dccommon_consts.ENDPOINT_TYPE_PLATFORM,
subcloud.management_ip,
)
)
gsm = generic_sync_manager.GenericSyncManager()
gsm._process_subclouds = mock.MagicMock()
gsm.sync_subclouds()
gsm._process_subclouds.assert_called_once_with(
self.mock_dcorch_api().sync_subclouds, subcloud_sync_list)
self.mock_dcorch_api().sync_subclouds, subcloud_sync_list
)
# Verify the sync_request of the subclouds were updated to in-progress
for i in range(3, 25):
subcloud_sync_identity = db_api.subcloud_sync_get(
self.ctx,
'subcloud' + str(i),
dccommon_consts.ENDPOINT_TYPE_IDENTITY)
self.assertEqual(consts.SYNC_STATUS_IN_PROGRESS,
subcloud_sync_identity.sync_request)
self.ctx, "subcloud" + str(i), dccommon_consts.ENDPOINT_TYPE_IDENTITY
)
self.assertEqual(
consts.SYNC_STATUS_IN_PROGRESS, subcloud_sync_identity.sync_request
)
subcloud_sync_platform = db_api.subcloud_sync_get(
self.ctx,
'subcloud' + str(i),
dccommon_consts.ENDPOINT_TYPE_PLATFORM)
self.assertEqual(consts.SYNC_STATUS_IN_PROGRESS,
subcloud_sync_platform.sync_request)
self.ctx, "subcloud" + str(i), dccommon_consts.ENDPOINT_TYPE_PLATFORM
)
self.assertEqual(
consts.SYNC_STATUS_IN_PROGRESS, subcloud_sync_platform.sync_request
)
def test_run_sync_audit(self):
# Create subcloud1 not eligible for audit due to initial_sync_state
utils.create_subcloud_static(
self.ctx,
name='subcloud1',
name="subcloud1",
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED)
initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED,
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud1',
name="subcloud1",
endpoint_type=dccommon_consts.ENDPOINT_TYPE_IDENTITY,
audit_status=consts.AUDIT_STATUS_NONE)
audit_status=consts.AUDIT_STATUS_NONE,
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud1',
name="subcloud1",
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PLATFORM,
audit_status=consts.AUDIT_STATUS_NONE)
audit_status=consts.AUDIT_STATUS_NONE,
)
# Create subcloud2 not eligible for audit due to management_state
utils.create_subcloud_static(
self.ctx,
name='subcloud2',
name="subcloud2",
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud2',
name="subcloud2",
endpoint_type=dccommon_consts.ENDPOINT_TYPE_IDENTITY,
audit_status=consts.AUDIT_STATUS_FAILED)
audit_status=consts.AUDIT_STATUS_FAILED,
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud2',
name="subcloud2",
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PLATFORM,
audit_status=consts.AUDIT_STATUS_FAILED)
audit_status=consts.AUDIT_STATUS_FAILED,
)
# Create 22 eligible subclouds
subcloud_sync_list = []
for i in range(3, 25):
subcloud = utils.create_subcloud_static(
self.ctx,
name='subcloud' + str(i),
name="subcloud" + str(i),
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED,
management_ip='10.10.10.' + str(i))
last_audit_time = timeutils.utcnow() - \
timedelta(seconds=generic_sync_manager.AUDIT_INTERVAL)
management_ip="10.10.10." + str(i),
)
last_audit_time = timeutils.utcnow() - timedelta(
seconds=generic_sync_manager.AUDIT_INTERVAL
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud' + str(i),
name="subcloud" + str(i),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_IDENTITY,
audit_status=consts.AUDIT_STATUS_COMPLETED,
last_audit_time=last_audit_time)
subcloud_sync_list.append((subcloud.region_name,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
subcloud.management_ip))
last_audit_time=last_audit_time,
)
subcloud_sync_list.append(
(
subcloud.region_name,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
subcloud.management_ip,
)
)
utils.create_subcloud_sync_static(
self.ctx,
name='subcloud' + str(i),
name="subcloud" + str(i),
endpoint_type=dccommon_consts.ENDPOINT_TYPE_PLATFORM,
audit_status=consts.AUDIT_STATUS_COMPLETED,
last_audit_time=last_audit_time)
subcloud_sync_list.append((subcloud.region_name,
dccommon_consts.ENDPOINT_TYPE_PLATFORM,
subcloud.management_ip))
last_audit_time=last_audit_time,
)
subcloud_sync_list.append(
(
subcloud.region_name,
dccommon_consts.ENDPOINT_TYPE_PLATFORM,
subcloud.management_ip,
)
)
gsm = generic_sync_manager.GenericSyncManager()
gsm._process_subclouds = mock.MagicMock()
gsm.run_sync_audit()
gsm._process_subclouds.assert_called_once_with(
self.mock_dcorch_api().run_sync_audit, subcloud_sync_list)
self.mock_dcorch_api().run_sync_audit, subcloud_sync_list
)
# Verify the audit_status of the subclouds were updated to in-progress
for i in range(3, 25):
subcloud_sync_identity = db_api.subcloud_sync_get(
self.ctx,
'subcloud' + str(i),
dccommon_consts.ENDPOINT_TYPE_IDENTITY)
self.assertEqual(consts.AUDIT_STATUS_IN_PROGRESS,
subcloud_sync_identity.audit_status)
self.ctx, "subcloud" + str(i), dccommon_consts.ENDPOINT_TYPE_IDENTITY
)
self.assertEqual(
consts.AUDIT_STATUS_IN_PROGRESS, subcloud_sync_identity.audit_status
)
subcloud_sync_platform = db_api.subcloud_sync_get(
self.ctx,
'subcloud' + str(i),
dccommon_consts.ENDPOINT_TYPE_PLATFORM)
self.assertEqual(consts.AUDIT_STATUS_IN_PROGRESS,
subcloud_sync_platform.audit_status)
self.ctx, "subcloud" + str(i), dccommon_consts.ENDPOINT_TYPE_PLATFORM
)
self.assertEqual(
consts.AUDIT_STATUS_IN_PROGRESS, subcloud_sync_platform.audit_status
)

View File

@@ -16,10 +16,10 @@ from dcorch.tests import base
from dcorch.tests import utils
SUBCLOUD_SYNC_LIST = [
('subcloud1', dccommon_consts.ENDPOINT_TYPE_IDENTITY, '192.168.1.11'),
('subcloud1', dccommon_consts.ENDPOINT_TYPE_PLATFORM, '192.168.1.11'),
('subcloud2', dccommon_consts.ENDPOINT_TYPE_IDENTITY, '192.168.1.12'),
('subcloud2', dccommon_consts.ENDPOINT_TYPE_PLATFORM, '192.168.1.12')
("subcloud1", dccommon_consts.ENDPOINT_TYPE_IDENTITY, "192.168.1.11"),
("subcloud1", dccommon_consts.ENDPOINT_TYPE_PLATFORM, "192.168.1.11"),
("subcloud2", dccommon_consts.ENDPOINT_TYPE_IDENTITY, "192.168.1.12"),
("subcloud2", dccommon_consts.ENDPOINT_TYPE_PLATFORM, "192.168.1.12"),
]
@@ -27,28 +27,28 @@ class TestGenericSyncWorkerManager(base.OrchestratorTestCase):
def setUp(self):
super(TestGenericSyncWorkerManager, self).setUp()
self.engine_id = uuidutils.generate_uuid()
self.gswm = generic_sync_worker_manager.GenericSyncWorkerManager(
self.engine_id)
self.gswm = generic_sync_worker_manager.GenericSyncWorkerManager(self.engine_id)
# Mock sync_object_class_map
p = mock.patch.object(generic_sync_worker_manager,
'sync_object_class_map',
{dccommon_consts.ENDPOINT_TYPE_PLATFORM:
mock.MagicMock(),
dccommon_consts.ENDPOINT_TYPE_IDENTITY:
mock.MagicMock(),
dccommon_consts.ENDPOINT_TYPE_IDENTITY_OS:
mock.MagicMock()})
p = mock.patch.object(
generic_sync_worker_manager,
"sync_object_class_map",
{
dccommon_consts.ENDPOINT_TYPE_PLATFORM: mock.MagicMock(),
dccommon_consts.ENDPOINT_TYPE_IDENTITY: mock.MagicMock(),
dccommon_consts.ENDPOINT_TYPE_IDENTITY_OS: mock.MagicMock(),
},
)
self.mock_sync_object_class_map = p.start()
self.addCleanup(mock.patch.stopall)
# Mock thread
p = mock.patch.object(threadgroup, 'Thread')
p = mock.patch.object(threadgroup, "Thread")
self.mock_thread = p.start()
self.addCleanup(p.stop)
# Mock ThreadGroupManager start
p = mock.patch('dcorch.engine.scheduler.ThreadGroupManager.start')
p = mock.patch("dcorch.engine.scheduler.ThreadGroupManager.start")
self.mock_thread_start = p.start()
self.mock_thread_start.return_value = self.mock_thread
self.addCleanup(p.stop)
@@ -58,7 +58,8 @@ class TestGenericSyncWorkerManager(base.OrchestratorTestCase):
def test_create_sync_objects(self):
sync_objs = self.gswm.create_sync_objects(
'subcloud1', base.CAPABILITES, '192.168.1.11')
"subcloud1", base.CAPABILITES, "192.168.1.11"
)
# Verify both endpoint types have corresponding sync object
self.assertEqual(len(sync_objs), 2)
@@ -68,39 +69,44 @@ class TestGenericSyncWorkerManager(base.OrchestratorTestCase):
def test_update_subcloud_state(self):
utils.create_subcloud_static(
self.ctx,
name='subcloud1',
name="subcloud1",
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED,
)
# Update all states
self.gswm.update_subcloud_state(
self.ctx,
'subcloud1',
"subcloud1",
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
availability_status=dccommon_consts.AVAILABILITY_OFFLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED)
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED,
)
# Compare all states (match)
match = self.gswm.subcloud_state_matches(
'subcloud1',
"subcloud1",
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
availability_status=dccommon_consts.AVAILABILITY_OFFLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED)
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED,
)
self.assertTrue(match)
# Update one state
self.gswm.update_subcloud_state(
self.ctx,
'subcloud1',
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
"subcloud1",
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
)
# Compare all states (match)
match = self.gswm.subcloud_state_matches(
'subcloud1',
"subcloud1",
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED)
initial_sync_state=consts.INITIAL_SYNC_STATE_COMPLETED,
)
self.assertTrue(match)
def test_sync_subclouds(self):
@@ -111,11 +117,8 @@ class TestGenericSyncWorkerManager(base.OrchestratorTestCase):
# Verify 4 threads started, one for each endpoint_type of a subcloud
for subcloud_name, endpoint_type, ip in SUBCLOUD_SYNC_LIST:
self.mock_thread_start.assert_any_call(
self.gswm._sync_subcloud,
mock.ANY,
subcloud_name,
endpoint_type,
ip)
self.gswm._sync_subcloud, mock.ANY, subcloud_name, endpoint_type, ip
)
def test_run_sync_audit(self):
self.gswm._audit_subcloud = mock.MagicMock()
@@ -129,39 +132,44 @@ class TestGenericSyncWorkerManager(base.OrchestratorTestCase):
mock.ANY,
subcloud_name,
endpoint_type,
mock.ANY)
mock.ANY,
)
def test_sync_request(self):
subcloud1 = utils.create_subcloud_static(
self.ctx,
name='subcloud1',
name="subcloud1",
management_state=dccommon_consts.MANAGEMENT_MANAGED,
initial_sync_state=consts.INITIAL_SYNC_STATE_NONE)
initial_sync_state=consts.INITIAL_SYNC_STATE_NONE,
)
utils.create_subcloud_sync_static(
self.ctx,
subcloud1.region_name,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
subcloud_id=subcloud1.id)
subcloud_id=subcloud1.id,
)
subcloud2 = utils.create_subcloud_static(
self.ctx,
name='subcloud2',
name="subcloud2",
management_state=dccommon_consts.MANAGEMENT_MANAGED,
initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED)
initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED,
)
utils.create_subcloud_sync_static(
self.ctx,
subcloud2.region_name,
dccommon_consts.ENDPOINT_TYPE_IDENTITY,
vsubcloud_id=subcloud2.id)
vsubcloud_id=subcloud2.id,
)
self.gswm.sync_request(self.ctx, dccommon_consts.ENDPOINT_TYPE_IDENTITY)
# Verify the sync_request of the subclouds were updated to requested
subcloud_sync = db_api.subcloud_sync_get(
self.ctx, 'subcloud1', dccommon_consts.ENDPOINT_TYPE_IDENTITY)
self.assertEqual(consts.SYNC_STATUS_REQUESTED,
subcloud_sync.sync_request)
self.ctx, "subcloud1", dccommon_consts.ENDPOINT_TYPE_IDENTITY
)
self.assertEqual(consts.SYNC_STATUS_REQUESTED, subcloud_sync.sync_request)
subcloud_sync = db_api.subcloud_sync_get(
self.ctx, 'subcloud2', dccommon_consts.ENDPOINT_TYPE_IDENTITY)
self.assertEqual(consts.SYNC_STATUS_REQUESTED,
subcloud_sync.sync_request)
self.ctx, "subcloud2", dccommon_consts.ENDPOINT_TYPE_IDENTITY
)
self.assertEqual(consts.SYNC_STATUS_REQUESTED, subcloud_sync.sync_request)

View File

@@ -32,7 +32,7 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
super(TestInitialSyncManager, self).setUp()
# Mock the DCorch engine-worker API client
mock_patch = mock.patch.object(client, 'EngineWorkerClient')
mock_patch = mock.patch.object(client, "EngineWorkerClient")
self.mock_rpc_client = mock_patch.start()
self.addCleanup(mock_patch.stop)
@@ -43,24 +43,29 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
def test_init_actions(self):
utils.create_subcloud_static(
self.ctx,
name='subcloud1',
initial_sync_state=consts.INITIAL_SYNC_STATE_NONE)
name="subcloud1",
initial_sync_state=consts.INITIAL_SYNC_STATE_NONE,
)
utils.create_subcloud_static(
self.ctx,
name='subcloud2',
initial_sync_state=consts.INITIAL_SYNC_STATE_IN_PROGRESS)
name="subcloud2",
initial_sync_state=consts.INITIAL_SYNC_STATE_IN_PROGRESS,
)
utils.create_subcloud_static(
self.ctx,
name='subcloud3',
initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED)
name="subcloud3",
initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED,
)
utils.create_subcloud_static(
self.ctx,
name='subcloud4',
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED)
name="subcloud4",
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED,
)
utils.create_subcloud_static(
self.ctx,
name='subcloud5',
initial_sync_state=consts.INITIAL_SYNC_STATE_IN_PROGRESS)
name="subcloud5",
initial_sync_state=consts.INITIAL_SYNC_STATE_IN_PROGRESS,
)
ism = initial_sync_manager.InitialSyncManager()
@@ -68,28 +73,32 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
ism.init_actions()
# Verify the subclouds are in the correct initial sync state
subcloud = db_api.subcloud_get(self.ctx, 'subcloud1')
self.assertEqual(subcloud.initial_sync_state,
consts.INITIAL_SYNC_STATE_NONE)
subcloud = db_api.subcloud_get(self.ctx, 'subcloud2')
self.assertEqual(subcloud.initial_sync_state,
consts.INITIAL_SYNC_STATE_REQUESTED)
subcloud = db_api.subcloud_get(self.ctx, 'subcloud3')
self.assertEqual(subcloud.initial_sync_state,
consts.INITIAL_SYNC_STATE_REQUESTED)
subcloud = db_api.subcloud_get(self.ctx, 'subcloud4')
self.assertEqual(subcloud.initial_sync_state,
consts.INITIAL_SYNC_STATE_REQUESTED)
subcloud = db_api.subcloud_get(self.ctx, 'subcloud5')
self.assertEqual(subcloud.initial_sync_state,
consts.INITIAL_SYNC_STATE_REQUESTED)
subcloud = db_api.subcloud_get(self.ctx, "subcloud1")
self.assertEqual(subcloud.initial_sync_state, consts.INITIAL_SYNC_STATE_NONE)
subcloud = db_api.subcloud_get(self.ctx, "subcloud2")
self.assertEqual(
subcloud.initial_sync_state, consts.INITIAL_SYNC_STATE_REQUESTED
)
subcloud = db_api.subcloud_get(self.ctx, "subcloud3")
self.assertEqual(
subcloud.initial_sync_state, consts.INITIAL_SYNC_STATE_REQUESTED
)
subcloud = db_api.subcloud_get(self.ctx, "subcloud4")
self.assertEqual(
subcloud.initial_sync_state, consts.INITIAL_SYNC_STATE_REQUESTED
)
subcloud = db_api.subcloud_get(self.ctx, "subcloud5")
self.assertEqual(
subcloud.initial_sync_state, consts.INITIAL_SYNC_STATE_REQUESTED
)
def test_initial_sync_subclouds(self):
# Create subcloud1 not eligible for initial sync due to initial_sync_state
utils.create_subcloud_static(
self.ctx,
name='subcloud1',
initial_sync_state=consts.INITIAL_SYNC_STATE_IN_PROGRESS)
name="subcloud1",
initial_sync_state=consts.INITIAL_SYNC_STATE_IN_PROGRESS,
)
chunks = list()
chunk_num = -1
# Create 21 eligible subclouds
@@ -99,11 +108,14 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
chunks.insert(chunk_num, dict())
subcloud = utils.create_subcloud_static(
self.ctx,
name='subcloud' + str(i),
name="subcloud" + str(i),
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED,
management_ip='192.168.1.' + str(i))
chunks[chunk_num][subcloud.region_name] = \
(base.CAPABILITES, subcloud.management_ip)
management_ip="192.168.1." + str(i),
)
chunks[chunk_num][subcloud.region_name] = (
base.CAPABILITES,
subcloud.management_ip,
)
ism = initial_sync_manager.InitialSyncManager()
@@ -115,4 +127,5 @@ class TestInitialSyncManager(base.OrchestratorTestCase):
# Verify a thread started for each chunk of subclouds
for chunk in chunks:
self.mock_rpc_client().initial_sync_subclouds.assert_any_call(
mock.ANY, chunk)
mock.ANY, chunk
)

View File

@@ -30,34 +30,41 @@ class FakeGSWM(object):
def update_subcloud_state(self, ctx, subcloud_name, initial_sync_state):
db_api.subcloud_update(
ctx,
subcloud_name,
values={'initial_sync_state': initial_sync_state})
ctx, subcloud_name, values={"initial_sync_state": initial_sync_state}
)
def create_sync_objects(self, subcloud_name, capabilities, management_ip):
sync_objs = {}
endpoint_type_list = capabilities.get('endpoint_types', None)
endpoint_type_list = capabilities.get("endpoint_types", None)
if endpoint_type_list:
for endpoint_type in endpoint_type_list:
sync_obj = FakeSyncObject()
sync_objs.update({endpoint_type: sync_obj})
return sync_objs
def subcloud_state_matches(self, subcloud_name,
management_state=None,
availability_status=None,
initial_sync_state=None):
def subcloud_state_matches(
self,
subcloud_name,
management_state=None,
availability_status=None,
initial_sync_state=None,
):
# compare subcloud states
match = True
sc = db_api.subcloud_get(self.ctx, subcloud_name)
if management_state is not None and \
sc.management_state != management_state:
if management_state is not None and sc.management_state != management_state:
match = False
if match and availability_status is not None and \
sc.availability_status != availability_status:
if (
match
and availability_status is not None
and sc.availability_status != availability_status
):
match = False
if match and initial_sync_state is not None and \
sc.initial_sync_state != initial_sync_state:
if (
match
and initial_sync_state is not None
and sc.initial_sync_state != initial_sync_state
):
match = False
return match
@@ -68,31 +75,33 @@ class TestInitialSyncWorkerManager(base.OrchestratorTestCase):
self.engine_id = uuidutils.generate_uuid()
self.fake_gswm = FakeGSWM(self.ctx, self.engine_id)
self.iswm = initial_sync_worker_manager.InitialSyncWorkerManager(
self.fake_gswm, self.engine_id)
self.fake_gswm, self.engine_id
)
# Mock eventlet
p = mock.patch('eventlet.greenthread.spawn_after')
p = mock.patch("eventlet.greenthread.spawn_after")
self.mock_eventlet_spawn_after = p.start()
self.addCleanup(p.stop)
# Mock FernetKeyManager distribute_Keys
p = mock.patch(
'dcorch.engine.fernet_key_manager.FernetKeyManager.distribute_keys')
"dcorch.engine.fernet_key_manager.FernetKeyManager.distribute_keys"
)
self.mock_distribute_keys = p.start()
self.addCleanup(p.stop)
# Mock db_api subcloud_sync_update
p = mock.patch('dcorch.db.api.subcloud_sync_update')
p = mock.patch("dcorch.db.api.subcloud_sync_update")
self.mock_subcloud_sync_update = p.start()
self.addCleanup(p.stop)
# Mock thread
p = mock.patch.object(threadgroup, 'Thread')
p = mock.patch.object(threadgroup, "Thread")
self.mock_thread = p.start()
self.addCleanup(p.stop)
# Mock ThreadGroupManager start
p = mock.patch('dcorch.engine.scheduler.ThreadGroupManager.start')
p = mock.patch("dcorch.engine.scheduler.ThreadGroupManager.start")
self.mock_thread_start = p.start()
self.mock_thread_start.return_value = self.mock_thread
self.addCleanup(p.stop)
@@ -103,16 +112,16 @@ class TestInitialSyncWorkerManager(base.OrchestratorTestCase):
def test_initial_sync_subcloud(self):
subcloud = utils.create_subcloud_static(
self.ctx,
name='subcloud1',
name="subcloud1",
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED,
management_ip="192.168.1.11")
management_ip="192.168.1.11",
)
self.assertIsNotNone(subcloud)
# Initial sync the subcloud
self.iswm._initial_sync_subcloud(self.ctx,
subcloud.region_name,
base.CAPABILITES,
subcloud.management_ip)
self.iswm._initial_sync_subcloud(
self.ctx, subcloud.region_name, base.CAPABILITES, subcloud.management_ip
)
self.mock_distribute_keys.assert_called_once()
@@ -120,25 +129,26 @@ class TestInitialSyncWorkerManager(base.OrchestratorTestCase):
self.assertEqual(2, self.mock_subcloud_sync_update.call_count)
# Verify the initial sync was completed
subcloud = db_api.subcloud_get(self.ctx, 'subcloud1')
self.assertEqual(subcloud.initial_sync_state,
consts.INITIAL_SYNC_STATE_COMPLETED)
subcloud = db_api.subcloud_get(self.ctx, "subcloud1")
self.assertEqual(
subcloud.initial_sync_state, consts.INITIAL_SYNC_STATE_COMPLETED
)
def test_initial_sync_subcloud_not_required(self):
subcloud = utils.create_subcloud_static(
self.ctx,
name='subcloud1',
initial_sync_state='',
management_ip='192.168.1.11')
name="subcloud1",
initial_sync_state="",
management_ip="192.168.1.11",
)
self.assertIsNotNone(subcloud)
self.iswm.initial_sync = mock.MagicMock()
# Initial sync the subcloud
self.iswm._initial_sync_subcloud(self.ctx,
subcloud.region_name,
base.CAPABILITES,
subcloud.management_ip)
self.iswm._initial_sync_subcloud(
self.ctx, subcloud.region_name, base.CAPABILITES, subcloud.management_ip
)
# Verify that the initial sync steps were not done
self.iswm.initial_sync.assert_not_called()
@@ -146,91 +156,99 @@ class TestInitialSyncWorkerManager(base.OrchestratorTestCase):
self.mock_subcloud_sync_update.assert_not_called()
# Verify the initial sync state was not changed
subcloud = db_api.subcloud_get(self.ctx, 'subcloud1')
self.assertEqual(subcloud.initial_sync_state, '')
subcloud = db_api.subcloud_get(self.ctx, "subcloud1")
self.assertEqual(subcloud.initial_sync_state, "")
def test_initial_sync_subcloud_failed(self):
subcloud = utils.create_subcloud_static(
self.ctx,
name='subcloud1',
name="subcloud1",
initial_sync_state=consts.INITIAL_SYNC_STATE_REQUESTED,
management_ip='192.168.1.11')
management_ip="192.168.1.11",
)
self.assertIsNotNone(subcloud)
self.iswm.enable_subcloud = mock.MagicMock()
# Force a failure
self.mock_distribute_keys.side_effect = Exception('fake_exception')
self.mock_distribute_keys.side_effect = Exception("fake_exception")
# Initial sync the subcloud
self.iswm._initial_sync_subcloud(self.ctx,
subcloud.region_name,
base.CAPABILITES,
subcloud.management_ip)
self.iswm._initial_sync_subcloud(
self.ctx, subcloud.region_name, base.CAPABILITES, subcloud.management_ip
)
# Verify the initial sync was failed
subcloud = db_api.subcloud_get(self.ctx, 'subcloud1')
self.assertEqual(subcloud.initial_sync_state,
consts.INITIAL_SYNC_STATE_FAILED)
subcloud = db_api.subcloud_get(self.ctx, "subcloud1")
self.assertEqual(subcloud.initial_sync_state, consts.INITIAL_SYNC_STATE_FAILED)
# Verify that the subcloud was not enabled
self.iswm.enable_subcloud.assert_not_called()
# Verify the initial sync was retried
self.mock_eventlet_spawn_after.assert_called_with(
initial_sync_worker_manager.SYNC_FAIL_HOLD_OFF, mock.ANY, 'subcloud1')
initial_sync_worker_manager.SYNC_FAIL_HOLD_OFF, mock.ANY, "subcloud1"
)
def test_reattempt_sync(self):
utils.create_subcloud_static(
self.ctx,
name='subcloud1',
initial_sync_state=consts.INITIAL_SYNC_STATE_NONE)
name="subcloud1",
initial_sync_state=consts.INITIAL_SYNC_STATE_NONE,
)
utils.create_subcloud_static(
self.ctx,
name='subcloud2',
initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED)
name="subcloud2",
initial_sync_state=consts.INITIAL_SYNC_STATE_FAILED,
)
# Reattempt sync success
self.iswm._reattempt_sync('subcloud2')
self.iswm._reattempt_sync("subcloud2")
# Verify the subcloud is in the correct initial sync state
subcloud = db_api.subcloud_get(self.ctx, 'subcloud2')
self.assertEqual(subcloud.initial_sync_state,
consts.INITIAL_SYNC_STATE_REQUESTED)
subcloud = db_api.subcloud_get(self.ctx, "subcloud2")
self.assertEqual(
subcloud.initial_sync_state, consts.INITIAL_SYNC_STATE_REQUESTED
)
# Reattempt sync when not needed
self.iswm._reattempt_sync('subcloud1')
self.iswm._reattempt_sync("subcloud1")
# Verify the subcloud is in the correct initial sync state
subcloud = db_api.subcloud_get(self.ctx, 'subcloud1')
self.assertEqual(subcloud.initial_sync_state,
consts.INITIAL_SYNC_STATE_NONE)
subcloud = db_api.subcloud_get(self.ctx, "subcloud1")
self.assertEqual(subcloud.initial_sync_state, consts.INITIAL_SYNC_STATE_NONE)
def test_initial_sync_subclouds(self):
subcloud1 = utils.create_subcloud_static(
self.ctx,
name='subcloud1',
initial_sync_state='',
management_ip='192.168.1.11')
name="subcloud1",
initial_sync_state="",
management_ip="192.168.1.11",
)
subcloud2 = utils.create_subcloud_static(
self.ctx,
name='subcloud2',
initial_sync_state='',
management_ip='192.168.1.12')
name="subcloud2",
initial_sync_state="",
management_ip="192.168.1.12",
)
subcloud_capabilities = {
subcloud1.region_name: (base.CAPABILITES, subcloud1.management_ip),
subcloud2.region_name: (base.CAPABILITES, subcloud2.management_ip)
subcloud2.region_name: (base.CAPABILITES, subcloud2.management_ip),
}
self.iswm.initial_sync_subclouds(self.ctx, subcloud_capabilities)
# Verify 2 threads started, one for each of the subcloud
self.mock_thread_start.assert_any_call(self.iswm._initial_sync_subcloud,
mock.ANY,
subcloud1.region_name,
base.CAPABILITES,
subcloud1.management_ip)
self.mock_thread_start.assert_called_with(self.iswm._initial_sync_subcloud,
mock.ANY,
subcloud2.region_name,
base.CAPABILITES,
subcloud2.management_ip)
self.mock_thread_start.assert_any_call(
self.iswm._initial_sync_subcloud,
mock.ANY,
subcloud1.region_name,
base.CAPABILITES,
subcloud1.management_ip,
)
self.mock_thread_start.assert_called_with(
self.iswm._initial_sync_subcloud,
mock.ANY,
subcloud2.region_name,
base.CAPABILITES,
subcloud2.management_ip,
)

View File

@@ -44,13 +44,13 @@ class UUIDStub(object):
uuid.uuid4 = self.uuid4
UUIDs = (UUID1, UUID2, UUID3, UUID4, UUID5) = sorted([str(uuid.uuid4())
for x in range(5)])
UUIDs = (UUID1, UUID2, UUID3, UUID4, UUID5) = sorted(
[str(uuid.uuid4()) for x in range(5)]
)
def random_name():
return ''.join(random.choice(string.ascii_uppercase)
for x in range(10))
return "".join(random.choice(string.ascii_uppercase) for x in range(10))
def setup_dummy_db():
@@ -67,30 +67,31 @@ def reset_dummy_db():
meta.reflect(bind=engine)
for table in reversed(meta.sorted_tables):
if table.name == 'migrate_version':
if table.name == "migrate_version":
continue
engine.execute(table.delete())
def create_quota_limit(ctxt, **kwargs):
values = {
'project_id': UUID1,
'resource': "ram",
'limit': 10,
"project_id": UUID1,
"resource": "ram",
"limit": 10,
}
values.update(kwargs)
return db_api.quota_create(ctxt, **values)
def dummy_context(user='test_username', tenant='test_project_id',
region_name=None):
return context.RequestContext.from_dict({
'auth_token': 'abcd1234',
'user': user,
'project': tenant,
'is_admin': True,
'region_name': region_name
})
def dummy_context(user="test_username", tenant="test_project_id", region_name=None):
return context.RequestContext.from_dict(
{
"auth_token": "abcd1234",
"user": user,
"project": tenant,
"is_admin": True,
"region_name": region_name,
}
)
def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
@@ -101,12 +102,12 @@ def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
def create_subcloud_static(ctxt, name, **kwargs):
values = {
'software_version': '10.04',
'management_state': dccommon_consts.MANAGEMENT_MANAGED,
'availability_status': dccommon_consts.AVAILABILITY_ONLINE,
'initial_sync_state': '',
'capabilities': base.CAPABILITES,
'management_ip': '192.168.0.1'
"software_version": "10.04",
"management_state": dccommon_consts.MANAGEMENT_MANAGED,
"availability_status": dccommon_consts.AVAILABILITY_ONLINE,
"initial_sync_state": "",
"capabilities": base.CAPABILITES,
"management_ip": "192.168.0.1",
}
values.update(kwargs)
return db_api.subcloud_create(ctxt, name, values=values)
@@ -114,10 +115,10 @@ def create_subcloud_static(ctxt, name, **kwargs):
def create_subcloud_sync_static(ctxt, name, endpoint_type, **kwargs):
values = {
'subcloud_name': name,
'endpoint_type': endpoint_type,
'subcloud_id': '',
'sync_request': ''
"subcloud_name": name,
"endpoint_type": endpoint_type,
"subcloud_id": "",
"sync_request": "",
}
values.update(kwargs)
return db_api.subcloud_sync_create(ctxt, name, endpoint_type, values=values)

View File

@@ -1,3 +1,4 @@
# Copyright (c) 2018-2022, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,4 +14,4 @@
import pbr.version
version_info = pbr.version.VersionInfo('distributedcloud')
version_info = pbr.version.VersionInfo("distributedcloud")

View File

@@ -7,10 +7,6 @@ import sys
modules = [
"dccommon",
"dcdbsync",
"dcorch/api",
"dcorch/common",
"dcorch/db",
"dcorch/engine",
"dcorch",
"dcmanager/api",
"dcmanager/audit",
@@ -25,10 +21,7 @@ modules = [
formatted_modules = [
"dccommon",
"dcdbsync",
"dcorch/api",
"dcorch/common",
"dcorch/db",
"dcorch/engine",
"dcorch",
]