Apply black formatter to dcmanager/tests
This commit applies the Black format to the `dcmanager/tests` files to
ensure they adhere to the Black code style guidelines.

Test Plan:
PASS: Success in stx-distcloud-tox-black

Story: 2011149
Task: 50444

Change-Id: I59c9526f4754e6ff3dd34283b57c11f1ffc311dd
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>
parent a714f57a37
commit 2967ee254c
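The changes below are mechanical: Black normalizes string literals to double
quotes, removes backslash line continuations, and reflows calls and literals
that exceed its line-length limit, one element per line with a trailing comma.
A minimal before/after sketch of these rewrites, drawn from the first hunk
below:

    # Before: backslash continuation, single quotes, packed arguments.
    association = dc_client. \
        get_peer_group_association_with_peer_id_and_pg_id(
            dc_peer_system_peer.get('id'), peer_group_id)

    # After Black: double quotes, no continuation; the arguments are wrapped
    # only because the call exceeds the configured line length.
    association = dc_client.get_peer_group_association_with_peer_id_and_pg_id(
        dc_peer_system_peer.get("id"), peer_group_id
    )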
@@ -63,9 +63,9 @@ class PeerGroupAuditManager(manager.Manager):
             # Get peer site system peer
             dc_peer_system_peer = dc_client.get_system_peer(
                 utils.get_local_system().uuid)
-            association = dc_client. \
-                get_peer_group_association_with_peer_id_and_pg_id(
-                    dc_peer_system_peer.get('id'), peer_group_id)
+            association = dc_client.get_peer_group_association_with_peer_id_and_pg_id(
+                dc_peer_system_peer.get("id"), peer_group_id
+            )
             return association.get("sync-status")
         except Exception:
             LOG.exception(f"Failed to get subclouds of peer group "
@@ -111,7 +111,7 @@ class PeerMonitor(object):
                       self.peer.peer_name)
         return failed, dc_peer_subcloud_peer_group_list

-    def _update_sync_status_when_secondary_site_becomes_unreachable(self):
+    def _update_sync_status_secondary_site_becomes_unreachable(self):
         # Get associations by system peer
         associations = SystemPeerManager.get_local_associations(self.context,
                                                                 self.peer)
@@ -139,7 +139,7 @@ class PeerMonitor(object):
                 sync_status=sync_status,
                 sync_message=message)

-    def _update_sync_status_when_secondary_site_becomes_reachable(self):
+    def _update_sync_status_secondary_site_becomes_reachable(self):
         # Get associations by system peer
         associations = SystemPeerManager.get_local_associations(self.context,
                                                                 self.peer)
@@ -189,7 +189,7 @@ class PeerMonitor(object):
                     consts.SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE
                 )
                 # pylint: disable=line-too-long
-                self._update_sync_status_when_secondary_site_becomes_unreachable()  # noqa: E501
+                self._update_sync_status_secondary_site_becomes_unreachable()
                 failure_count = 0
                 self._set_require_audit_flag_to_associated_peer_groups()
             else:
@@ -203,7 +203,7 @@ class PeerMonitor(object):
                     consts.SYSTEM_PEER_AVAILABILITY_STATE_AVAILABLE
                 )
                 # pylint: disable=line-too-long
-                self._update_sync_status_when_secondary_site_becomes_reachable()  # noqa: E501
+                self._update_sync_status_secondary_site_becomes_reachable()
                 LOG.info("DC %s back online, clear alarm" %
                          self.peer.peer_name)
                 self._clear_failure()
@@ -96,10 +96,11 @@ class SystemPeerManager(manager.Manager):
             dc_peer_system_peer = dc_client.get_system_peer(
                 utils.get_local_system().uuid)
             # Get peer site group association
-            dc_peer_association = dc_client.\
-                get_peer_group_association_with_peer_id_and_pg_id(
-                    dc_peer_system_peer.get('id'),
-                    remote_pg.get('id'))
+            dc_peer_association = (
+                dc_client.get_peer_group_association_with_peer_id_and_pg_id(
+                    dc_peer_system_peer.get("id"), remote_pg.get("id")
+                )
+            )

             # Update peer site association sync_status only if the
             # sync_status is different from the current sync_status
@@ -669,7 +670,8 @@ class SystemPeerManager(manager.Manager):
         """Get non-primary Association from peer site."""
         try:
             return dc_client.get_peer_group_association_with_peer_id_and_pg_id(
-                dc_peer_system_peer_id, dc_peer_pg_id)
+                dc_peer_system_peer_id, dc_peer_pg_id
+            )
         except dccommon_exceptions.PeerGroupAssociationNotFound:
             LOG.error(f"Peer Group association does not exist on peer site."
                       f"Peer Group ID: {dc_peer_pg_id}, Peer System Peer ID: "
@@ -48,48 +48,55 @@ get_engine = api.get_engine
 # Enable foreign key support in sqlite - see:
 # http://docs.sqlalchemy.org/en/latest/dialects/sqlite.html

-SUBCLOUD_1 = {'name': 'subcloud1',
-              'region_name': '2ec93dfb654846909efe61d1b39dd2ce',
-              'rehomed': True,
-              'software_version': "22.12"}
-SUBCLOUD_2 = {'name': 'subcloud2',
-              'region_name': 'ca2761ee7aa34cbe8415ec9a3c86854f',
-              'rehomed': True,
-              'software_version': "22.12"}
-SUBCLOUD_3 = {'name': 'subcloud3',
-              'region_name': '659e12e5f7ad411abfcd83f5cedca0bf',
-              'rehomed': True,
-              'software_version': "21.12"}
-SUBCLOUD_4 = {'name': 'subcloud4',
-              'region_name': 'c25f3b0553384104b664789bd93a2ba8',
-              'rehomed': False,
-              'software_version': "21.12"}
-SUBCLOUD_5 = {'name': 'subcloud5',
-              'region_name': '809581dc2d154e008480bac1f43b7aff',
-              'rehomed': False,
-              'software_version': "21.12"}
-SUBCLOUD_6 = {'name': 'subcloud6',
-              'region_name': '8c60b99f3e1245b7bc5a049802ade8d2',
-              'rehomed': False,
-              'software_version': "22.12"}
-SUBCLOUD_7 = {'name': 'subcloud7',
-              'region_name': '9fde6dca22fa422bb1e8cf03bedc18e4'}
-SUBCLOUD_8 = {'name': 'subcloud8',
-              'region_name': 'f3cb0b109c4543fda3ed50ed5783279d'}
-SUBCLOUD_9 = {'name': 'subcloud9',
-              'region_name': '1cfab1df7b444bb3bd562894d684f352'}
-SUBCLOUD_10 = {'name': 'subcloud10',
-               'region_name': '6d0040199b4f4a9fb4a1f2ed4d498159'}
-SUBCLOUD_11 = {'name': 'subcloud11',
-               'region_name': '169e6fc231e94959ad6ff0a66fbcb753'}
+SUBCLOUD_1 = {
+    "name": "subcloud1",
+    "region_name": "2ec93dfb654846909efe61d1b39dd2ce",
+    "rehomed": True,
+    "software_version": "22.12",
+}
+SUBCLOUD_2 = {
+    "name": "subcloud2",
+    "region_name": "ca2761ee7aa34cbe8415ec9a3c86854f",
+    "rehomed": True,
+    "software_version": "22.12",
+}
+SUBCLOUD_3 = {
+    "name": "subcloud3",
+    "region_name": "659e12e5f7ad411abfcd83f5cedca0bf",
+    "rehomed": True,
+    "software_version": "21.12",
+}
+SUBCLOUD_4 = {
+    "name": "subcloud4",
+    "region_name": "c25f3b0553384104b664789bd93a2ba8",
+    "rehomed": False,
+    "software_version": "21.12",
+}
+SUBCLOUD_5 = {
+    "name": "subcloud5",
+    "region_name": "809581dc2d154e008480bac1f43b7aff",
+    "rehomed": False,
+    "software_version": "21.12",
+}
+SUBCLOUD_6 = {
+    "name": "subcloud6",
+    "region_name": "8c60b99f3e1245b7bc5a049802ade8d2",
+    "rehomed": False,
+    "software_version": "22.12",
+}
+SUBCLOUD_7 = {"name": "subcloud7", "region_name": "9fde6dca22fa422bb1e8cf03bedc18e4"}
+SUBCLOUD_8 = {"name": "subcloud8", "region_name": "f3cb0b109c4543fda3ed50ed5783279d"}
+SUBCLOUD_9 = {"name": "subcloud9", "region_name": "1cfab1df7b444bb3bd562894d684f352"}
+SUBCLOUD_10 = {"name": "subcloud10", "region_name": "6d0040199b4f4a9fb4a1f2ed4d498159"}
+SUBCLOUD_11 = {"name": "subcloud11", "region_name": "169e6fc231e94959ad6ff0a66fbcb753"}

 SUBCLOUD_SAMPLE_DATA_0 = [
     6,  # id
     "subcloud-4",  # name
-    'demo subcloud',  # description
+    "demo subcloud",  # description
     "Ottawa-Lab-Aisle_3-Rack_C",  # location
     "12.34",  # software-version
-    'managed',  # management-state
+    "managed",  # management-state
     "online",  # availability-status
     "fd01:3::0/64",  # management_subnet
     "fd01:3::1",  # management_gateway_address
@@ -101,7 +108,7 @@ SUBCLOUD_SAMPLE_DATA_0 = [
     "NULL",  # reserved-2
     "2018-05-15 14:45:12.508708",  # created-at
     "2018-05-24 10:48:18.090931",  # updated-at
-    'NULL',  # deleted-at
+    "NULL",  # deleted-at
     0,  # deleted
     "10.10.10.0/24",  # external_oam_subnet
     "10.10.10.1",  # external_oam_gateway_address
@@ -110,8 +117,8 @@ SUBCLOUD_SAMPLE_DATA_0 = [
     1,  # group_id
     consts.DEPLOY_STATE_DONE,  # deploy_status
     consts.ERROR_DESC_EMPTY,  # error_description
-    SUBCLOUD_4['region_name'],  # region_name
-    json.dumps({'data_install': 'test data install values'}),  # data_install
+    SUBCLOUD_4["region_name"],  # region_name
+    json.dumps({"data_install": "test data install values"}),  # data_install
 ]

@@ -134,8 +141,7 @@ class DCManagerTestCase(base.BaseTestCase):
     """Test case base class for all unit tests."""

     def setup_dummy_db(self):
-        options.cfg.set_defaults(options.database_opts,
-                                 sqlite_synchronous=False)
+        options.cfg.set_defaults(options.database_opts, sqlite_synchronous=False)
         options.set_defaults(cfg.CONF, connection="sqlite://")
         engine = get_engine()
         db_api.db_sync(engine)
@@ -147,7 +153,7 @@ class DCManagerTestCase(base.BaseTestCase):
         meta.reflect(bind=engine)

         for table in reversed(meta.sorted_tables):
-            if table.name == 'migrate_version':
+            if table.name == "migrate_version":
                 continue
             engine.execute(table.delete())

@@ -171,7 +177,7 @@ class DCManagerTestCase(base.BaseTestCase):
     def _mock_pecan(self):
         """Mock pecan's abort"""

-        mock_patch_object = mock.patch.object(pecan, 'abort', wraps=pecan.abort)
+        mock_patch_object = mock.patch.object(pecan, "abort", wraps=pecan.abort)
         self.mock_pecan_abort = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

@@ -185,103 +191,103 @@ class DCManagerTestCase(base.BaseTestCase):
     def _mock_audit_rpc_client(self):
         """Mock rpc's manager audit client"""

-        mock_patch_object = mock.patch.object(rpcapi, 'ManagerAuditClient')
+        mock_patch_object = mock.patch.object(rpcapi, "ManagerAuditClient")
         self.mock_audit_rpc_client = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_rpc_client(self):
         """Mock rpc's manager client"""

-        mock_patch_object = mock.patch.object(rpc_client, 'ManagerClient')
+        mock_patch_object = mock.patch.object(rpc_client, "ManagerClient")
         self.mock_rpc_client = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_rpc_subcloud_state_client(self):
         """Mock rpc's subcloud state client"""

-        mock_patch_object = mock.patch.object(rpc_client, 'SubcloudStateClient')
+        mock_patch_object = mock.patch.object(rpc_client, "SubcloudStateClient")
         self.mock_rpc_subcloud_state_client = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_rpc_api_manager_audit_worker_client(self):
         """Mock rpc's api manager audit worker client"""

-        mock_patch_object = mock.patch.object(rpcapi, 'ManagerAuditWorkerClient')
+        mock_patch_object = mock.patch.object(rpcapi, "ManagerAuditWorkerClient")
         self.mock_rpc_api_manager_audit_worker_client = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_subcloud_audit_manager_context(self):
         """Mock subcloud audit manager's context"""

-        mock_patch_object = mock.patch.object(subcloud_audit_manager, 'context')
+        mock_patch_object = mock.patch.object(subcloud_audit_manager, "context")
         self.mock_subcloud_audit_manager_context = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_openstack_driver(self, target):
         """Mock the target's OpenStackDriver"""

-        mock_patch_object = mock.patch.object(target, 'OpenStackDriver')
+        mock_patch_object = mock.patch.object(target, "OpenStackDriver")
         self.mock_openstack_driver = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_sysinv_client(self, target):
         """Mock the target's SysinvClient"""

-        mock_patch_object = mock.patch.object(target, 'SysinvClient')
+        mock_patch_object = mock.patch.object(target, "SysinvClient")
         self.mock_sysinv_client = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_software_client(self, target):
         """Mock the target's SoftwareClient"""

-        mock_patch_object = mock.patch.object(target, 'SoftwareClient')
+        mock_patch_object = mock.patch.object(target, "SoftwareClient")
         self.mock_software_client = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_fm_client(self, target):
         """Mock the target's FmClient"""

-        mock_patch_object = mock.patch.object(target, 'FmClient')
+        mock_patch_object = mock.patch.object(target, "FmClient")
         self.mock_fm_client = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_read_from_cache(self, target):
-        mock_patch = mock.patch.object(target, '_read_from_cache')
+        mock_patch = mock.patch.object(target, "_read_from_cache")
         self.mock_read_from_cache = mock_patch.start()
         self.addCleanup(mock_patch.stop)

     def _mock_vim_client(self, target):
         """Mock the target's VimClient"""

-        mock_patch_object = mock.patch.object(target, 'VimClient')
+        mock_patch_object = mock.patch.object(target, "VimClient")
         self.mock_vim_client = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_get_network_address_pool(self):
         """Mock phased subcloud deploy's get_network_address_pool"""

-        mock_patch_object = mock.patch.object(psd_common, 'get_network_address_pool')
+        mock_patch_object = mock.patch.object(psd_common, "get_network_address_pool")
         self.mock_get_network_address_pool = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_get_oam_addresses(self):
         """Mock phased subcloud deploy's get_oam_addresses"""

-        mock_patch_object = mock.patch.object(psd_common, 'get_oam_addresses')
+        mock_patch_object = mock.patch.object(psd_common, "get_oam_addresses")
         self.mock_get_oam_addresses = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_get_ks_client(self):
         """Mock phased subcloud deploy's get_ks_client"""

-        mock_patch_object = mock.patch.object(psd_common, 'get_ks_client')
+        mock_patch_object = mock.patch.object(psd_common, "get_ks_client")
         self.mock_get_ks_client = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_query(self):
         """Mock phased subcloud deploy's query"""

-        mock_patch_object = mock.patch.object(psd_common.PatchingClient, 'query')
+        mock_patch_object = mock.patch.object(psd_common.PatchingClient, "query")
         self.mock_query = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

@@ -289,7 +295,7 @@ class DCManagerTestCase(base.BaseTestCase):
         """Mock phased subcloud deploy's get_subcloud_db_install_values"""

         mock_patch_object = mock.patch.object(
-            psd_common, 'get_subcloud_db_install_values'
+            psd_common, "get_subcloud_db_install_values"
         )
         self.mock_get_subcloud_db_install_values = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)
@@ -297,75 +303,75 @@ class DCManagerTestCase(base.BaseTestCase):
     def _mock_validate_k8s_version(self):
         """Mock phased subcloud deploy's validate_k8s_version"""

-        mock_patch_object = mock.patch.object(psd_common, 'validate_k8s_version')
+        mock_patch_object = mock.patch.object(psd_common, "validate_k8s_version")
         self.mock_validate_k8s_version = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_get_vault_load_files(self):
         """Mock dcmanager util's get_vault_load_files"""

-        mock_patch_object = mock.patch.object(dutils, 'get_vault_load_files')
+        mock_patch_object = mock.patch.object(dutils, "get_vault_load_files")
         self.mock_get_vault_load_files = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_load_yaml_file(self):
         """Mock dcmanager util's load_yaml_file"""

-        mock_patch_object = mock.patch.object(dutils, 'load_yaml_file')
+        mock_patch_object = mock.patch.object(dutils, "load_yaml_file")
         self.mock_load_yaml_file = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_get_local_system(self):
         """Mock dcmanager util's get_local_system"""

-        mock_patch_object = mock.patch.object(dutils, 'get_local_system')
+        mock_patch_object = mock.patch.object(dutils, "get_local_system")
         self.mock_get_local_system = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_os_remove(self):
         """Mock os' remove"""

-        mock_patch_object = mock.patch.object(os, 'remove')
+        mock_patch_object = mock.patch.object(os, "remove")
         self.mock_os_remove = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_os_mkdir(self):
         """Mock os' mkdir"""

-        mock_patch_object = mock.patch.object(os, 'mkdir')
+        mock_patch_object = mock.patch.object(os, "mkdir")
         self.mock_os_mkdir = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_os_listdir(self):
         """Mock os' listdir"""

-        mock_patch_object = mock.patch.object(os, 'listdir')
+        mock_patch_object = mock.patch.object(os, "listdir")
         self.mock_os_listdir = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_os_path_isdir(self):
         """Mock os' path.isdir"""

-        mock_patch_object = mock.patch.object(os_path, 'isdir')
+        mock_patch_object = mock.patch.object(os_path, "isdir")
         self.mock_os_path_isdir = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_os_path_exists(self):
         """Mock os' path.exists"""

-        mock_patch_object = mock.patch.object(os_path, 'exists')
+        mock_patch_object = mock.patch.object(os_path, "exists")
         self.mock_os_path_exists = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_builtins_open(self):
         """Mock builtins' open"""

-        mock_patch_object = mock.patch.object(builtins, 'open')
+        mock_patch_object = mock.patch.object(builtins, "open")
         self.mock_builtins_open = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_log(self, target):
-        mock_patch_object = mock.patch.object(target, 'LOG')
+        mock_patch_object = mock.patch.object(target, "LOG")
         self.mock_log = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

@@ -379,7 +385,7 @@ class DCManagerTestCase(base.BaseTestCase):
         else:
             self.mock_pecan_abort.assert_called_with(http_status)

-    def _create_password(self, keyword='default'):
+    def _create_password(self, keyword="default"):
        """Create a password with based on the specified keyword"""

        return base64.b64encode(keyword.encode("utf-8")).decode("utf-8")
@@ -387,21 +393,21 @@ class DCManagerTestCase(base.BaseTestCase):
     def _mock_subcloud_manager(self, target):
         """Mock the target's SubcloudManager"""

-        mock_patch_object = mock.patch.object(target, 'SubcloudManager')
+        mock_patch_object = mock.patch.object(target, "SubcloudManager")
         self.mock_subcloud_manager = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_peer_monitor_manager(self, target):
         """Mock the target's PeerMonitorManager"""

-        mock_patch_object = mock.patch.object(target, 'PeerMonitorManager')
+        mock_patch_object = mock.patch.object(target, "PeerMonitorManager")
         self.mock_peer_monitor_manager = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

     def _mock_ansible_run_playbook(self):
         """Mock AnsiblePlaybook's run_playbook"""

-        mock_patch_object = mock.patch.object(AnsiblePlaybook, 'run_playbook')
+        mock_patch_object = mock.patch.object(AnsiblePlaybook, "run_playbook")
         self.mock_ansible_run_playbook = mock_patch_object.start()
         self.addCleanup(mock_patch_object.stop)

@@ -32,7 +32,7 @@ from dcmanager.tests.unit.common import consts as test_consts
 from dcmanager.tests import utils

 config.register_options()
-OPT_GROUP_NAME = 'keystone_authtoken'
+OPT_GROUP_NAME = "keystone_authtoken"
 cfg.CONF.import_group(OPT_GROUP_NAME, "keystonemiddleware.auth_token")

@@ -49,11 +49,11 @@ class DCManagerApiTest(DCManagerTestCase):
         self.CONF = self.useFixture(config_fixture).conf
         config_fixture.set_config_dirs([])

-        self.CONF.set_override('auth_strategy', 'noauth')
+        self.CONF.set_override("auth_strategy", "noauth")

         self.app = self._make_app()

-        self.url = '/'
+        self.url = "/"
         # The put method is used as a default value, leading to the generic
         # implementation on controllers in case the method is not specified
         self.method = self.app.put
@@ -61,20 +61,19 @@ class DCManagerApiTest(DCManagerTestCase):
         self.upload_files = None
         self.verb = None
         self.headers = {
-            'X-Tenant-Id': utils.UUID1, 'X_ROLE': 'admin,member,reader',
-            'X-Identity-Status': 'Confirmed', 'X-Project-Name': 'admin'
+            "X-Tenant-Id": utils.UUID1,
+            "X_ROLE": "admin,member,reader",
+            "X-Identity-Status": "Confirmed",
+            "X-Project-Name": "admin",
         }

     def _make_app(self, enable_acl=False):
         self.config_fixture = {
-            'app': {
-                'root': 'dcmanager.api.controllers.root.RootController',
-                'modules': ['dcmanager.api'],
-                'enable_acl': enable_acl,
-                'errors': {
-                    400: '/error',
-                    '__force_dict__': True
-                }
-            }
+            "app": {
+                "root": "dcmanager.api.controllers.root.RootController",
+                "modules": ["dcmanager.api"],
+                "enable_acl": enable_acl,
+                "errors": {400: "/error", "__force_dict__": True},
+            },
         }

@@ -86,16 +85,21 @@ class DCManagerApiTest(DCManagerTestCase):
         kwargs = {}

         if self.upload_files:
-            kwargs = {'upload_files': self.upload_files}
+            kwargs = {"upload_files": self.upload_files}

         return self.method(
-            self.url, headers=self.headers, params=self.params,
-            expect_errors=True, **kwargs
+            self.url,
+            headers=self.headers,
+            params=self.params,
+            expect_errors=True,
+            **kwargs,
         )

     def _assert_response(
-        self, response, status_code=http.client.OK,
-        content_type=test_consts.APPLICATION_JSON
+        self,
+        response,
+        status_code=http.client.OK,
+        content_type=test_consts.APPLICATION_JSON,
     ):
         """Assert the response for a request"""

@@ -103,8 +107,12 @@ class DCManagerApiTest(DCManagerTestCase):
         self.assertEqual(response.content_type, content_type)

     def _assert_pecan_and_response(
-        self, response, http_status, content=None, call_count=1,
-        content_type=test_consts.TEXT_PLAIN
+        self,
+        response,
+        http_status,
+        content=None,
+        call_count=1,
+        content_type=test_consts.TEXT_PLAIN,
     ):
         """Assert the response and pecan abort for a failed request"""

@@ -122,7 +130,7 @@ class TestRootController(DCManagerApiTest):
     def setUp(self):
         super(TestRootController, self).setUp()

-        self.url = '/'
+        self.url = "/"
         self.method = self.app.get

     def _test_method_returns_405(self, method, content_type=test_consts.TEXT_PLAIN):
@@ -141,7 +149,7 @@ class TestRootController(DCManagerApiTest):

         self._assert_response(response)
         json_body = jsonutils.loads(response.body)
-        versions = json_body.get('versions')
+        versions = json_body.get("versions")
         self.assertEqual(1, len(versions))

     def test_request_id(self):
@@ -150,11 +158,9 @@ class TestRootController(DCManagerApiTest):
         response = self._send_request()

         self._assert_response(response)
-        self.assertIn('x-openstack-request-id', response.headers)
-        self.assertTrue(
-            response.headers['x-openstack-request-id'].startswith('req-')
-        )
-        id_part = response.headers['x-openstack-request-id'].split('req-')[1]
+        self.assertIn("x-openstack-request-id", response.headers)
+        self.assertTrue(response.headers["x-openstack-request-id"].startswith("req-"))
+        id_part = response.headers["x-openstack-request-id"].split("req-")[1]
         self.assertTrue(uuidutils.is_uuid_like(id_part))

     def test_post(self):
@@ -180,19 +186,17 @@ class TestRootController(DCManagerApiTest):
     def test_head(self):
         """Test head request is not allowed on root"""

-        self._test_method_returns_405(
-            self.app.head, content_type=test_consts.TEXT_HTML
-        )
+        self._test_method_returns_405(self.app.head, content_type=test_consts.TEXT_HTML)


 class TestErrors(DCManagerApiTest):

     def setUp(self):
         super(TestErrors, self).setUp()
-        cfg.CONF.set_override('admin_tenant', 'fake_tenant_id', group='cache')
+        cfg.CONF.set_override("admin_tenant", "fake_tenant_id", group="cache")

     def test_404(self):
-        self.url = '/assert_called_once'
+        self.url = "/assert_called_once"
         self.method = self.app.get

         response = self._send_request()
@@ -201,7 +205,7 @@ class TestErrors(DCManagerApiTest):
         )

     def test_version_1_root_controller(self):
-        self.url = f'/v1.0/{uuidutils.generate_uuid()}/bad_method'
+        self.url = f"/v1.0/{uuidutils.generate_uuid()}/bad_method"
         self.method = self.app.patch

         response = self._send_request()
@@ -215,7 +219,7 @@ class TestKeystoneAuth(DCManagerApiTest):
     def setUp(self):
         super(TestKeystoneAuth, self).setUp()

-        cfg.CONF.set_override('auth_strategy', 'keystone')
+        cfg.CONF.set_override("auth_strategy", "keystone")

         self.method = self.app.get

@@ -30,10 +30,10 @@ class APIMixin(object):
     FAKE_TENANT = utils.UUID1

     api_headers = {
-        'X-Tenant-Id': FAKE_TENANT,
-        'X_ROLE': 'admin,member,reader',
-        'X-Identity-Status': 'Confirmed',
-        'X-Project-Name': 'admin'
+        "X-Tenant-Id": FAKE_TENANT,
+        "X_ROLE": "admin,member,reader",
+        "X-Identity-Status": "Confirmed",
+        "X-Project-Name": "admin",
     }

     # subclasses should provide methods
@@ -47,7 +47,7 @@ class APIMixin(object):
         return self.api_headers

     def get_single_url(self, uuid):
-        return '%s/%s' % (self.get_api_prefix(), uuid)
+        return "%s/%s" % (self.get_api_prefix(), uuid)

     def get_api_prefix(self):
         raise NotImplementedError
@@ -102,15 +102,16 @@ class PostMixin(object):
         with contextlib.ExitStack() as stack:
             # Only mocks it if it's not already mocked by the derived class
             if not isinstance(rpc_client.ManagerClient, mock.Mock):
-                stack.enter_context(mock.patch.object(rpc_client,
-                                                      'ManagerClient'))
+                stack.enter_context(mock.patch.object(rpc_client, "ManagerClient"))
             params = self.get_post_params()
             upload_files = self.get_post_upload_files()
             response = self.app.post(
-                self.get_api_prefix(), params=params,
-                upload_files=upload_files, headers=self.get_api_headers()
+                self.get_api_prefix(),
+                params=params,
+                upload_files=upload_files,
+                headers=self.get_api_headers(),
             )
-            self.assertEqual(response.content_type, 'application/json')
+            self.assertEqual(response.content_type, "application/json")
             self.assertEqual(response.status_code, http.client.OK)
             self.assert_fields(response.json)

@@ -118,49 +119,53 @@
 class PostRejectedMixin(object):
     # Test that a POST operation is blocked by the API
     # API should return 400 BAD_REQUEST or FORBIDDEN 403
-    @mock.patch.object(rpc_client, 'ManagerClient')
-    @mock.patch.object(pecan, 'abort', wraps=pecan.abort)
+    @mock.patch.object(rpc_client, "ManagerClient")
+    @mock.patch.object(pecan, "abort", wraps=pecan.abort)
     def test_create_not_allowed(self, mock_pecan_abort, _):
         response = self.app.post(
-            self.API_PREFIX, params=self.get_post_params(),
+            self.API_PREFIX,
+            params=self.get_post_params(),
             upload_files=self.get_post_upload_files(),
-            headers=self.get_api_headers(), expect_errors=True
+            headers=self.get_api_headers(),
+            expect_errors=True,
         )

         self.assertEqual(response.status_code, http.client.FORBIDDEN)
         mock_pecan_abort.assert_called_once()
         mock_pecan_abort.assert_called_with(
-            http.client.FORBIDDEN, 'Operation not permitted.'
+            http.client.FORBIDDEN, "Operation not permitted."
         )


 class PostJSONMixin(object):

-    @mock.patch.object(rpc_client, 'ManagerClient')
+    @mock.patch.object(rpc_client, "ManagerClient")
     def test_create_success(self, _):
         # Test that a POST (post_json) operation is supported by the API
         ndict = self.get_post_object()
         response = self.app.post_json(
             self.get_api_prefix(), ndict, headers=self.get_api_headers()
         )
-        self.assertEqual(response.content_type, 'application/json')
+        self.assertEqual(response.content_type, "application/json")


 class PostJSONRejectedMixin(object):
     # Test that a POST (post_json) operation is blocked by the API
     # API should return 400 BAD_REQUEST or FORBIDDEN 403
-    @mock.patch.object(rpc_client, 'ManagerClient')
-    @mock.patch.object(pecan, 'abort', wraps=pecan.abort)
+    @mock.patch.object(rpc_client, "ManagerClient")
+    @mock.patch.object(pecan, "abort", wraps=pecan.abort)
     def test_create_not_allowed(self, mock_pecan_abort, _):
         response = self.app.post_json(
-            self.API_PREFIX, self.get_post_object(), headers=self.get_api_headers(),
-            expect_errors=True
+            self.API_PREFIX,
+            self.get_post_object(),
+            headers=self.get_api_headers(),
+            expect_errors=True,
         )

         self.assertEqual(response.status_code, http.client.FORBIDDEN)
         mock_pecan_abort.assert_called_once()
         mock_pecan_abort.assert_called_with(
-            http.client.FORBIDDEN, 'Operation not permitted.'
+            http.client.FORBIDDEN, "Operation not permitted."
         )

@@ -172,7 +177,7 @@ class GetMixin(object):
     initial_list_size = 0

     # Performing a GET on this ID should fail. subclass mixins can override
-    invalid_id = '123'
+    invalid_id = "123"

     def validate_entry(self, result_item):
         self.assert_fields(result_item)
@@ -185,18 +190,16 @@ class GetMixin(object):
         self.validate_entry(result_item)

     def validate_list_response(self, expected_length, response):
-        self.assertEqual(response.content_type, 'application/json')
+        self.assertEqual(response.content_type, "application/json")
         self.assertEqual(response.status_code, http.client.OK)

         # validate the list length
         self.validate_list(expected_length, response.json)

-    @mock.patch.object(rpc_client, 'ManagerClient')
+    @mock.patch.object(rpc_client, "ManagerClient")
     def test_initial_list_size(self, _):
         # Test that a GET operation for a list is supported by the API
-        response = self.app.get(
-            self.get_api_prefix(), headers=self.get_api_headers()
-        )
+        response = self.app.get(self.get_api_prefix(), headers=self.get_api_headers())
         # Validate the initial length
         self.validate_list_response(self.initial_list_size, response)

@@ -204,26 +207,25 @@ class GetMixin(object):
         context = utils.dummy_context()
         self._create_db_object(context)

-        response = self.app.get(
-            self.get_api_prefix(), headers=self.get_api_headers()
-        )
+        response = self.app.get(self.get_api_prefix(), headers=self.get_api_headers())
         self.validate_list_response(self.initial_list_size + 1, response)

-    @mock.patch.object(rpc_client, 'ManagerClient')
-    @mock.patch.object(pecan, 'abort', wraps=pecan.abort)
+    @mock.patch.object(rpc_client, "ManagerClient")
+    @mock.patch.object(pecan, "abort", wraps=pecan.abort)
     def test_fail_get_single(self, mock_pecan_abort, _):
         # Test that a GET operation for an invalid ID returns the
         # appropriate error results
         response = self.app.get(
-            self.get_single_url(self.invalid_id), headers=self.get_api_headers(),
-            expect_errors=True
+            self.get_single_url(self.invalid_id),
+            headers=self.get_api_headers(),
+            expect_errors=True,
         )

         self.assertEqual(response.status_code, http.client.NOT_FOUND)
         mock_pecan_abort.assert_called_once()
         mock_pecan_abort.assert_called_with(http.client.NOT_FOUND, mock.ANY)

-    @mock.patch.object(rpc_client, 'ManagerClient')
+    @mock.patch.object(rpc_client, "ManagerClient")
     def test_get_single(self, _):
         context = utils.dummy_context()
         db_obj = self._create_db_object(context)
@@ -232,7 +234,7 @@ class GetMixin(object):
         response = self.app.get(
             self.get_single_url(db_obj.id), headers=self.get_api_headers()
         )
-        self.assertEqual(response.content_type, 'application/json')
+        self.assertEqual(response.content_type, "application/json")
         self.assertEqual(response.status_code, http.client.OK)
         self.validate_entry(response.json)

@@ -244,50 +246,54 @@ class UpdateMixin(object):
         for key, value in sub_dict.items():
             self.assertEqual(value, full_obj.get(key))

-    @mock.patch.object(rpc_client, 'ManagerClient')
+    @mock.patch.object(rpc_client, "ManagerClient")
     def test_update_success(self, _):
         context = utils.dummy_context()
         single_obj = self._create_db_object(context)
         update_data = self.get_update_object()

         response = self.app.patch_json(
-            self.get_single_url(single_obj.id), headers=self.get_api_headers(),
-            params=update_data
+            self.get_single_url(single_obj.id),
+            headers=self.get_api_headers(),
+            params=update_data,
         )
-        self.assertEqual(response.content_type, 'application/json')
+        self.assertEqual(response.content_type, "application/json")
         self.assertEqual(response.status_code, http.client.OK)
         self.validate_updated_fields(update_data, response.json)

-    @mock.patch.object(rpc_client, 'ManagerClient')
-    @mock.patch.object(pecan, 'abort', wraps=pecan.abort)
+    @mock.patch.object(rpc_client, "ManagerClient")
+    @mock.patch.object(pecan, "abort", wraps=pecan.abort)
     def test_update_empty_changeset(self, mock_pecan_abort, _):
         context = utils.dummy_context()
         single_obj = self._create_db_object(context)

         response = self.app.patch_json(
-            self.get_single_url(single_obj.id), headers=self.get_api_headers(),
-            params={}, expect_errors=True
+            self.get_single_url(single_obj.id),
+            headers=self.get_api_headers(),
+            params={},
+            expect_errors=True,
         )

         self.assertEqual(response.status_code, http.client.BAD_REQUEST)
         mock_pecan_abort.assert_called_once()
-        mock_pecan_abort.assert_called_with(http.client.BAD_REQUEST, 'Body required')
+        mock_pecan_abort.assert_called_with(http.client.BAD_REQUEST, "Body required")


 # ------ API Delete Mixin
 class DeleteMixin(object):

-    @mock.patch.object(rpc_client, 'ManagerClient')
+    @mock.patch.object(rpc_client, "ManagerClient")
     def test_delete_success(self, _):
         context = utils.dummy_context()
         single_obj = self._create_db_object(context)
-        response = self.app.delete(self.get_single_url(single_obj.id),
-                                   headers=self.get_api_headers())
-        self.assertEqual(response.content_type, 'application/json')
+        response = self.app.delete(
+            self.get_single_url(single_obj.id), headers=self.get_api_headers()
+        )
+        self.assertEqual(response.content_type, "application/json")
         self.assertEqual(response.status_code, http.client.OK)

-    @mock.patch.object(rpc_client, 'ManagerClient')
-    @mock.patch.object(pecan, 'abort', wraps=pecan.abort)
+    @mock.patch.object(rpc_client, "ManagerClient")
+    @mock.patch.object(pecan, "abort", wraps=pecan.abort)
     def test_double_delete(self, mock_pecan_abort, _):
         context = utils.dummy_context()
         single_obj = self._create_db_object(context)
@@ -299,8 +305,9 @@ class DeleteMixin(object):

         # delete the same object a second time. this should fail (NOT_FOUND)
         response = self.app.delete(
-            self.get_single_url(single_obj.id), headers=self.get_api_headers(),
-            expect_errors=True
+            self.get_single_url(single_obj.id),
+            headers=self.get_api_headers(),
+            expect_errors=True,
         )
         self.assertEqual(response.status_code, http.client.NOT_FOUND)

@@ -23,7 +23,7 @@ class BaseTestSubcloudAlarmController(DCManagerApiTest):
     def setUp(self):
         super().setUp()

-        self.url = '/v1.0/alarms'
+        self.url = "/v1.0/alarms"


 class TestSubcloudAlarmController(BaseTestSubcloudAlarmController):
@@ -40,7 +40,7 @@ class TestSubcloudAlarmController(BaseTestSubcloudAlarmController):
         response = self._send_request()

         self._assert_response(response)
-        self.assertEqual(response.text, 'null')
+        self.assertEqual(response.text, "null")


 class TestSubcloudAlarmControllerGet(BaseTestSubcloudAlarmController):
@@ -55,24 +55,32 @@ class TestSubcloudAlarmControllerGet(BaseTestSubcloudAlarmController):
         """Test get succeeds"""

         subcloud1_values = {
-            'uuid': utils.UUID2, 'critical_alarms': 1, 'major_alarms': 2,
-            'minor_alarms': 3, 'warnings': 0, 'cloud_status': 'critical'
+            "uuid": utils.UUID2,
+            "critical_alarms": 1,
+            "major_alarms": 2,
+            "minor_alarms": 3,
+            "warnings": 0,
+            "cloud_status": "critical",
         }

         subcloud2_values = {
-            'uuid': utils.UUID3, 'critical_alarms': 0, 'major_alarms': 2,
-            'minor_alarms': 3, 'warnings': 4, 'cloud_status': 'degraded'
+            "uuid": utils.UUID3,
+            "critical_alarms": 0,
+            "major_alarms": 2,
+            "minor_alarms": 3,
+            "warnings": 4,
+            "cloud_status": "degraded",
         }

         subcloud_summary = [
-            {'region_name': 'subcloud1', **subcloud1_values},
-            {'region_name': 'subcloud2', **subcloud2_values}
+            {"region_name": "subcloud1", **subcloud1_values},
+            {"region_name": "subcloud2", **subcloud2_values},
         ]

-        db_api.subcloud_alarms_create(self.ctx, 'subcloud2', values=subcloud2_values)
-        db_api.subcloud_alarms_create(self.ctx, 'subcloud1', values=subcloud1_values)
+        db_api.subcloud_alarms_create(self.ctx, "subcloud2", values=subcloud2_values)
+        db_api.subcloud_alarms_create(self.ctx, "subcloud1", values=subcloud1_values)

         response = self._send_request()

         self._assert_response(response)
-        self.assertEqual(subcloud_summary, response.json.get('alarm_summary'))
+        self.assertEqual(subcloud_summary, response.json.get("alarm_summary"))
@@ -25,7 +25,7 @@ class BaseTestNotificationsController(DCManagerApiTest):
     def setUp(self):
         super().setUp()

-        self.url = '/v1.0/notifications'
+        self.url = "/v1.0/notifications"

         self._mock_audit_rpc_client()

@@ -44,7 +44,7 @@ class TestNotificationsController(BaseTestNotificationsController):
         response = self._send_request()

         self._assert_response(response)
-        self.assertEqual(response.text, 'null')
+        self.assertEqual(response.text, "null")


 class TestNotificationsControllerPost(BaseTestNotificationsController):
@@ -58,7 +58,7 @@ class TestNotificationsControllerPost(BaseTestNotificationsController):
     def test_post_succeeds_with_platform_upgrade_completed(self):
         """Test post succeeds with platform upgrade completed event"""

-        self.params = json.dumps({'events': ['platform-upgrade-completed']})
+        self.params = json.dumps({"events": ["platform-upgrade-completed"]})

         response = self._send_request()

@@ -69,7 +69,7 @@ class TestNotificationsControllerPost(BaseTestNotificationsController):
     def test_post_succeeds_with_k8s_upgrade_completed(self):
         """Test post succeeds with k8s upgrade completed event"""

-        self.params = json.dumps({'events': ['k8s-upgrade-completed']})
+        self.params = json.dumps({"events": ["k8s-upgrade-completed"]})

         response = self._send_request()

@@ -80,12 +80,14 @@ class TestNotificationsControllerPost(BaseTestNotificationsController):
     def test_post_succeeds_with_kube_rootca_update_completed_in_events(self):
         """Tests post succeeds when kube-rootca-update-completed in events"""

-        self.params = json.dumps({'events': ['kube-rootca-update-completed']})
+        self.params = json.dumps({"events": ["kube-rootca-update-completed"]})

         response = self._send_request()

-        self.mock_audit_rpc_client().\
-            trigger_kube_rootca_update_audit.assert_called_once()
+        trigger_kube_rootca_update_audit = (
+            self.mock_audit_rpc_client().trigger_kube_rootca_update_audit
+        )
+        trigger_kube_rootca_update_audit.assert_called_once()

         self._assert_response(response)

@@ -21,42 +21,46 @@ from dcmanager.tests.unit.api.v1.controllers.mixins import GetMixin

 # SAMPLE SYSTEM PEER DATA
 SAMPLE_SYSTEM_PEER_UUID = str(uuid.uuid4())
-SAMPLE_SYSTEM_PEER_NAME = 'SystemPeer1'
-SAMPLE_MANAGER_ENDPOINT = 'http://127.0.0.1:5000'
-SAMPLE_MANAGER_USERNAME = 'admin'
-SAMPLE_MANAGER_PASSWORD = 'password'
-SAMPLE_PEER_CONTROLLER_GATEWAY_IP = '128.128.128.1'
-SAMPLE_ADMINISTRATIVE_STATE = 'enabled'
+SAMPLE_SYSTEM_PEER_NAME = "SystemPeer1"
+SAMPLE_MANAGER_ENDPOINT = "http://127.0.0.1:5000"
+SAMPLE_MANAGER_USERNAME = "admin"
+SAMPLE_MANAGER_PASSWORD = "password"
+SAMPLE_PEER_CONTROLLER_GATEWAY_IP = "128.128.128.1"
+SAMPLE_ADMINISTRATIVE_STATE = "enabled"
 SAMPLE_HEARTBEAT_INTERVAL = 10
 SAMPLE_HEARTBEAT_FAILURE_THRESHOLD = 3
-SAMPLE_HEARTBEAT_FAILURES_POLICY = 'alarm'
+SAMPLE_HEARTBEAT_FAILURES_POLICY = "alarm"
 SAMPLE_HEARTBEAT_MAINTENANCE_TIMEOUT = 600
-SAMPLE_AVAILABILITY_STATE_AVAILABLE = 'available'
+SAMPLE_AVAILABILITY_STATE_AVAILABLE = "available"

 # SAMPLE SUBCLOUD PEER GROUP DATA
-SAMPLE_SUBCLOUD_PEER_GROUP_NAME = 'GroupX'
+SAMPLE_SUBCLOUD_PEER_GROUP_NAME = "GroupX"
 SAMPLE_SUBCLOUD_PEER_GROUP_SYSTEM_LEADER_ID = str(uuid.uuid4())
-SAMPLE_SUBCLOUD_PEER_GROUP_SYSTEM_LEADER_NAME = 'dc-local'
+SAMPLE_SUBCLOUD_PEER_GROUP_SYSTEM_LEADER_NAME = "dc-local"
 SAMPLE_SUBCLOUD_PEER_GROUP_MAX_SUBCLOUDS_REHOMING = 50
 SAMPLE_SUBCLOUD_PEER_GROUP_PRIORITY = 0
-SAMPLE_SUBCLOUD_PEER_GROUP_STATE = 'enabled'
+SAMPLE_SUBCLOUD_PEER_GROUP_STATE = "enabled"

 # SAMPLE PEER GROUP ASSOCIATION DATA
 SAMPLE_SUBCLOUD_PEER_GROUP_ID = 1
 SAMPLE_SYSTEM_PEER_ID = 1
 SAMPLE_PEER_GROUP_PRIORITY = 1
 SAMPLE_PEER_GROUP_PRIORITY_UPDATED = 99
-SAMPLE_SYNC_STATUS = 'synced'
-SAMPLE_SYNC_MESSAGE = 'None'
-SAMPLE_ASSOCIATION_TYPE = 'primary'
+SAMPLE_SYNC_STATUS = "synced"
+SAMPLE_SYNC_MESSAGE = "None"
+SAMPLE_ASSOCIATION_TYPE = "primary"


 class PeerGroupAssociationAPIMixin(APIMixin):
-    API_PREFIX = '/v1.0/peer-group-associations'
-    RESULT_KEY = 'peer_group_associations'
+    API_PREFIX = "/v1.0/peer-group-associations"
+    RESULT_KEY = "peer_group_associations"
     EXPECTED_FIELDS = [
-        'id', 'peer-group-id', 'system-peer-id', 'peer-group-priority',
-        'created-at', 'updated-at'
+        "id",
+        "peer-group-id",
+        "system-peer-id",
+        "peer-group-priority",
+        "created-at",
+        "updated-at",
     ]

     def setUp(self):
@@ -65,71 +69,67 @@ class PeerGroupAssociationAPIMixin(APIMixin):
     def _get_test_system_peer_dict(self, **kw):
         # id should not be part of the structure
         system_peer = {
-            'peer_uuid': kw.get('peer_uuid', SAMPLE_SYSTEM_PEER_UUID),
-            'peer_name': kw.get('peer_name', SAMPLE_SYSTEM_PEER_NAME),
-            'endpoint': kw.get('manager_endpoint', SAMPLE_MANAGER_ENDPOINT),
-            'username': kw.get('manager_username', SAMPLE_MANAGER_USERNAME),
-            'password': kw.get('manager_password', SAMPLE_MANAGER_PASSWORD),
-            'gateway_ip': kw.get(
-                'peer_controller_gateway_ip', SAMPLE_PEER_CONTROLLER_GATEWAY_IP
+            "peer_uuid": kw.get("peer_uuid", SAMPLE_SYSTEM_PEER_UUID),
+            "peer_name": kw.get("peer_name", SAMPLE_SYSTEM_PEER_NAME),
+            "endpoint": kw.get("manager_endpoint", SAMPLE_MANAGER_ENDPOINT),
+            "username": kw.get("manager_username", SAMPLE_MANAGER_USERNAME),
+            "password": kw.get("manager_password", SAMPLE_MANAGER_PASSWORD),
+            "gateway_ip": kw.get(
+                "peer_controller_gateway_ip", SAMPLE_PEER_CONTROLLER_GATEWAY_IP
             ),
-            'administrative_state': kw.get(
-                'administrative_state', SAMPLE_ADMINISTRATIVE_STATE
+            "administrative_state": kw.get(
+                "administrative_state", SAMPLE_ADMINISTRATIVE_STATE
             ),
-            'heartbeat_interval': kw.get(
-                'heartbeat_interval', SAMPLE_HEARTBEAT_INTERVAL
+            "heartbeat_interval": kw.get(
+                "heartbeat_interval", SAMPLE_HEARTBEAT_INTERVAL
             ),
-            'heartbeat_failure_threshold': kw.get(
-                'heartbeat_failure_threshold', SAMPLE_HEARTBEAT_FAILURE_THRESHOLD
+            "heartbeat_failure_threshold": kw.get(
+                "heartbeat_failure_threshold", SAMPLE_HEARTBEAT_FAILURE_THRESHOLD
             ),
-            'heartbeat_failure_policy': kw.get(
-                'heartbeat_failure_policy', SAMPLE_HEARTBEAT_FAILURES_POLICY
+            "heartbeat_failure_policy": kw.get(
+                "heartbeat_failure_policy", SAMPLE_HEARTBEAT_FAILURES_POLICY
             ),
-            'heartbeat_maintenance_timeout': kw.get(
-                'heartbeat_maintenance_timeout', SAMPLE_HEARTBEAT_MAINTENANCE_TIMEOUT
-            )
+            "heartbeat_maintenance_timeout": kw.get(
+                "heartbeat_maintenance_timeout", SAMPLE_HEARTBEAT_MAINTENANCE_TIMEOUT
+            ),
         }
         return system_peer

     def _get_test_subcloud_peer_group_dict(self, **kw):
         # id should not be part of the structure
         group = {
-            'peer_group_name': kw.get(
-                'peer_group_name', SAMPLE_SUBCLOUD_PEER_GROUP_NAME
+            "peer_group_name": kw.get(
+                "peer_group_name", SAMPLE_SUBCLOUD_PEER_GROUP_NAME
             ),
-            'system_leader_id': kw.get(
-                'system_leader_id', SAMPLE_SUBCLOUD_PEER_GROUP_SYSTEM_LEADER_ID
+            "system_leader_id": kw.get(
+                "system_leader_id", SAMPLE_SUBCLOUD_PEER_GROUP_SYSTEM_LEADER_ID
             ),
-            'system_leader_name': kw.get(
-                'system_leader_name', SAMPLE_SUBCLOUD_PEER_GROUP_SYSTEM_LEADER_NAME
+            "system_leader_name": kw.get(
+                "system_leader_name", SAMPLE_SUBCLOUD_PEER_GROUP_SYSTEM_LEADER_NAME
             ),
-            'group_priority': kw.get(
-                'group_priority', SAMPLE_SUBCLOUD_PEER_GROUP_PRIORITY
+            "group_priority": kw.get(
+                "group_priority", SAMPLE_SUBCLOUD_PEER_GROUP_PRIORITY
             ),
-            'group_state': kw.get(
-                'group_state', SAMPLE_SUBCLOUD_PEER_GROUP_STATE
-            ),
-            'max_subcloud_rehoming': kw.get(
-                'max_subcloud_rehoming',
-                SAMPLE_SUBCLOUD_PEER_GROUP_MAX_SUBCLOUDS_REHOMING
-            ),
-            'migration_status': None
+            "group_state": kw.get("group_state", SAMPLE_SUBCLOUD_PEER_GROUP_STATE),
+            "max_subcloud_rehoming": kw.get(
+                "max_subcloud_rehoming",
+                SAMPLE_SUBCLOUD_PEER_GROUP_MAX_SUBCLOUDS_REHOMING,
+            ),
+            "migration_status": None,
         }
         return group

     def _get_test_peer_group_association_dict(self, **kw):
         # id should not be part of the structure
         association = {
-            'peer_group_id': kw.get(
-                'peer_group_id', SAMPLE_SUBCLOUD_PEER_GROUP_ID
-            ),
-            'system_peer_id': kw.get('system_peer_id', SAMPLE_SYSTEM_PEER_ID),
-            'peer_group_priority': kw.get(
-                'peer_group_priority', SAMPLE_PEER_GROUP_PRIORITY
-            ),
-            'sync_status': kw.get('sync_status', SAMPLE_SYNC_STATUS),
-            'sync_message': kw.get('sync_message', SAMPLE_SYNC_MESSAGE),
-            'association_type': kw.get('association_type', SAMPLE_ASSOCIATION_TYPE)
+            "peer_group_id": kw.get("peer_group_id", SAMPLE_SUBCLOUD_PEER_GROUP_ID),
+            "system_peer_id": kw.get("system_peer_id", SAMPLE_SYSTEM_PEER_ID),
+            "peer_group_priority": kw.get(
+                "peer_group_priority", SAMPLE_PEER_GROUP_PRIORITY
+            ),
+            "sync_status": kw.get("sync_status", SAMPLE_SYNC_STATUS),
+            "sync_message": kw.get("sync_message", SAMPLE_SYNC_MESSAGE),
+            "association_type": kw.get("association_type", SAMPLE_ASSOCIATION_TYPE),
         }
         return association

@@ -163,10 +163,14 @@ class PeerGroupAssociationAPIMixin(APIMixin):
         )

     def _create_peer_group_association(self, context, peer_id, peer_group_id, **kw):
-        kw['peer_group_id'] = peer_group_id if kw.get('peer_group_id') is None \
-            else kw.get('peer_group_id')
-        kw['system_peer_id'] = peer_id if kw.get('system_peer_id') is None \
-            else kw.get('system_peer_id')
+        kw["peer_group_id"] = (
+            peer_group_id
+            if kw.get("peer_group_id") is None
+            else kw.get("peer_group_id")
+        )
+        kw["system_peer_id"] = (
+            peer_id if kw.get("system_peer_id") is None else kw.get("system_peer_id")
+        )
         creation_fields = self._get_test_peer_group_association_dict(**kw)

         return db_api.peer_group_association_create(context, **creation_fields)
@@ -175,9 +179,7 @@ class PeerGroupAssociationAPIMixin(APIMixin):
         return self._get_test_peer_group_association_dict()

     def get_update_object(self):
-        update_object = {
-            'peer_group_priority': SAMPLE_PEER_GROUP_PRIORITY_UPDATED
-        }
+        update_object = {"peer_group_priority": SAMPLE_PEER_GROUP_PRIORITY_UPDATED}
         return update_object

@@ -199,13 +201,15 @@ class BaseTestPeerGroupAssociationController(
     def _create_non_primary_association_type(self):
         db_api.peer_group_association_destroy(self.ctx, self.single_obj.id)
         self.single_obj = self._create_peer_group_association(
-            self.ctx, self.peer_id, self.peer_group_id,
-            association_type=consts.ASSOCIATION_TYPE_NON_PRIMARY
+            self.ctx,
+            self.peer_id,
+            self.peer_group_id,
+            association_type=consts.ASSOCIATION_TYPE_NON_PRIMARY,
         )


 class TestPeerGroupAssociationController(BaseTestPeerGroupAssociationController):
-    """"Test class for PeerGroupAssociationController"""
+    """Test class for PeerGroupAssociationController"""

     def setUp(self):
         super().setUp()
@@ -218,11 +222,11 @@ class TestPeerGroupAssociationController(BaseTestPeerGroupAssociationController)
         response = self._send_request()

         self._assert_response(response)
-        self.assertEqual(response.text, 'null')
+        self.assertEqual(response.text, "null")


 class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
-    """"Test class for post requests"""
+    """Test class for post requests"""

     def setUp(self):
         super().setUp()
@@ -231,8 +235,9 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
         self.params = self.get_post_object()

         db_api.system_peer_update(
-            self.ctx, peer_id=self.peer_id,
-            availability_state=SAMPLE_AVAILABILITY_STATE_AVAILABLE
+            self.ctx,
+            peer_id=self.peer_id,
+            availability_state=SAMPLE_AVAILABILITY_STATE_AVAILABLE,
         )

     def _validate_peer_group_association(self):
@@ -241,8 +246,9 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
     def test_post_succeeds(self):
         """Test post succeeds"""

-        self.mock_rpc_client().sync_subcloud_peer_group.return_value = \
+        self.mock_rpc_client().sync_subcloud_peer_group.return_value = (
             self._get_test_peer_group_association_dict()
+        )

         response = self._send_request()

@@ -255,10 +261,11 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
         """Test post succeeds with non primary subcloud peer group"""

         db_api.subcloud_peer_group_update(
-            self.ctx, self.peer_group_id,
-            group_priority=peer_group_association.MIN_PEER_GROUP_ASSOCIATION_PRIORITY
+            self.ctx,
+            self.peer_group_id,
+            group_priority=peer_group_association.MIN_PEER_GROUP_ASSOCIATION_PRIORITY,
         )
-        self.params['peer_group_priority'] = None
+        self.params["peer_group_priority"] = None

         response = self._send_request()

@@ -270,18 +277,20 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
     def test_post_fails_with_invalid_system_peer_id(self):
         """Test post fails with invalid system peer id"""

-        bad_values = ['', 'test-system-peer-id']
+        bad_values = ["", "test-system-peer-id"]
         for index, bad_value in enumerate(bad_values, start=1):
-            self.params['system_peer_id'] = bad_value
+            self.params["system_peer_id"] = bad_value

             response = self._send_request()

             self._assert_pecan_and_response(
-                response, http.client.BAD_REQUEST,
-                'Invalid system_peer_id', call_count=index
+                response,
+                http.client.BAD_REQUEST,
+                "Invalid system_peer_id",
+                call_count=index,
             )

-    @mock.patch.object(db_api, 'system_peer_get')
+    @mock.patch.object(db_api, "system_peer_get")
     def test_post_fails_with_generic_exception_while_validating_system_peer_id(
         self, mock_system_peer_get
     ):
@@ -292,22 +301,22 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
         response = self._send_request()

         self._assert_pecan_and_response(
-            response, http.client.BAD_REQUEST, 'Invalid system_peer_id'
+            response, http.client.BAD_REQUEST, "Invalid system_peer_id"
         )

     def test_post_fails_with_textual_peer_group_id(self):
         """Test post fails with textual peer group id"""

         # A string peer group priority is not permitted.
-        self.params['peer_group_id'] = 'peer-group-id'
+        self.params["peer_group_id"] = "peer-group-id"

         response = self._send_request()

         self._assert_pecan_and_response(
-            response, http.client.BAD_REQUEST, 'Invalid peer_group_id'
+            response, http.client.BAD_REQUEST, "Invalid peer_group_id"
         )

-    @mock.patch.object(db_api, 'subcloud_peer_group_get')
+    @mock.patch.object(db_api, "subcloud_peer_group_get")
     def test_post_fails_with_generic_exception_while_validating_peer_group_id(
         self, mock_subcloud_peer_group_get
     ):
@@ -321,7 +330,7 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
         # Error exception instead of a Bad Request. This also applies to all of the
         # others validations when a generic exception occurs.
         self._assert_pecan_and_response(
-            response, http.client.BAD_REQUEST, 'Invalid peer_group_id'
+            response, http.client.BAD_REQUEST, "Invalid peer_group_id"
         )

     def test_post_fails_with_invalid_peer_group_priority(self):
@ -332,15 +341,17 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
# TODO(rlima): a floating point value should also raise an invalid
# peer_group_priority, but, currently, it doesn't since the validation
# updates the value to an integer
bad_values = [65537, -2, 'abc', 0]
bad_values = [65537, -2, "abc", 0]
for index, bad_value in enumerate(bad_values, start=1):
self.params['peer_group_priority'] = bad_value
self.params["peer_group_priority"] = bad_value

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
'Invalid peer_group_priority', call_count=index
response,
http.client.BAD_REQUEST,
"Invalid peer_group_priority",
call_count=index,
)

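The exploded call above also shows Black's "magic trailing comma": once a call no longer fits on one line, Black breaks it to one argument per line and appends a trailing comma, which keeps later diffs to one line per argument. A hedged sketch with a made-up helper standing in for _assert_pecan_and_response:

def assert_response(response, status, message, call_count=1):
    # Stand-in for the real test helper; only the call shape matters here.
    return (response, status, message, call_count)

# The trailing comma after the last argument signals that the
# one-argument-per-line layout is intentional and should be preserved.
assert_response(
    "response",
    400,
    "Invalid peer_group_priority",
    call_count=1,
)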
def test_post_fails_with_primary_group_priority(self):
@ -350,18 +361,19 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
payload doesn't have one, a bad request should occur
"""

self.params['peer_group_priority'] = None
self.params["peer_group_priority"] = None

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
response,
http.client.BAD_REQUEST,
"Peer Group Association create is not allowed when the subcloud "
"peer group priority is greater than 0 and it is required when "
"the subcloud peer group priority is 0."
"the subcloud peer group priority is 0.",
)

@mock.patch.object(json, 'loads')
@mock.patch.object(json, "loads")
def test_post_fails_with_malformed_payload(self, mock_json_loads):
"""Test post fails when the payload is malformed"""

@ -379,7 +391,7 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
def test_post_fails_with_invalid_payload(self):
"""Test post fails when the payload is invalid"""

self.params = 'invalid payload'
self.params = "invalid payload"

response = self._send_request()

@ -399,7 +411,7 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
)

@mock.patch.object(
db_api, 'peer_group_association_get_by_peer_group_and_system_peer_id'
db_api, "peer_group_association_get_by_peer_group_and_system_peer_id"
)
def test_post_fails_with_get_by_peer_group_and_system_peer_id_exception(
self, mock_peer_group_association_get
@ -415,34 +427,36 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"peer_group_association_get_by_peer_group_and_system_peer_id failed: "
response,
http.client.INTERNAL_SERVER_ERROR,
"peer_group_association_get_by_peer_group_and_system_peer_id failed: ",
)

def test_post_fails_with_existing_association(self):
"""Test post fails when an association exists"""

self._create_peer_group_association(
self.ctx, self.peer_id, self.peer_group_id
)
self._create_peer_group_association(self.ctx, self.peer_id, self.peer_group_id)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "A Peer group association with same "
"peer_group_id, system_peer_id already exists"
response,
http.client.BAD_REQUEST,
"A Peer group association with same "
"peer_group_id, system_peer_id already exists",
)

def test_post_fails_with_remote_error_for_rpc_client(self):
"""Test post fails with a remote error for rpc_client"""

self.mock_rpc_client().sync_subcloud_peer_group.side_effect = \
RemoteError('msg', 'value')
self.mock_rpc_client().sync_subcloud_peer_group.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.UNPROCESSABLE_ENTITY, 'value'
response, http.client.UNPROCESSABLE_ENTITY, "value"
)

def test_post_fails_with_generic_exception_for_rpc_client(self):
@ -453,13 +467,14 @@ class TestPeerGroupAssociationPost(BaseTestPeerGroupAssociationController):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
'Unable to create peer group association'
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to create peer group association",
)


class TestPeerGroupAssociationGet(BaseTestPeerGroupAssociationController, GetMixin):
""""Test class for get requests"""
"""Test class for get requests"""

def setUp(self):
super().setUp()
@ -472,18 +487,19 @@ class TestPeerGroupAssociationGet(BaseTestPeerGroupAssociationController, GetMix
def test_get_fails_with_association_id_not_being_digit(self):
"""Test get fails when the association id is not a digit"""

self.url = f'{self.url}/fake'
self.url = f"{self.url}/fake"

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Peer Group Association ID must be an integer"
response,
http.client.BAD_REQUEST,
"Peer Group Association ID must be an integer",
)


class BaseTestPeerGroupAssociationPatch(BaseTestPeerGroupAssociationController):
""""Base test class for patch requests"""
"""Base test class for patch requests"""

def setUp(self):
super().setUp()
@ -492,7 +508,7 @@ class BaseTestPeerGroupAssociationPatch(BaseTestPeerGroupAssociationController):
self.ctx, self.peer_id, self.peer_group_id
)

self.url = f'{self.url}/{self.single_obj.id}'
self.url = f"{self.url}/{self.single_obj.id}"
self.method = self.app.patch_json
self.params = self.get_update_object()

@ -506,7 +522,7 @@ class BaseTestPeerGroupAssociationPatch(BaseTestPeerGroupAssociationController):


class TestPeerGroupAssociationPatch(BaseTestPeerGroupAssociationPatch):
""""Test class for patch requests"""
"""Test class for patch requests"""

def setUp(self):
super().setUp()
@ -522,8 +538,9 @@ class TestPeerGroupAssociationPatch(BaseTestPeerGroupAssociationPatch):
def test_patch_succeeds(self):
"""Test patch succeeds"""

self.mock_rpc_client().sync_subcloud_peer_group_only.return_value = \
self.mock_rpc_client().sync_subcloud_peer_group_only.return_value = (
self._get_test_peer_group_association_dict()
)

response = self._send_request()

@ -535,8 +552,8 @@ class TestPeerGroupAssociationPatch(BaseTestPeerGroupAssociationPatch):

self._create_non_primary_association_type()

self.params.pop('peer_group_priority')
self.params['sync_status'] = consts.ASSOCIATION_SYNC_STATUS_IN_SYNC
self.params.pop("peer_group_priority")
self.params["sync_status"] = consts.ASSOCIATION_SYNC_STATUS_IN_SYNC

response = self._send_request()

@ -554,37 +571,37 @@ class TestPeerGroupAssociationPatch(BaseTestPeerGroupAssociationPatch):

# Failures will return text rather than json
self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, 'Body required'
response, http.client.BAD_REQUEST, "Body required"
)

def test_patch_fails_without_valid_association_id(self):
"""Test patch fails without a valid association_id"""

self.url = f'{self.API_PREFIX}/fake'
self.url = f"{self.API_PREFIX}/fake"

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Peer Group Association ID must be an integer"
response,
http.client.BAD_REQUEST,
"Peer Group Association ID must be an integer",
)

def test_patch_fails_with_peer_group_association_not_found(self):
"""Test patch fails with peer group association not found"""

self.url = f'{self.API_PREFIX}/999'
self.url = f"{self.API_PREFIX}/999"

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.NOT_FOUND,
"Peer Group Association not found"
response, http.client.NOT_FOUND, "Peer Group Association not found"
)

def test_patch_fails_with_nothing_to_update(self):
"""Test patch fails with nothing to update"""

self.params = {'fake key': 'fake value'}
self.params = {"fake key": "fake value"}

response = self._send_request()

@ -597,27 +614,30 @@ class TestPeerGroupAssociationPatch(BaseTestPeerGroupAssociationPatch):

# peer_group_priority must be an integer between 1 and 65536
# All the entries in bad_values should be considered invalid
bad_values = [65537, -2, 'abc', 0]
bad_values = [65537, -2, "abc", 0]
for index, bad_value in enumerate(bad_values, start=1):
self.params['peer_group_priority'] = bad_value
self.params["peer_group_priority"] = bad_value

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
'Invalid peer_group_priority', call_count=index
response,
http.client.BAD_REQUEST,
"Invalid peer_group_priority",
call_count=index,
)

def test_patch_fails_with_peer_group_priority_and_sync_status(self):
"""Test patch fails with peer group priority and sync status"""

self.params['sync_status'] = consts.ASSOCIATION_SYNC_STATUS_SYNCING
self.params["sync_status"] = consts.ASSOCIATION_SYNC_STATUS_SYNCING

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"peer_group_priority and sync_status cannot be updated at the same time."
response,
http.client.BAD_REQUEST,
"peer_group_priority and sync_status cannot be updated at the same time.",
)

def test_patch_fails_for_peer_group_priority_when_non_primary(self):
@ -628,17 +648,18 @@ class TestPeerGroupAssociationPatch(BaseTestPeerGroupAssociationPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
response,
http.client.BAD_REQUEST,
"Peer Group Association peer_group_priority is not allowed to update "
"when the association type is non-primary."
"when the association type is non-primary.",
)
self.mock_rpc_client().peer_monitor_notify.assert_called_once()

def test_patch_fails_for_invalid_sync_status(self):
"""Test patch fails for invalid sync status"""

self.params.pop('peer_group_priority')
self.params['sync_status'] = 'fake value'
self.params.pop("peer_group_priority")
self.params["sync_status"] = "fake value"

response = self._send_request()

@ -649,46 +670,48 @@ class TestPeerGroupAssociationPatch(BaseTestPeerGroupAssociationPatch):
def test_patch_fails_for_sync_status_when_primary(self):
"""Test patch fails for sync status when association type is primary"""

self.params.pop('peer_group_priority')
self.params['sync_status'] = consts.ASSOCIATION_SYNC_STATUS_IN_SYNC
self.params.pop("peer_group_priority")
self.params["sync_status"] = consts.ASSOCIATION_SYNC_STATUS_IN_SYNC

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
response,
http.client.BAD_REQUEST,
"Peer Group Association sync_status is not allowed "
"to update when the association type is primary."
"to update when the association type is primary.",
)
self.mock_rpc_client().peer_monitor_notify.assert_called_once()

def test_patch_fails_with_remote_error_for_rpc_client_update(self):
"""Test patch fails with a remote error for rpc_client"""

self.mock_rpc_client().sync_subcloud_peer_group_only.side_effect = \
RemoteError('msg', 'value')
self.mock_rpc_client().sync_subcloud_peer_group_only.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.UNPROCESSABLE_ENTITY, 'value'
response, http.client.UNPROCESSABLE_ENTITY, "value"
)

def test_patch_fails_with_generic_exception_for_rpc_client_update(self):
"""Test patch fails with a generic exception for rpc_client"""

self.mock_rpc_client().sync_subcloud_peer_group_only.side_effect = \
Exception()
self.mock_rpc_client().sync_subcloud_peer_group_only.side_effect = Exception()

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to update peer group association"
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to update peer group association",
)


class TestPeerGroupAssociationPatchSync(BaseTestPeerGroupAssociationPatch):
""""Test class for patch requests with sync verb"""
"""Test class for patch requests with sync verb"""

def setUp(self):
super().setUp()
@ -707,16 +730,16 @@ class TestPeerGroupAssociationPatchSync(BaseTestPeerGroupAssociationPatch):
"""Test patch sync fails without a valid peer group leader id"""

db_api.subcloud_peer_group_update(
self.ctx, SAMPLE_SUBCLOUD_PEER_GROUP_ID,
system_leader_id=str(uuid.uuid4())
self.ctx, SAMPLE_SUBCLOUD_PEER_GROUP_ID, system_leader_id=str(uuid.uuid4())
)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
response,
http.client.BAD_REQUEST,
"Peer Group Association sync is not allowed when the subcloud "
"peer group system_leader_id is not the current system controller UUID."
"peer group system_leader_id is not the current system controller UUID.",
)

def test_patch_sync_fails_with_non_primary_association_type(self):
@ -727,22 +750,24 @@ class TestPeerGroupAssociationPatchSync(BaseTestPeerGroupAssociationPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
response,
http.client.BAD_REQUEST,
"Peer Group Association sync is not allowed when the association type "
"is non-primary. But the peer monitor notify was triggered."
"is non-primary. But the peer monitor notify was triggered.",
)
self.mock_rpc_client().peer_monitor_notify.assert_called_once()

def test_patch_sync_fails_with_remote_error_for_rpc_client_sync(self):
"""Test patch sync fails with remote error for rpc_client"""

self.mock_rpc_client().sync_subcloud_peer_group.side_effect = \
RemoteError('msg', 'value')
self.mock_rpc_client().sync_subcloud_peer_group.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.UNPROCESSABLE_ENTITY, 'value'
response, http.client.UNPROCESSABLE_ENTITY, "value"
)

def test_patch_sync_fails_with_generic_exception_for_rpc_client_sync(self):
@ -753,13 +778,14 @@ class TestPeerGroupAssociationPatchSync(BaseTestPeerGroupAssociationPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to sync peer group association"
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to sync peer group association",
)


class TestPeerGroupAssociationDelete(BaseTestPeerGroupAssociationController):
""""Test class for delete requests"""
"""Test class for delete requests"""

def setUp(self):
super().setUp()
@ -768,12 +794,13 @@ class TestPeerGroupAssociationDelete(BaseTestPeerGroupAssociationController):
self.ctx, self.peer_id, self.peer_group_id
)

self.url = f'{self.url}/{self.single_obj.id}'
self.url = f"{self.url}/{self.single_obj.id}"
self.method = self.app.delete
self.params = {}

self.mock_rpc_client().delete_peer_group_association.return_value = \
self.mock_rpc_client().delete_peer_group_association.return_value = (
self._get_test_peer_group_association_dict()
)

def test_delete_succeeds(self):
"""Test delete succeeds"""
@ -809,42 +836,44 @@ class TestPeerGroupAssociationDelete(BaseTestPeerGroupAssociationController):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.NOT_FOUND, 'Peer Group Association not found'
response, http.client.NOT_FOUND, "Peer Group Association not found"
)

def test_delete_fails_without_valid_association_id(self):
"""Test delete fails without a valid association_id"""

self.url = f'{self.API_PREFIX}/fake'
self.url = f"{self.API_PREFIX}/fake"

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Peer Group Association ID must be an integer"
response,
http.client.BAD_REQUEST,
"Peer Group Association ID must be an integer",
)

def test_delete_fails_with_remote_error_on_delete(self):
"""Test delete fails with remote error for rpc_client"""

self.mock_rpc_client().delete_peer_group_association.side_effect = \
RemoteError('msg', 'value')
self.mock_rpc_client().delete_peer_group_association.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.UNPROCESSABLE_ENTITY, 'value'
response, http.client.UNPROCESSABLE_ENTITY, "value"
)

def test_delete_fails_with_generic_exception_on_delete(self):
"""Test delete fails with generic exception for rpc_client"""

self.mock_rpc_client().delete_peer_group_association.side_effect = \
Exception()
self.mock_rpc_client().delete_peer_group_association.side_effect = Exception()

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to delete peer group association"
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to delete peer group association",
)

@ -108,10 +108,12 @@ class TestPhasedSubcloudDeployPost(BaseTestPhasedSubcloudDeployController):

self._mock_sysinv_client(psd_common)

self.mock_sysinv_client().get_management_address_pool.return_value = \
self.mock_sysinv_client().get_management_address_pool.return_value = (
FakeAddressPool("192.168.204.0", 24, "192.168.204.2", "192.168.204.100")
self.mock_rpc_client().subcloud_deploy_create.side_effect = \
)
self.mock_rpc_client().subcloud_deploy_create.side_effect = (
self.subcloud_deploy_create
)

def subcloud_deploy_create(self, context, subcloud_id, _):
subcloud = db_api.subcloud_get(context, subcloud_id)
@ -134,8 +136,9 @@ class TestPhasedSubcloudDeployPost(BaseTestPhasedSubcloudDeployController):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Missing required parameter(s): bootstrap_values, bootstrap-address"
response,
http.client.BAD_REQUEST,
"Missing required parameter(s): bootstrap_values, bootstrap-address",
)

def test_post_fails_without_bootstrap_address(self):
@ -146,8 +149,9 @@ class TestPhasedSubcloudDeployPost(BaseTestPhasedSubcloudDeployController):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Missing required parameter(s): bootstrap-address"
response,
http.client.BAD_REQUEST,
"Missing required parameter(s): bootstrap-address",
)
self.mock_rpc_client().subcloud_deploy_create.assert_not_called()

@ -159,16 +163,18 @@ class TestPhasedSubcloudDeployPost(BaseTestPhasedSubcloudDeployController):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Missing required parameter(s): bootstrap_values"
response,
http.client.BAD_REQUEST,
"Missing required parameter(s): bootstrap_values",
)
self.mock_rpc_client().subcloud_deploy_create.assert_not_called()

def test_post_fails_with_rpc_client_remote_error(self):
"""Test post fails with rpc client remote error"""

self.mock_rpc_client().subcloud_deploy_create.side_effect = \
RemoteError("msg", "value")
self.mock_rpc_client().subcloud_deploy_create.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

@ -197,8 +203,9 @@ class BaseTestPhasedSubcloudDeployPatch(BaseTestPhasedSubcloudDeployController):
super().setUp()

self.subcloud = fake_subcloud.create_fake_subcloud(
self.ctx, name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"],
deploy_status=consts.DEPLOY_STATE_INSTALLED
self.ctx,
name=fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA["name"],
deploy_status=consts.DEPLOY_STATE_INSTALLED,
)

self.method = self.app.patch
@ -209,8 +216,7 @@ class BaseTestPhasedSubcloudDeployPatch(BaseTestPhasedSubcloudDeployController):
self._mock_is_valid_software_deploy_state()
self._mock_get_network_address_pool()

self.mock_get_vault_load_files.return_value = \
("iso_file_path", "sig_file_path")
self.mock_get_vault_load_files.return_value = ("iso_file_path", "sig_file_path")
self.mock_is_initial_deployment.return_value = True
self.mock_get_network_address_pool.return_value = FakeAddressPool(
"192.168.204.0", 24, "192.168.204.2", "192.168.204.100"
@ -226,18 +232,17 @@ class BaseTestPhasedSubcloudDeployPatch(BaseTestPhasedSubcloudDeployController):
self.install_payload = {
"install_values": self.data_install,
"sysadmin_password": self._create_password("testpass"),
"bmc_password": bmc_password
"bmc_password": bmc_password,
}

self.mock_load_yaml_file_return_value = {
consts.BOOTSTRAP_ADDRESS:
fake_subcloud.FAKE_BOOTSTRAP_VALUE[consts.BOOTSTRAP_ADDRESS],
consts.BOOTSTRAP_ADDRESS: fake_subcloud.FAKE_BOOTSTRAP_VALUE[
consts.BOOTSTRAP_ADDRESS
],
}

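When a dictionary value is itself too long, as in the mock_load_yaml_file hunk just above, Black keeps the key and value together and breaks inside the subscript brackets. A reduced sketch; the placeholder constants below are short, so in real code this split would only appear once the names exceed the line-length limit:

BOOTSTRAP_ADDRESS = "bootstrap-address"
FAKE_BOOTSTRAP_VALUE = {BOOTSTRAP_ADDRESS: "10.10.10.12"}

# Black breaks inside the subscript rather than after the dict colon.
mock_return_value = {
    BOOTSTRAP_ADDRESS: FAKE_BOOTSTRAP_VALUE[
        BOOTSTRAP_ADDRESS
    ],
}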
def _update_subcloud(self, **kwargs):
self.subcloud = db_api.subcloud_update(
self.ctx, self.subcloud.id, **kwargs
)
self.subcloud = db_api.subcloud_update(self.ctx, self.subcloud.id, **kwargs)


class TestPhasedSubcloudDeployPatch(BaseTestPhasedSubcloudDeployPatch):
@ -289,10 +294,12 @@ class TestPhasedSubcloudDeployPatchBootstrap(BaseTestPhasedSubcloudDeployPatch):
self.url = f"{self.url}/bootstrap"

self.params = fake_subcloud.FAKE_BOOTSTRAP_VALUE
fake_content = \
json.dumps(fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA).encode("utf-8")
self.upload_files = \
[("bootstrap_values", "bootstrap_fake_filename", fake_content)]
fake_content = json.dumps(fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA).encode(
"utf-8"
)
self.upload_files = [
("bootstrap_values", "bootstrap_fake_filename", fake_content)
]

self._mock_load_yaml_file()
self._setup_mock_load_yaml_file()
@ -301,24 +308,27 @@ class TestPhasedSubcloudDeployPatchBootstrap(BaseTestPhasedSubcloudDeployPatch):

def _setup_mock_os_path_exists(self):
config_file = psd_common.get_config_file_path(self.subcloud.name)
self.mock_os_path_exists.side_effect = \
lambda file: True if file == config_file else False
self.mock_os_path_exists.side_effect = lambda file: (
True if file == config_file else False
)

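A formatter cannot split inside a bare lambda body, so the hunk above wraps the body in parentheses to create a legal break point (a plain `file == config_file` comparison would be simpler, but that would go beyond a formatting-only change). A self-contained sketch with a placeholder path:

config_file = "/tmp/subcloud_config.yml"  # illustrative path only

# Parentheses around the conditional give the formatter a place to break.
path_exists = lambda file: (
    True if file == config_file else False
)

assert path_exists("/tmp/subcloud_config.yml") is True
assert path_exists("/tmp/other.yml") is False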
def _setup_mock_load_yaml_file(self):
self.mock_load_yaml_file_return_value["software_version"] = \
self.mock_load_yaml_file_return_value["software_version"] = (
fake_subcloud.FAKE_SOFTWARE_VERSION
)
self.mock_load_yaml_file.return_value = self.mock_load_yaml_file_return_value

def _assert_payload(self):
expected_payload = {
**fake_subcloud.FAKE_BOOTSTRAP_VALUE,
**fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA
**fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA,
}
expected_payload["sysadmin_password"] = "testpass"
expected_payload["software_version"] = fake_subcloud.FAKE_SOFTWARE_VERSION

(_, res_subcloud_id, res_payload), _ = \
(_, res_subcloud_id, res_payload), _ = (
self.mock_rpc_client.return_value.subcloud_deploy_bootstrap.call_args
)

self.assertDictEqual(res_payload, expected_payload)
self.assertEqual(res_subcloud_id, self.subcloud.id)
@ -338,8 +348,7 @@ class TestPhasedSubcloudDeployPatchBootstrap(BaseTestPhasedSubcloudDeployPatch):
self.upload_files = None

fake_bootstrap_values = copy.copy(fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA)
fake_bootstrap_values["software_version"] = \
fake_subcloud.FAKE_SOFTWARE_VERSION
fake_bootstrap_values["software_version"] = fake_subcloud.FAKE_SOFTWARE_VERSION
self.mock_load_yaml_file.return_value = fake_bootstrap_values

response = self._send_request()
@ -355,26 +364,30 @@ class TestPhasedSubcloudDeployPatchBootstrap(BaseTestPhasedSubcloudDeployPatch):
"management_subnet": "192.168.102.0/24",
"management_start_ip": "192.168.102.2",
"management_end_ip": "192.168.102.50",
"management_gateway_ip": "192.168.102.1"
"management_gateway_ip": "192.168.102.1",
}

fake_subcloud.create_fake_subcloud(
self.ctx, name="existing_subcloud",
deploy_status=consts.DEPLOY_STATE_DONE, **conflicting_subnet
self.ctx,
name="existing_subcloud",
deploy_status=consts.DEPLOY_STATE_DONE,
**conflicting_subnet,
)

modified_bootstrap_data = copy.copy(fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA)
modified_bootstrap_data.update(conflicting_subnet)
fake_content = json.dumps(modified_bootstrap_data).encode("utf-8")

self.upload_files = \
[("bootstrap_values", "bootstrap_fake_filename", fake_content)]
self.upload_files = [
("bootstrap_values", "bootstrap_fake_filename", fake_content)
]

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "management_subnet invalid: Subnet "
"overlaps with another configured subnet"
response,
http.client.BAD_REQUEST,
"management_subnet invalid: Subnet overlaps with another configured subnet",
)
self.mock_rpc_client().subcloud_deploy_bootstrap.assert_not_called()

@ -386,8 +399,10 @@ class TestPhasedSubcloudDeployPatchBootstrap(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, f"Subcloud deploy status must be "
f"either: {', '.join(psd_api.VALID_STATES_FOR_DEPLOY_BOOTSTRAP)}"
response,
http.client.BAD_REQUEST,
"Subcloud deploy status must be either: "
f"{', '.join(psd_api.VALID_STATES_FOR_DEPLOY_BOOTSTRAP)}",
)
self.mock_rpc_client().subcloud_deploy_bootstrap.assert_not_called()

@ -401,17 +416,20 @@ class TestPhasedSubcloudDeployPatchBootstrap(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Required bootstrap-values file was "
"not provided and it was not previously available at /opt/dc-vault/"
f"ansible/{fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA['name']}.yml"
response,
http.client.BAD_REQUEST,
"Required bootstrap-values file was not provided and it was not "
"previously available at /opt/dc-vault/ansible/"
f"{fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA['name']}.yml",
)
self.mock_rpc_client().subcloud_deploy_bootstrap.assert_not_called()

def test_patch_bootstrap_fails_with_rpc_client_remote_error(self):
"""Test patch bootstrap fails with rpc client remote error"""

self.mock_rpc_client().subcloud_deploy_bootstrap.side_effect = \
RemoteError("msg", "value")
self.mock_rpc_client().subcloud_deploy_bootstrap.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

@ -428,8 +446,7 @@ class TestPhasedSubcloudDeployPatchBootstrap(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to bootstrap subcloud"
response, http.client.INTERNAL_SERVER_ERROR, "Unable to bootstrap subcloud"
)
self.mock_rpc_client().subcloud_deploy_bootstrap.assert_called_once()

@ -445,7 +462,7 @@ class TestPhasedSubcloudDeployPatchConfigure(BaseTestPhasedSubcloudDeployPatch):

self._update_subcloud(
deploy_status=consts.DEPLOY_STATE_DONE,
data_install=json.dumps(self.data_install)
data_install=json.dumps(self.data_install),
)

self._mock_populate_payload()
@ -458,8 +475,9 @@ class TestPhasedSubcloudDeployPatchConfigure(BaseTestPhasedSubcloudDeployPatch):
"""Test patch configure succeeds"""

mock_load_yaml_file.return_value = {
consts.BOOTSTRAP_ADDRESS:
fake_subcloud.FAKE_BOOTSTRAP_VALUE[consts.BOOTSTRAP_ADDRESS]
consts.BOOTSTRAP_ADDRESS: fake_subcloud.FAKE_BOOTSTRAP_VALUE[
consts.BOOTSTRAP_ADDRESS
]
}

response = self._send_request()
@ -501,8 +519,10 @@ class TestPhasedSubcloudDeployPatchConfigure(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Failed to decode subcloud "
"sysadmin_password, verify the password is base64 encoded"
response,
http.client.BAD_REQUEST,
"Failed to decode subcloud sysadmin_password, "
"verify the password is base64 encoded",
)
self.mock_rpc_client().subcloud_deploy_config.assert_not_called()

@ -514,8 +534,10 @@ class TestPhasedSubcloudDeployPatchConfigure(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Subcloud deploy status must be "
f"{', '.join(psd_api.VALID_STATES_FOR_DEPLOY_CONFIG)}"
response,
http.client.BAD_REQUEST,
"Subcloud deploy status must be "
f"{', '.join(psd_api.VALID_STATES_FOR_DEPLOY_CONFIG)}",
)
self.mock_rpc_client().subcloud_deploy_config.assert_not_called()

@ -527,8 +549,9 @@ class TestPhasedSubcloudDeployPatchConfigure(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Subcloud prestage is ongoing prestaging-images"
response,
http.client.BAD_REQUEST,
"Subcloud prestage is ongoing prestaging-images",
)
self.mock_rpc_client().subcloud_deploy_config.assert_not_called()

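Several expected-message rewrites above rely on implicit concatenation of adjacent string literals; the formatter does not re-split string literals by itself, so the commit rebalances the pieces by hand while keeping the assembled message identical. A quick behavior-preservation check using literals taken from one of these hunks:

old_message = (
    "Failed to decode subcloud "
    "sysadmin_password, verify the password is base64 encoded"
)
new_message = (
    "Failed to decode subcloud sysadmin_password, "
    "verify the password is base64 encoded"
)

# The split point moved, but the concatenated message must not change.
assert old_message == new_message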
@ -537,8 +560,9 @@ class TestPhasedSubcloudDeployPatchConfigure(BaseTestPhasedSubcloudDeployPatch):

# Add subcloud to SPG with primary priority
peer_group = TestSystemPeerManager.create_subcloud_peer_group_static(
self.ctx, group_priority=consts.PEER_GROUP_PRIMARY_PRIORITY,
peer_group_name="SubcloudPeerGroup1"
self.ctx,
group_priority=consts.PEER_GROUP_PRIMARY_PRIORITY,
peer_group_name="SubcloudPeerGroup1",
)

self._update_subcloud(peer_group_id=peer_group.id)
@ -563,16 +587,18 @@ class TestPhasedSubcloudDeployPatchConfigure(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Subcloud can only be configured in its primary site."
response,
http.client.BAD_REQUEST,
"Subcloud can only be configured in its primary site.",
)
self.mock_rpc_client().subcloud_deploy_config.assert_not_called()

def test_patch_configure_fails_with_rpc_client_remote_error(self):
"""Test patch configure fails with rpc client remote error"""

self.mock_rpc_client().subcloud_deploy_config.side_effect = \
RemoteError("msg", "value")
self.mock_rpc_client().subcloud_deploy_config.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

@ -591,8 +617,7 @@ class TestPhasedSubcloudDeployPatchConfigure(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to configure subcloud"
response, http.client.INTERNAL_SERVER_ERROR, "Unable to configure subcloud"
)
self.mock_rpc_client().subcloud_deploy_config.assert_called_once_with(
mock.ANY, self.subcloud.id, self.params, initial_deployment=True
@ -647,7 +672,7 @@ class TestPhasedSubcloudDeployPatchInstall(BaseTestPhasedSubcloudDeployPatch):

with mock.patch(
"builtins.open",
mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA)
mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA),
):
response = self._send_request()

@ -665,8 +690,9 @@ class TestPhasedSubcloudDeployPatchInstall(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "The deploy install command can only "
"be used during initial deployment."
response,
http.client.BAD_REQUEST,
"The deploy install command can only be used during initial deployment.",
)
self.mock_rpc_client().subcloud_deploy_install.assert_not_called()

@ -709,9 +735,11 @@ class TestPhasedSubcloudDeployPatchInstall(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, f"Failed to get {SW_VERSION} load "
"image. Provide active/inactive load image via 'system --os-region-name "
"SystemController load-import --active/--inactive'"
response,
http.client.BAD_REQUEST,
f"Failed to get {SW_VERSION} load image. "
"Provide active/inactive load image via 'system --os-region-name "
"SystemController load-import --active/--inactive'",
)
self.mock_rpc_client().subcloud_deploy_install.assert_not_called()

@ -723,16 +751,19 @@ class TestPhasedSubcloudDeployPatchInstall(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Subcloud deploy status must be "
f"either: {', '.join(psd_api.VALID_STATES_FOR_DEPLOY_INSTALL)}"
response,
http.client.BAD_REQUEST,
"Subcloud deploy status must be either: "
f"{', '.join(psd_api.VALID_STATES_FOR_DEPLOY_INSTALL)}",
)
self.mock_rpc_client().subcloud_deploy_install.assert_not_called()

def test_patch_install_fails_with_rpc_client_remote_error(self):
"""Test patch install fails with rpc client remote error"""

self.mock_rpc_client().subcloud_deploy_install.side_effect = \
RemoteError("msg", "value")
self.mock_rpc_client().subcloud_deploy_install.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

@ -768,11 +799,13 @@ class TestPhasedSubcloudDeployPatchComplete(BaseTestPhasedSubcloudDeployPatch):

self._update_subcloud(
deploy_status=consts.DEPLOY_STATE_BOOTSTRAPPED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
)

self.mock_rpc_client().subcloud_deploy_complete.return_value = \
("subcloud_deploy_complete", {"subcloud_id": self.subcloud.id})
self.mock_rpc_client().subcloud_deploy_complete.return_value = (
"subcloud_deploy_complete",
{"subcloud_id": self.subcloud.id},
)

def test_patch_complete_succeeds(self):
"""Test patch complete succeeds"""
@ -792,17 +825,19 @@ class TestPhasedSubcloudDeployPatchComplete(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Subcloud deploy can only be "
"completed when its deploy status is: "
f"{consts.DEPLOY_STATE_BOOTSTRAPPED}"
response,
http.client.BAD_REQUEST,
"Subcloud deploy can only be completed when its deploy status is: "
f"{consts.DEPLOY_STATE_BOOTSTRAPPED}",
)
self.mock_rpc_client().subcloud_deploy_complete.assert_not_called()

def test_patch_complete_fails_with_rpc_client_remote_error(self):
"""Test patch complete fails with rpc client remote error"""

self.mock_rpc_client().subcloud_deploy_complete.side_effect = \
RemoteError("msg", "value")
self.mock_rpc_client().subcloud_deploy_complete.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

@ -821,8 +856,9 @@ class TestPhasedSubcloudDeployPatchComplete(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to complete subcloud deployment"
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to complete subcloud deployment",
)
self.mock_rpc_client().subcloud_deploy_complete.assert_called_once_with(
mock.ANY, self.subcloud.id
@ -857,8 +893,9 @@ class TestPhasedSubcloudDeployPatchAbort(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"The subcloud can only be aborted during initial deployment."
response,
http.client.BAD_REQUEST,
"The subcloud can only be aborted during initial deployment.",
)
self.mock_rpc_client().subcloud_deploy_abort.assert_not_called()

@ -870,17 +907,19 @@ class TestPhasedSubcloudDeployPatchAbort(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Subcloud deploy status must be in "
"one of the following states: "
f"{', '.join(psd_api.VALID_STATES_FOR_DEPLOY_ABORT)}"
response,
http.client.BAD_REQUEST,
"Subcloud deploy status must be in one of the following states: "
f"{', '.join(psd_api.VALID_STATES_FOR_DEPLOY_ABORT)}",
)
self.mock_rpc_client().subcloud_deploy_abort.assert_not_called()

def test_patch_abort_fails_with_rpc_client_remote_error(self):
"""Test patch abort fails with rpc client remote error"""

self.mock_rpc_client().subcloud_deploy_abort.side_effect = \
RemoteError("msg", "value")
self.mock_rpc_client().subcloud_deploy_abort.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

@ -899,8 +938,9 @@ class TestPhasedSubcloudDeployPatchAbort(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to abort subcloud deployment"
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to abort subcloud deployment",
)
self.mock_rpc_client().subcloud_deploy_abort.assert_called_once_with(
mock.ANY, self.subcloud.id, self.subcloud.deploy_status
@ -916,8 +956,9 @@ class TestPhasedSubcloudDeployPatchResume(BaseTestPhasedSubcloudDeployPatch):
self.url = f"{self.url}/resume"

self._update_subcloud(
deploy_status=consts.DEPLOY_STATE_CREATED, software_version=SW_VERSION,
data_install=json.dumps(self.data_install)
deploy_status=consts.DEPLOY_STATE_CREATED,
software_version=SW_VERSION,
data_install=json.dumps(self.data_install),
)

self._mock_get_subcloud_db_install_values()
@ -933,14 +974,13 @@ class TestPhasedSubcloudDeployPatchResume(BaseTestPhasedSubcloudDeployPatch):
self.mock_os_path_isdir.return_value = True
self.mock_load_yaml_file.return_value = self.mock_load_yaml_file_return_value
self.mock_os_listdir.return_value = [
"deploy_chart_fake.tgz", "deploy_overrides_fake.yaml",
"deploy_playbook_fake.yaml"
"deploy_chart_fake.tgz",
"deploy_overrides_fake.yaml",
"deploy_playbook_fake.yaml",
]

def _setup_mock_get_request_data(self, states_to_execute=psd_api.DEPLOY_PHASES):
bootstrap_request = {
"bootstrap_values": fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA
}
bootstrap_request = {"bootstrap_values": fake_subcloud.FAKE_BOOTSTRAP_FILE_DATA}
config_request = {
"deploy_config": "deploy config values",
}
@ -965,8 +1005,9 @@ class TestPhasedSubcloudDeployPatchResume(BaseTestPhasedSubcloudDeployPatch):
config_file = psd_common.get_config_file_path(
self.subcloud.name, consts.DEPLOY_CONFIG
)
self.mock_os_path_exists.side_effect = \
lambda file: True if file == config_file else False
self.mock_os_path_exists.side_effect = lambda file: (
True if file == config_file else False
)

def _assert_response_payload(self, response):
next_deploy_phase = psd_api.RESUMABLE_STATES[self.subcloud.deploy_status][0]
@ -1017,9 +1058,10 @@ class TestPhasedSubcloudDeployPatchResume(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
response,
http.client.BAD_REQUEST,
"The subcloud can only be resumed during initial deployment.",
call_count=index
call_count=index,
)
self.mock_rpc_client().subcloud_deploy_resume.assert_not_called()

@ -1027,8 +1069,9 @@ class TestPhasedSubcloudDeployPatchResume(BaseTestPhasedSubcloudDeployPatch):
"""Test patch resume fails with subcloud in invalid state"""

invalid_resume_states = [
consts.DEPLOY_STATE_INSTALLING, consts.DEPLOY_STATE_BOOTSTRAPPING,
consts.DEPLOY_STATE_CONFIGURING
consts.DEPLOY_STATE_INSTALLING,
consts.DEPLOY_STATE_BOOTSTRAPPING,
consts.DEPLOY_STATE_CONFIGURING,
]

for index, state in enumerate(invalid_resume_states, start=1):
@ -1037,16 +1080,20 @@ class TestPhasedSubcloudDeployPatchResume(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Subcloud deploy status must be "
f"either: {', '.join(psd_api.RESUMABLE_STATES)}", call_count=index
response,
http.client.BAD_REQUEST,
"Subcloud deploy status must be either: "
f"{', '.join(psd_api.RESUMABLE_STATES)}",
call_count=index,
)
self.mock_rpc_client().subcloud_deploy_resume.assert_not_called()

def test_patch_resume_succeeds_with_sysadmin_password_only_in_params(self):
"""Test patch succeeds with sysadmin password only in params"""

self.mock_load_yaml_file_return_value["software_version"] = \
self.mock_load_yaml_file_return_value["software_version"] = (
fake_subcloud.FAKE_SOFTWARE_VERSION
)
self.mock_load_yaml_file.return_value = self.mock_load_yaml_file_return_value

for index, state in enumerate(psd_api.RESUMABLE_STATES, start=1):
@ -1097,10 +1144,12 @@ class TestPhasedSubcloudDeployPatchResume(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, f"{states_executed[0].title()} "
"was already executed and "
response,
http.client.BAD_REQUEST,
f"{states_executed[0].title()} was already executed and "
f"{psd_api.FILES_MAPPING[states_executed[0]][0].replace('_', '-')} "
"is not required", call_count=index - skipped_count
"is not required",
call_count=index - skipped_count,
)

def test_patch_resume_fails_with_deploy_state_to_run_as_config(self):
@ -1115,18 +1164,20 @@ class TestPhasedSubcloudDeployPatchResume(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Only deploy phase left is deploy "
f"config. Required {consts.DEPLOY_CONFIG} file was not provided and it "
"was not previously available. If manually configuring the subcloud, "
"please run 'dcmanager subcloud deploy complete'"
response,
http.client.BAD_REQUEST,
f"Only deploy phase left is deploy config. Required {consts.DEPLOY_CONFIG} "
"file was not provided and it was not previously available. If manually "
"configuring the subcloud, please run 'dcmanager subcloud deploy complete'",
)
self.mock_rpc_client().subcloud_deploy_resume.assert_not_called()

def test_patch_resume_fails_with_rpc_client_remote_error(self):
"""Test patch resume fails with rpc client remote error"""

self.mock_rpc_client().subcloud_deploy_resume.side_effect = \
RemoteError("msg", "value")
self.mock_rpc_client().subcloud_deploy_resume.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

@ -1143,14 +1194,16 @@ class TestPhasedSubcloudDeployPatchResume(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to resume subcloud deployment"
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to resume subcloud deployment",
)
self.mock_rpc_client().subcloud_deploy_resume.assert_called_once()


class TestPhasedSubcloudDeployPatchEnroll(BaseTestPhasedSubcloudDeployPatch):
"""Test class for patch requests with enroll verb"""

def setUp(self):
super().setUp()

@ -1161,15 +1214,16 @@ class TestPhasedSubcloudDeployPatchEnroll(BaseTestPhasedSubcloudDeployPatch):
)

modified_bootstrap_data = copy.copy(
fake_subcloud.FAKE_SUBCLOUD_BOOTSTRAP_PAYLOAD)
modified_bootstrap_data.update({"name": "fake subcloud1"})
fake_subcloud.FAKE_SUBCLOUD_BOOTSTRAP_PAYLOAD
)
modified_bootstrap_data.update({"name": "fake_subcloud1"})
modified_install_data = copy.copy(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES)
fake_content = json.dumps(modified_bootstrap_data).encode("utf-8")
install_fake_content = json.dumps(modified_install_data).encode("utf-8")

self.upload_files = [(
"bootstrap_values", "bootstrap_fake_filename", fake_content),
("install_values", "install_values_fake_filename", install_fake_content)
self.upload_files = [
("bootstrap_values", "bootstrap_fake_filename", fake_content),
("install_values", "install_values_fake_filename", install_fake_content),
]

def test_patch_enroll_fails(self):
@ -1178,21 +1232,21 @@ class TestPhasedSubcloudDeployPatchEnroll(BaseTestPhasedSubcloudDeployPatch):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "subcloud deploy enrollment is not "
"available yet"
response,
http.client.BAD_REQUEST,
"subcloud deploy enrollment is not available yet",
)

def test_patch_enroll_fails_invalid_deploy_status(self):
"""Test patch enroll fails with invalid deploy status"""

self._update_subcloud(
deploy_status=consts.DEPLOY_STATE_BOOTSTRAPPED
)
self._update_subcloud(deploy_status=consts.DEPLOY_STATE_BOOTSTRAPPED)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
response,
http.client.BAD_REQUEST,
"Subcloud deploy status must be either: "
f"{', '.join(psd_api.VALID_STATES_FOR_DEPLOY_ENROLL)}"
f"{', '.join(psd_api.VALID_STATES_FOR_DEPLOY_ENROLL)}",
)

@ -75,16 +75,8 @@ FAKE_SYSTEM_HEALTH_K8S_FAIL = (
"All kubernetes nodes are ready: [OK]\n"
"All kubernetes control plane pods are ready: [OK]\n"
)
FAKE_RESTORE_VALUES_INVALID_IP = {
"bootstrap_address": {
"subcloud1": "10.10.20.12.22"
}
}
FAKE_RESTORE_VALUES_VALID_IP = {
"bootstrap_address": {
"subcloud1": "10.10.20.12"
}
}
FAKE_RESTORE_VALUES_INVALID_IP = {"bootstrap_address": {"subcloud1": "10.10.20.12.22"}}
FAKE_RESTORE_VALUES_VALID_IP = {"bootstrap_address": {"subcloud1": "10.10.20.12"}}


class BaseTestSubcloudBackupController(DCManagerApiTest):
@ -93,7 +85,7 @@ class BaseTestSubcloudBackupController(DCManagerApiTest):
def setUp(self):
super().setUp()

self.url = '/v1.0/subcloud-backup'
self.url = "/v1.0/subcloud-backup"

self.subcloud = fake_subcloud.create_fake_subcloud(self.ctx)

@ -103,17 +95,25 @@ class BaseTestSubcloudBackupController(DCManagerApiTest):
self._mock_sysinv_client(dcmanager.common.utils)

def _update_subcloud(
self, availability_status=dccommon_consts.AVAILABILITY_ONLINE,
self,
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
deploy_status=consts.DEPLOY_STATE_DONE,
backup_datetime=None, backup_status=consts.BACKUP_STATE_UNKNOWN,
data_install=None, group_id=None
backup_datetime=None,
backup_status=consts.BACKUP_STATE_UNKNOWN,
data_install=None,
group_id=None,
):
db_api.subcloud_update(
self.ctx, self.subcloud.id, availability_status=availability_status,
management_state=management_state, backup_datetime=backup_datetime,
backup_status=backup_status, deploy_status=deploy_status,
data_install=data_install, group_id=group_id
self.ctx,
self.subcloud.id,
availability_status=availability_status,
management_state=management_state,
backup_datetime=backup_datetime,
backup_status=backup_status,
deploy_status=deploy_status,
data_install=data_install,
group_id=group_id,
)


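The `_update_subcloud` rewrite above applies the same rule to a signature: once the defaults push past the line limit, each parameter gets its own line with a trailing comma, and the call it forwards to gets one keyword argument per line. A reduced, runnable sketch with fewer parameters and plain string defaults in place of the real constants:

def update_subcloud(
    availability_status="online",
    management_state="managed",
    backup_status="unknown",
):
    # One keyword per line mirrors the exploded signature above.
    return dict(
        availability_status=availability_status,
        management_state=management_state,
        backup_status=backup_status,
    )

print(update_subcloud(backup_status="complete"))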
@ -131,7 +131,7 @@ class TestSubcloudBackupController(BaseTestSubcloudBackupController):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_response(response)
|
||||
self.assertEqual(response.text, 'null')
|
||||
self.assertEqual(response.text, "null")
|
||||
|
||||
|
||||
class BaseTestSubcloudBackupPost(BaseTestSubcloudBackupController):
|
||||
@ -202,21 +202,26 @@ class TestSubcloudBackupPost(BaseTestSubcloudBackupPost):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST, "Failed to decode subcloud "
|
||||
"sysadmin_password, verify the password is base64 encoded"
|
||||
response,
|
||||
http.client.BAD_REQUEST,
|
||||
"Failed to decode subcloud sysadmin_password, "
|
||||
"verify the password is base64 encoded",
|
||||
)
|
||||
|
||||
def test_post_fails_with_subcloud_and_group(self):
|
||||
"""Test post fails with subcloud and group"""
|
||||
|
||||
self.params = f'{{"sysadmin_password": "{self._create_password()}",' \
|
||||
self.params = (
|
||||
f'{{"sysadmin_password": "{self._create_password()}",'
|
||||
f'"subcloud": "{self.subcloud.id}", "group": {self.subcloud.id}}}'
|
||||
)
|
||||
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST, "'subcloud' and 'group' parameters "
|
||||
"should not be given at the same time"
|
||||
response,
|
||||
http.client.BAD_REQUEST,
|
||||
"'subcloud' and 'group' parameters should not be given at the same time",
|
||||
)
|
||||
|
||||
def test_post_fails_without_subcloud_and_group(self):
|
||||
@ -227,8 +232,9 @@ class TestSubcloudBackupPost(BaseTestSubcloudBackupPost):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST,
|
||||
"'subcloud' or 'group' parameter is required"
|
||||
response,
|
||||
http.client.BAD_REQUEST,
|
||||
"'subcloud' or 'group' parameter is required",
|
||||
)
|
||||
|
||||
|
||||
@ -238,14 +244,17 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
def setUp(self):
super().setUp()

self.params = f'{{"sysadmin_password": "{self._create_password()}",' \
self.params = (
f'{{"sysadmin_password": "{self._create_password()}",'
f'"subcloud": "{self.subcloud.id}"}}'
)

def test_post_subcloud_succeeds(self):
"""Test post subcloud succeeds"""

good_health_states = [
FAKE_GOOD_SYSTEM_HEALTH, FAKE_GOOD_SYSTEM_HEALTH_NO_ALARMS
FAKE_GOOD_SYSTEM_HEALTH,
FAKE_GOOD_SYSTEM_HEALTH_NO_ALARMS,
]

for system_health in good_health_states:

@ -260,8 +269,9 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
"""Test post subcloud fails with bad system health"""

bad_health_states = [
FAKE_SYSTEM_HEALTH_MGMT_ALARM, FAKE_SYSTEM_HEALTH_CEPH_FAIL,
FAKE_SYSTEM_HEALTH_K8S_FAIL
FAKE_SYSTEM_HEALTH_MGMT_ALARM,
FAKE_SYSTEM_HEALTH_CEPH_FAIL,
FAKE_SYSTEM_HEALTH_K8S_FAIL,
]

for index, system_health in enumerate(bad_health_states, start=1):

@ -271,15 +281,19 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, f"Subcloud {self.subcloud.name} "
"must be in good health for subcloud-backup create.", index
response,
http.client.BAD_REQUEST,
f"Subcloud {self.subcloud.name} must be in good health for "
"subcloud-backup create.",
index,
)

def test_post_subcloud_fails_with_unknown_subcloud(self):
"""Test post subcloud fails with unknown subcloud"""

self.params = f'{{"sysadmin_password": "{self._create_password()}",' \
f'"subcloud": "123"}}'
self.params = (
f'{{"sysadmin_password": "{self._create_password()}","subcloud": "123"}}'
)

response = self._send_request()

@ -290,16 +304,15 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
def test_post_subcloud_fails_with_subcloud_offline(self):
"""Test post subcloud fails with subcloud offline"""

self._update_subcloud(
availability_status=dccommon_consts.AVAILABILITY_OFFLINE
)
self._update_subcloud(availability_status=dccommon_consts.AVAILABILITY_OFFLINE)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, f"Subcloud {self.subcloud.name} must "
"be deployed, online, managed, and no ongoing prestage for the "
"subcloud-backup create operation."
response,
http.client.BAD_REQUEST,
f"Subcloud {self.subcloud.name} must be deployed, online, managed, "
"and no ongoing prestage for the subcloud-backup create operation.",
)

def test_post_subcloud_fails_with_unmanaged_subcloud(self):

@ -310,9 +323,10 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, f"Subcloud {self.subcloud.name} must "
"be deployed, online, managed, and no ongoing prestage for the "
"subcloud-backup create operation."
response,
http.client.BAD_REQUEST,
f"Subcloud {self.subcloud.name} must be deployed, online, managed, "
"and no ongoing prestage for the subcloud-backup create operation.",
)

def test_post_subcloud_fails_with_subcloud_in_invalid_deploy_state(self):

@ -323,22 +337,25 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, f"Subcloud {self.subcloud.name} must "
"be deployed, online, managed, and no ongoing prestage for the "
"subcloud-backup create operation."
response,
http.client.BAD_REQUEST,
f"Subcloud {self.subcloud.name} must be deployed, online, managed, and "
"no ongoing prestage for the subcloud-backup create operation.",
)

def test_post_subcloud_succeeds_with_backup_values(self):
"""Test post subcloud succeeds with backup values"""

self.params = f'{{"sysadmin_password": "{self._create_password()}",' \
f'"subcloud": "{self.subcloud.id}",' \
f'"backup_values": "TestFileDirectory"}}'
self.params = (
f'{{"sysadmin_password": "{self._create_password()}",'
f'"subcloud": "{self.subcloud.id}","backup_values": "TestFileDirectory"}}'
)

self._update_subcloud()

self.mock_sysinv_client().get_system_health.return_value = \
self.mock_sysinv_client().get_system_health.return_value = (
FAKE_GOOD_SYSTEM_HEALTH
)

response = self._send_request()

@ -358,13 +375,16 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
def test_post_subcloud_succeeds_with_local_only(self):
"""Test post subcloud succeeds with local only"""

self.params = f'{{"sysadmin_password": "{self._create_password()}",' \
self.params = (
f'{{"sysadmin_password": "{self._create_password()}",'
f'"subcloud": "{self.subcloud.id}", "local_only": "True"}}'
)

self._update_subcloud()

self.mock_sysinv_client().get_system_health.return_value = \
self.mock_sysinv_client().get_system_health.return_value = (
FAKE_GOOD_SYSTEM_HEALTH
)

response = self._send_request()

@ -373,32 +393,39 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
def test_post_subcloud_fails_with_invalid_local_only(self):
"""Test post subcloud fails with invalid local only"""

self.params = f'{{"sysadmin_password": "{self._create_password()}",' \
self.params = (
f'{{"sysadmin_password": "{self._create_password()}",'
f'"subcloud": "{self.subcloud.id}", "local_only": "fake"}}'
)

self._update_subcloud()

self.mock_sysinv_client().get_system_health.return_value = \
self.mock_sysinv_client().get_system_health.return_value = (
FAKE_GOOD_SYSTEM_HEALTH
)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Invalid local_only value, should be boolean"
response,
http.client.BAD_REQUEST,
"Invalid local_only value, should be boolean",
)

def test_post_subcloud_succeeds_with_local_only_and_registry_images(self):
"""Test post subcloud succeeds with local only and registry images"""

self.params = f'{{"sysadmin_password": "{self._create_password()}",' \
f'"subcloud": "{self.subcloud.id}", "local_only": "True", ' \
self.params = (
f'{{"sysadmin_password": "{self._create_password()}",'
f'"subcloud": "{self.subcloud.id}", "local_only": "True", '
f'"registry_images": "True"}}'
)

self._update_subcloud()

self.mock_sysinv_client().get_system_health.return_value = \
self.mock_sysinv_client().get_system_health.return_value = (
FAKE_GOOD_SYSTEM_HEALTH
)

response = self._send_request()

@ -407,21 +434,26 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
def test_post_subcloud_fails_with_registry_images(self):
"""Test post subcloud fails with registry images"""

self.params = f'{{"sysadmin_password": "{self._create_password()}",' \
self.params = (
f'{{"sysadmin_password": "{self._create_password()}",'
f'"subcloud": "{self.subcloud.id}", "registry_images": "True"}}'
)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Option registry_images can not be "
"used without local_only option."
response,
http.client.BAD_REQUEST,
"Option registry_images can not be used without local_only option.",
)

def test_post_subcloud_fails_with_unknown_parameter(self):
"""Test post subcloud fails with unknown parameter"""

self.params = f'{{"sysadmin_password": "{self._create_password()}",' \
self.params = (
f'{{"sysadmin_password": "{self._create_password()}",'
f'"subcloud": "{self.subcloud.id}", "unknown_parameter": "fake"}}'
)

response = self._send_request()

@ -443,13 +475,17 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
def test_post_subcloud_succeeds_with_final_backup_states(self):
"""Test post subcloud succeeds with final backup states"""

self.mock_sysinv_client().get_system_health.return_value = \
self.mock_sysinv_client().get_system_health.return_value = (
FAKE_GOOD_SYSTEM_HEALTH
)

final_backup_states = [
consts.BACKUP_STATE_VALIDATE_FAILED, consts.BACKUP_STATE_PREP_FAILED,
consts.BACKUP_STATE_FAILED, consts.BACKUP_STATE_UNKNOWN,
consts.BACKUP_STATE_COMPLETE_CENTRAL, consts.BACKUP_STATE_COMPLETE_LOCAL
consts.BACKUP_STATE_VALIDATE_FAILED,
consts.BACKUP_STATE_PREP_FAILED,
consts.BACKUP_STATE_FAILED,
consts.BACKUP_STATE_UNKNOWN,
consts.BACKUP_STATE_COMPLETE_CENTRAL,
consts.BACKUP_STATE_COMPLETE_LOCAL,
]

for backup_state in final_backup_states:

@ -462,8 +498,9 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
def test_post_subcloud_fails_with_ongoing_backup_states(self):
"""Test post subcloud fails with ongoing backup states"""

self.mock_sysinv_client().get_system_health.return_value = \
self.mock_sysinv_client().get_system_health.return_value = (
FAKE_GOOD_SYSTEM_HEALTH
)

for index, state in enumerate(consts.STATES_FOR_ONGOING_BACKUP, start=1):
self._update_subcloud(backup_status=state)

@ -471,8 +508,10 @@ class TestSubcloudBackupPostSubcloud(BaseTestSubcloudBackupPost):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Subcloud(s) already have a "
"backup operation in progress.", call_count=index
response,
http.client.BAD_REQUEST,
"Subcloud(s) already have a backup operation in progress.",
call_count=index,
)

@ -482,8 +521,10 @@ class TestSubcloudBackupPostGroup(BaseTestSubcloudBackupPost):
def setUp(self):
super().setUp()

self.params = f'{{"sysadmin_password": "{self._create_password()}",' \
self.params = (
f'{{"sysadmin_password": "{self._create_password()}",'
f'"group": "{self.subcloud.id}"}}'
)

def test_post_group_succeeds(self):
"""Test post group succeeds"""

@ -497,8 +538,9 @@ class TestSubcloudBackupPostGroup(BaseTestSubcloudBackupPost):
def test_post_group_fails_with_unknown_group(self):
"""Test post group fails with unknown group"""

self.params = f'{{"sysadmin_password": "{self._create_password()}",' \
f'"group": "123"}}'
self.params = (
f'{{"sysadmin_password": "{self._create_password()}","group": "123"}}'
)

response = self._send_request()

@ -509,15 +551,15 @@ class TestSubcloudBackupPostGroup(BaseTestSubcloudBackupPost):
def test_post_group_fails_with_subcloud_offline(self):
"""Test post group fails with subcloud offline"""

self._update_subcloud(
availability_status=dccommon_consts.AVAILABILITY_OFFLINE
)
self._update_subcloud(availability_status=dccommon_consts.AVAILABILITY_OFFLINE)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "None of the subclouds in group "
f"{self.subcloud.id} are in a valid state for subcloud-backup create"
response,
http.client.BAD_REQUEST,
f"None of the subclouds in group {self.subcloud.id} are in "
"a valid state for subcloud-backup create",
)

def test_post_group_fails_with_unmanaged_subcloud(self):

@ -528,8 +570,10 @@ class TestSubcloudBackupPostGroup(BaseTestSubcloudBackupPost):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "None of the subclouds in group "
f"{self.subcloud.id} are in a valid state for subcloud-backup create"
response,
http.client.BAD_REQUEST,
f"None of the subclouds in group {self.subcloud.id} are in "
"a valid state for subcloud-backup create",
)

def test_post_group_fails_with_subcloud_in_invalid_deploy_state(self):

@ -540,8 +584,10 @@ class TestSubcloudBackupPostGroup(BaseTestSubcloudBackupPost):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "None of the subclouds in group "
f"{self.subcloud.id} are in a valid state for subcloud-backup create"
response,
http.client.BAD_REQUEST,
f"None of the subclouds in group {self.subcloud.id} are in "
"a valid state for subcloud-backup create",
)

def test_post_group_fails_with_rpc_client_remote_error(self):

@ -549,8 +595,9 @@ class TestSubcloudBackupPostGroup(BaseTestSubcloudBackupPost):

self._update_subcloud()

self.mock_rpc_client().backup_subclouds.side_effect = \
RemoteError("msg", "value")
self.mock_rpc_client().backup_subclouds.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

@ -623,7 +670,8 @@ class BaseTestSubcloudBackupPatchDelete(BaseTestSubcloudBackupPatch):
self.url = f"{self.url}/delete/22.12"

self.mock_rpc_client().delete_subcloud_backups.return_value = (
"delete_subcloud_backups", {"release_version": "22.12"}
"delete_subcloud_backups",
{"release_version": "22.12"},
)


@ -639,14 +687,15 @@ class TestSubcloudBackupPatchDelete(BaseTestSubcloudBackupPatchDelete):
self.params = {
"sysadmin_password": self._create_password(),
"subcloud": str(self.subcloud.id),
"group": str(self.subcloud.id)
"group": str(self.subcloud.id),
}

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "'subcloud' and 'group' parameters "
"should not be given at the same time"
response,
http.client.BAD_REQUEST,
"'subcloud' and 'group' parameters should not be given at the same time",
)

def test_patch_delete_fails_without_subcloud_and_group(self):

@ -657,8 +706,9 @@ class TestSubcloudBackupPatchDelete(BaseTestSubcloudBackupPatchDelete):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"'subcloud' or 'group' parameter is required"
response,
http.client.BAD_REQUEST,
"'subcloud' or 'group' parameter is required",
)

def test_patch_delete_fails_without_release_version(self):

@ -669,8 +719,7 @@ class TestSubcloudBackupPatchDelete(BaseTestSubcloudBackupPatchDelete):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Release version required"
response, http.client.BAD_REQUEST, "Release version required"
)

@ -682,7 +731,7 @@ class TestSubcloudBackupPatchDeleteSubcloud(BaseTestSubcloudBackupPatchDelete):

self.params = {
"sysadmin_password": self._create_password(),
"subcloud": str(self.subcloud.id)
"subcloud": str(self.subcloud.id),
}

def test_patch_delete_subcloud_succeeds(self):

@ -729,8 +778,9 @@ class TestSubcloudBackupPatchDeleteSubcloud(BaseTestSubcloudBackupPatchDelete):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Invalid local_only value, should be boolean"
response,
http.client.BAD_REQUEST,
"Invalid local_only value, should be boolean",
)

def test_patch_delete_subcloud_fails_with_unknown_subcloud(self):

@ -747,8 +797,9 @@ class TestSubcloudBackupPatchDeleteSubcloud(BaseTestSubcloudBackupPatchDelete):
def test_patch_delete_subcloud_fails_with_rpc_client_remote_error(self):
"""Test patch delete subcloud fails with rpc client remote error"""

self.mock_rpc_client().delete_subcloud_backups.side_effect = \
RemoteError("msg", "value")
self.mock_rpc_client().delete_subcloud_backups.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

@ -764,8 +815,9 @@ class TestSubcloudBackupPatchDeleteSubcloud(BaseTestSubcloudBackupPatchDelete):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to delete subcloud backups"
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to delete subcloud backups",
)


@ -777,7 +829,7 @@ class TestSubcloudBackupPatchGroup(BaseTestSubcloudBackupPatchDelete):

self.params = {
"sysadmin_password": self._create_password(),
"group": str(self.subcloud.id)
"group": str(self.subcloud.id),
}

def test_patch_delete_group_succeeds(self):

@ -790,9 +842,7 @@ class TestSubcloudBackupPatchGroup(BaseTestSubcloudBackupPatchDelete):
def test_patch_delete_group_fails_with_unknown_group(self):
"""Test patch delete group fails with unknown group"""

self._update_subcloud(
availability_status=dccommon_consts.AVAILABILITY_OFFLINE
)
self._update_subcloud(availability_status=dccommon_consts.AVAILABILITY_OFFLINE)

self.params["group"] = "999"

@ -816,8 +866,10 @@ class TestSubcloudBackupPatchGroup(BaseTestSubcloudBackupPatchDelete):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "None of the subclouds in group "
f"{self.subcloud.id} are in a valid state for subcloud-backup delete"
response,
http.client.BAD_REQUEST,
f"None of the subclouds in group {self.subcloud.id} are in "
"a valid state for subcloud-backup delete",
)

@ -851,14 +903,15 @@ class TestSubcloudBackupPatchRestore(BaseTestSubcloudBackupPatchRestore):
self.params = {
"sysadmin_password": self._create_password(),
"subcloud": str(self.subcloud.id),
"group": str(self.subcloud.id)
"group": str(self.subcloud.id),
}

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "'subcloud' and 'group' parameters "
"should not be given at the same time"
response,
http.client.BAD_REQUEST,
"'subcloud' and 'group' parameters should not be given at the same time",
)

def test_patch_restore_fails_without_subcloud_and_group(self):

@ -869,8 +922,9 @@ class TestSubcloudBackupPatchRestore(BaseTestSubcloudBackupPatchRestore):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"'subcloud' or 'group' parameter is required"
response,
http.client.BAD_REQUEST,
"'subcloud' or 'group' parameter is required",
)

@ -882,7 +936,7 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)

self.params = {
"sysadmin_password": self._create_password(),
"subcloud": str(self.subcloud.id)
"subcloud": str(self.subcloud.id),
}

self._mock_os_listdir()

@ -915,8 +969,10 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, f"Subcloud {self.subcloud.name} must "
"have a valid bootstrap address: 10.10.20.12.22"
response,
http.client.BAD_REQUEST,
f"Subcloud {self.subcloud.name} must "
"have a valid bootstrap address: 10.10.20.12.22",
)

def test_patch_restore_subcloud_fails_with_invalid_restore_values(self):

@ -927,8 +983,9 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "The bootstrap_address provided in "
"restore_values is in invalid format."
response,
http.client.BAD_REQUEST,
"The bootstrap_address provided in restore_values is in invalid format.",
)

def test_patch_restore_subcloud_fails_with_unknown_subcloud(self):

@ -950,8 +1007,9 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Option registry_images cannot be "
"used without local_only option."
response,
http.client.BAD_REQUEST,
"Option registry_images cannot be used without local_only option.",
)

def test_patch_restore_subcloud_fails_with_managed_subcloud(self):

@ -962,24 +1020,28 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, f"Subcloud {self.subcloud.name} "
"must be unmanaged and in a valid deploy state for the "
"subcloud-backup restore operation."
response,
http.client.BAD_REQUEST,
f"Subcloud {self.subcloud.name} must be unmanaged and in a valid "
"deploy state for the subcloud-backup restore operation.",
)

def test_patch_restore_subcloud_fails_with_subcloud_in_invalid_state(self):
"""Test patch restore subcloud fails with subcloud in invalid state"""

for index, status in \
enumerate(consts.INVALID_DEPLOY_STATES_FOR_RESTORE, start=1):
for index, status in enumerate(
consts.INVALID_DEPLOY_STATES_FOR_RESTORE, start=1
):
self._update_subcloud(deploy_status=status)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, f"Subcloud {self.subcloud.name} "
"must be unmanaged and in a valid deploy state for the "
"subcloud-backup restore operation.", call_count=index
response,
http.client.BAD_REQUEST,
f"Subcloud {self.subcloud.name} must be unmanaged and in a valid "
"deploy state for the subcloud-backup restore operation.",
call_count=index,
)

def test_patch_restore_subcloud_succeeds_with_install_without_release(self):

@ -987,11 +1049,10 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)

self.params["with_install"] = "True"

data_install = \
str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace('/', '"')
data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace("/", '"')
self._update_subcloud(
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
data_install=data_install
data_install=data_install,
)

response = self._send_request()

@ -1004,17 +1065,15 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)
self.params["with_install"] = "True"
self.params["release"] = "22.12"

data_install = \
str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace('/', '"')
data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace("/", '"')
self._update_subcloud(
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
data_install=data_install
data_install=data_install,
)

with mock.patch(
'builtins.open', mock.mock_open(
read_data=fake_subcloud.FAKE_UPGRADES_METADATA
)
"builtins.open",
mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA),
):
response = self._send_request()

@ -1025,23 +1084,22 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)

self.params["release"] = "22.12"

data_install = \
str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace('/', '"')
data_install = str(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES).replace("/", '"')
self._update_subcloud(
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
data_install=data_install
data_install=data_install,
)

with mock.patch(
'builtins.open', mock.mock_open(
read_data=fake_subcloud.FAKE_UPGRADES_METADATA
)
"builtins.open",
mock.mock_open(read_data=fake_subcloud.FAKE_UPGRADES_METADATA),
):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Option release cannot be used "
"without with_install option."
response,
http.client.BAD_REQUEST,
"Option release cannot be used without with_install option.",
)

def test_patch_restore_subcloud_fails_with_install_without_install_values(self):

@ -1056,9 +1114,10 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "The restore operation was requested "
"with_install, but the following subcloud(s) does not contain install "
f"values: {self.subcloud.name}"
response,
http.client.BAD_REQUEST,
"The restore operation was requested with_install, but the following "
f"subcloud(s) does not contain install values: {self.subcloud.name}",
)

def test_patch_restore_subcloud_fails_with_install_without_matching_iso(self):

@ -1073,8 +1132,9 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "No matching: .iso found in vault: "
"/opt/dc-vault/loads/TEST.SW.VERSION/"
response,
http.client.BAD_REQUEST,
"No matching: .iso found in vault: /opt/dc-vault/loads/TEST.SW.VERSION/",
)

def test_patch_restore_subcloud_fails_with_install_without_matching_sig(self):

@ -1089,11 +1149,12 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "No matching: .sig found in vault: "
"/opt/dc-vault/loads/TEST.SW.VERSION/"
response,
http.client.BAD_REQUEST,
"No matching: .sig found in vault: /opt/dc-vault/loads/TEST.SW.VERSION/",
)

@mock.patch('dcmanager.common.utils.get_matching_iso')
@mock.patch("dcmanager.common.utils.get_matching_iso")
def test_patch_restore_subcloud_fails_with_invalid_release(self, matching_iso):
"""Test patch restore subcloud fails with invalid release"""

@ -1105,8 +1166,9 @@ class TestSubcloudBackupPatchRestoreSubcloud(BaseTestSubcloudBackupPatchRestore)
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Error: unable to validate the release version."
response,
http.client.INTERNAL_SERVER_ERROR,
"Error: unable to validate the release version.",
)


@ -1118,7 +1180,7 @@ class TestSubcloudBackupPatchRestoreGroup(BaseTestSubcloudBackupPatchRestore):

self.params = {
"sysadmin_password": self._create_password(),
"group": str(self.subcloud.id)
"group": str(self.subcloud.id),
}

def test_patch_restore_group_succeeds(self):

@ -1136,8 +1198,10 @@ class TestSubcloudBackupPatchRestoreGroup(BaseTestSubcloudBackupPatchRestore):
"""

fake_subcloud.create_fake_subcloud(
self.ctx, group_id=self.subcloud.id, name=base.SUBCLOUD_2["name"],
region_name=base.SUBCLOUD_2["region_name"]
self.ctx,
group_id=self.subcloud.id,
name=base.SUBCLOUD_2["name"],
region_name=base.SUBCLOUD_2["region_name"],
)

self._update_subcloud()

@ -1171,8 +1235,9 @@ class TestSubcloudBackupPatchRestoreGroup(BaseTestSubcloudBackupPatchRestore):
def test_patch_restore_fails_with_rpc_client_remote_error(self):
"""Test patch restore fails when rpc client raises a remote error"""

self.mock_rpc_client().restore_subcloud_backups.side_effect = \
RemoteError("msg", "value")
self.mock_rpc_client().restore_subcloud_backups.side_effect = RemoteError(
"msg", "value"
)

response = self._send_request()

@ -29,9 +29,9 @@ from dcmanager.tests.unit.api.test_root_controller import DCManagerApiTest
from dcmanager.tests.unit.common import fake_subcloud

FAKE_SOFTWARE_VERSION = "22.12"
FAKE_DEPLOY_PLAYBOOK_FILE = 'deployment-manager.yaml'
FAKE_DEPLOY_OVERRIDES_FILE = 'deployment-manager-overrides-subcloud.yaml'
FAKE_DEPLOY_CHART_FILE = 'deployment-manager.tgz'
FAKE_DEPLOY_PLAYBOOK_FILE = "deployment-manager.yaml"
FAKE_DEPLOY_OVERRIDES_FILE = "deployment-manager-overrides-subcloud.yaml"
FAKE_DEPLOY_CHART_FILE = "deployment-manager.tgz"
FAKE_DEPLOY_FILES = {
f"{consts.DEPLOY_PLAYBOOK}_": FAKE_DEPLOY_PLAYBOOK_FILE,
f"{consts.DEPLOY_OVERRIDES}_": FAKE_DEPLOY_OVERRIDES_FILE,

@ -59,29 +59,28 @@ class BaseTestSubcloudDeployController(DCManagerApiTest):
def _mock_os_open(self):
"""Mock os' open"""

mock_patch_object = mock.patch.object(os, 'open')
mock_patch_object = mock.patch.object(os, "open")
self.mock_os_open = mock_patch_object.start()
self.addCleanup(mock_patch_object.stop)

def _mock_os_write(self):
"""Mock os' write"""

mock_patch_object = mock.patch.object(os, 'write')
mock_patch_object = mock.patch.object(os, "write")
self.mock_os_write = mock_patch_object.start()
self.addCleanup(mock_patch_object.stop)

def _mock_get_filename_by_prefix(self):
"""Mock dutils' get_filename_by_prefix"""

mock_patch_object = mock.patch.object(
dutils, "get_filename_by_prefix"
)
mock_patch_object = mock.patch.object(dutils, "get_filename_by_prefix")
self.mock_get_filename_by_prefix = mock_patch_object.start()
self.addCleanup(mock_patch_object.stop)

def _setup_get_filename_by_prefix(self):
self.mock_get_filename_by_prefix.side_effect = \
self.mock_get_filename_by_prefix.side_effect = (
self._mock_get_filename_by_prefix_side_effect
)

def _mock_get_filename_by_prefix_side_effect(self, _, prefix):
filename = FAKE_DEPLOY_FILES.get(prefix)

@ -119,7 +118,7 @@ class TestSubcloudDeployController(BaseTestSubcloudDeployController):
response = self._send_request()

self._assert_response(response)
self.assertEqual(response.text, 'null')
self.assertEqual(response.text, "null")


class TestSubcloudDeployPost(BaseTestSubcloudDeployController):

@ -174,15 +173,18 @@ class TestSubcloudDeployPost(BaseTestSubcloudDeployController):
"""Test post fails with missing deploy chart"""

file_options = [
consts.DEPLOY_PLAYBOOK, consts.DEPLOY_OVERRIDES, consts.DEPLOY_PRESTAGE
consts.DEPLOY_PLAYBOOK,
consts.DEPLOY_OVERRIDES,
consts.DEPLOY_PRESTAGE,
]
self.upload_files = self._create_fake_fields(file_options, False)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
f"error: argument --{consts.DEPLOY_CHART} is required"
response,
http.client.BAD_REQUEST,
f"error: argument --{consts.DEPLOY_CHART} is required",
)

def test_post_fails_with_missing_deploy_chart_and_deploy_prestage(self):

@ -194,45 +196,54 @@ class TestSubcloudDeployPost(BaseTestSubcloudDeployController):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
f"error: argument --{consts.DEPLOY_CHART} is required"
response,
http.client.BAD_REQUEST,
f"error: argument --{consts.DEPLOY_CHART} is required",
)

def test_post_fails_with_missing_deploy_playbook(self):
"""Test post fails with missing deploy playbook"""

file_options = [
consts.DEPLOY_CHART, consts.DEPLOY_OVERRIDES, consts.DEPLOY_PRESTAGE
consts.DEPLOY_CHART,
consts.DEPLOY_OVERRIDES,
consts.DEPLOY_PRESTAGE,
]
self.upload_files = self._create_fake_fields(file_options, False)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
f"error: argument --{consts.DEPLOY_PLAYBOOK} is required"
response,
http.client.BAD_REQUEST,
f"error: argument --{consts.DEPLOY_PLAYBOOK} is required",
)

def test_post_fails_with_missing_deploy_overrides(self):
"""Test post fails with missing deploy overrides"""

file_options = [
consts.DEPLOY_PLAYBOOK, consts.DEPLOY_CHART, consts.DEPLOY_PRESTAGE
consts.DEPLOY_PLAYBOOK,
consts.DEPLOY_CHART,
consts.DEPLOY_PRESTAGE,
]
self.upload_files = self._create_fake_fields(file_options, False)

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
f"error: argument --{consts.DEPLOY_OVERRIDES} is required"
response,
http.client.BAD_REQUEST,
f"error: argument --{consts.DEPLOY_OVERRIDES} is required",
)

def test_post_succeeds_with_missing_deploy_prestage(self):
"""Test post succeeds with missing deploy prestage"""

file_options = [
consts.DEPLOY_PLAYBOOK, consts.DEPLOY_OVERRIDES, consts.DEPLOY_CHART
consts.DEPLOY_PLAYBOOK,
consts.DEPLOY_OVERRIDES,
consts.DEPLOY_CHART,
]
self.upload_files = self._create_fake_fields(file_options, False)

@ -271,8 +282,9 @@ class TestSubcloudDeployPost(BaseTestSubcloudDeployController):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
f"No {consts.DEPLOY_PLAYBOOK} file uploaded"
response,
http.client.BAD_REQUEST,
f"No {consts.DEPLOY_PLAYBOOK} file uploaded",
)

def test_post_fails_with_internal_server_error(self):

@ -283,8 +295,9 @@ class TestSubcloudDeployPost(BaseTestSubcloudDeployController):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
f"Failed to upload {consts.DEPLOY_PLAYBOOK} file: fake file name"
response,
http.client.INTERNAL_SERVER_ERROR,
f"Failed to upload {consts.DEPLOY_PLAYBOOK} file: fake file name",
)

@ -311,24 +324,21 @@ class TestSubcloudDeployGet(BaseTestSubcloudDeployController):

self._assert_response(response)
self.assertEqual(
FAKE_SOFTWARE_VERSION,
response.json["subcloud_deploy"]["software_version"]
FAKE_SOFTWARE_VERSION, response.json["subcloud_deploy"]["software_version"]
)
self.assertEqual(
FAKE_DEPLOY_PLAYBOOK_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_PLAYBOOK]
response.json["subcloud_deploy"][consts.DEPLOY_PLAYBOOK],
)
self.assertEqual(
FAKE_DEPLOY_OVERRIDES_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_OVERRIDES]
response.json["subcloud_deploy"][consts.DEPLOY_OVERRIDES],
)
self.assertEqual(
FAKE_DEPLOY_CHART_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_CHART]
)
self.assertEqual(
None, response.json["subcloud_deploy"][consts.DEPLOY_PRESTAGE]
response.json["subcloud_deploy"][consts.DEPLOY_CHART],
)
self.assertEqual(None, response.json["subcloud_deploy"][consts.DEPLOY_PRESTAGE])

def test_get_succeeds_without_release(self):
"""Test get succeeds without release"""

@ -339,24 +349,21 @@ class TestSubcloudDeployGet(BaseTestSubcloudDeployController):

self._assert_response(response)
self.assertEqual(
FAKE_SOFTWARE_VERSION,
response.json["subcloud_deploy"]["software_version"]
FAKE_SOFTWARE_VERSION, response.json["subcloud_deploy"]["software_version"]
)
self.assertEqual(
FAKE_DEPLOY_PLAYBOOK_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_PLAYBOOK]
response.json["subcloud_deploy"][consts.DEPLOY_PLAYBOOK],
)
self.assertEqual(
FAKE_DEPLOY_OVERRIDES_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_OVERRIDES]
response.json["subcloud_deploy"][consts.DEPLOY_OVERRIDES],
)
self.assertEqual(
FAKE_DEPLOY_CHART_FILE,
response.json["subcloud_deploy"][consts.DEPLOY_CHART]
)
self.assertEqual(
None, response.json["subcloud_deploy"][consts.DEPLOY_PRESTAGE]
response.json["subcloud_deploy"][consts.DEPLOY_CHART],
)
self.assertEqual(None, response.json["subcloud_deploy"][consts.DEPLOY_PRESTAGE])


class TestSubcloudDeployDelete(BaseTestSubcloudDeployController):

@ -388,8 +395,9 @@ class TestSubcloudDeployDelete(BaseTestSubcloudDeployController):
def test_delete_succeeds_with_release(self):
"""Test delete succeeds with release"""

self.url = f"{self.url}/{self.version}?prestage_images=False"\
"&deployment_files=False"
self.url = (
f"{self.url}/{self.version}?prestage_images=False&deployment_files=False"
)

response = self._send_request()

@ -437,8 +445,9 @@ class TestSubcloudDeployDelete(BaseTestSubcloudDeployController):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.NOT_FOUND,
f"Directory not found: {self.sw_version_directory}{version}"
response,
http.client.NOT_FOUND,
f"Directory not found: {self.sw_version_directory}{version}",
)

def test_delete_fails_with_internal_server_error(self):

@ -449,6 +458,7 @@ class TestSubcloudDeployDelete(BaseTestSubcloudDeployController):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Failed to delete file: fake file name"
response,
http.client.INTERNAL_SERVER_ERROR,
"Failed to delete file: fake file name",
)

@ -31,18 +31,23 @@ from dcmanager.tests.unit.api.v1.controllers.mixins import PostJSONMixin
from dcmanager.tests.unit.api.v1.controllers.mixins import UpdateMixin
from dcmanager.tests.unit.common import fake_subcloud

SAMPLE_SUBCLOUD_GROUP_NAME = 'GroupX'
SAMPLE_SUBCLOUD_GROUP_DESCRIPTION = 'A Group of mystery'
SAMPLE_SUBCLOUD_GROUP_NAME = "GroupX"
SAMPLE_SUBCLOUD_GROUP_DESCRIPTION = "A Group of mystery"
SAMPLE_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE = consts.SUBCLOUD_APPLY_TYPE_SERIAL
SAMPLE_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS = 3


class SubcloudGroupAPIMixin(APIMixin):
API_PREFIX = '/v1.0/subcloud-groups'
RESULT_KEY = 'subcloud_groups'
API_PREFIX = "/v1.0/subcloud-groups"
RESULT_KEY = "subcloud_groups"
EXPECTED_FIELDS = [
'id', 'name', 'description', 'max_parallel_subclouds', 'update_apply_type',
'created-at', 'updated-at'
"id",
"name",
"description",
"max_parallel_subclouds",
"update_apply_type",
"created-at",
"updated-at",
]

def setUp(self):

@ -51,15 +56,14 @@ class SubcloudGroupAPIMixin(APIMixin):
def _get_test_subcloud_group_dict(self, **kw):
# id should not be part of the structure
return {
'name': kw.get('name', SAMPLE_SUBCLOUD_GROUP_NAME),
'description': kw.get('description', SAMPLE_SUBCLOUD_GROUP_DESCRIPTION),
'update_apply_type': kw.get(
'update_apply_type', SAMPLE_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE
"name": kw.get("name", SAMPLE_SUBCLOUD_GROUP_NAME),
"description": kw.get("description", SAMPLE_SUBCLOUD_GROUP_DESCRIPTION),
"update_apply_type": kw.get(
"update_apply_type", SAMPLE_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE
),
"max_parallel_subclouds": kw.get(
"max_parallel_subclouds", SAMPLE_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS
),
'max_parallel_subclouds': kw.get(
'max_parallel_subclouds',
SAMPLE_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS
)
}

# The following methods are required for subclasses of APIMixin

@ -174,14 +178,14 @@ class TestSubcloudGroupPost(BaseTestSubcloudGroupController, PostJSONMixin):

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Invalid group name"

)

def test_post_fails_with_invalid_description(self):
"""Test post fails with invalid description"""

invalid_values = [
"", "a" * (subcloud_group.MAX_SUBCLOUD_GROUP_DESCRIPTION_LEN + 1)
"",
"a" * (subcloud_group.MAX_SUBCLOUD_GROUP_DESCRIPTION_LEN + 1),
]

for index, invalid_value in enumerate(invalid_values, start=1):

@ -190,8 +194,10 @@ class TestSubcloudGroupPost(BaseTestSubcloudGroupController, PostJSONMixin):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Invalid group description",
call_count=index
response,
http.client.BAD_REQUEST,
"Invalid group description",
call_count=index,
)

def test_post_fails_with_invalid_update_apply_type(self):

@ -233,8 +239,10 @@ class TestSubcloudGroupPost(BaseTestSubcloudGroupController, PostJSONMixin):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Invalid group max_parallel_subclouds", call_count=index
response,
http.client.BAD_REQUEST,
"Invalid group max_parallel_subclouds",
call_count=index,
)

def test_post_fails_with_db_api_duplicate_entry(self):

@ -245,8 +253,9 @@ class TestSubcloudGroupPost(BaseTestSubcloudGroupController, PostJSONMixin):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"A subcloud group with this name already exists"
response,
http.client.BAD_REQUEST,
"A subcloud group with this name already exists",
)

@mock.patch.object(db_api, "subcloud_group_create")

@ -270,8 +279,9 @@ class TestSubcloudGroupPost(BaseTestSubcloudGroupController, PostJSONMixin):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to create subcloud group"
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to create subcloud group",
)


@ -331,8 +341,8 @@ class TestSubcloudGroupGetSubclouds(BaseTestSubcloudGroupGet):

self._assert_response(response)
# This API returns 'subclouds' rather than 'subcloud-groups'
self.assertIn('subclouds', response.json)
self.assertEqual(0, len(response.json.get('subclouds')))
self.assertIn("subclouds", response.json)
self.assertEqual(0, len(response.json.get("subclouds")))

def test_get_subclouds_succeeds_with_subcloud_in_group(self):
"""Test get subclouds succeeds with subcloud in group

@ -345,8 +355,8 @@ class TestSubcloudGroupGetSubclouds(BaseTestSubcloudGroupGet):

response = self._send_request()

self.assertIn('subclouds', response.json)
self.assertEqual(1, len(response.json.get('subclouds')))
self.assertIn("subclouds", response.json)
self.assertEqual(1, len(response.json.get("subclouds")))


class TestSubcloudGroupPatch(BaseTestSubcloudGroupController, UpdateMixin):

@ -440,22 +450,25 @@ class TestSubcloudGroupPatch(BaseTestSubcloudGroupController, UpdateMixin):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Invalid group max_parallel_subclouds", call_count=index
response,
http.client.BAD_REQUEST,
"Invalid group max_parallel_subclouds",
call_count=index,
)

def test_patch_fails_with_invalid_description(self):
"""Test patch fails with invalid description"""

self.params = {
"description":
"a" * (subcloud_group.MAX_SUBCLOUD_GROUP_DESCRIPTION_LEN + 1)
"description": "a" * (subcloud_group.MAX_SUBCLOUD_GROUP_DESCRIPTION_LEN + 1)
}

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Invalid group description",
response,
http.client.BAD_REQUEST,
"Invalid group description",
)

@mock.patch.object(db_api, "subcloud_group_update")

@ -483,8 +496,9 @@ class TestSubcloudGroupPatch(BaseTestSubcloudGroupController, UpdateMixin):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to update subcloud group"
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to update subcloud group",
)

@ -508,25 +522,26 @@ class TestSubcloudGroupDelete(BaseTestSubcloudGroupController, DeleteMixin):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Default Subcloud Group may not be deleted"
response,
http.client.BAD_REQUEST,
"Default Subcloud Group may not be deleted",
)

def test_delete_fails_with_subcloud_in_group(self):
"""Test delete fails with subcloud in group"""

subcloud_group = self._create_db_object(self.ctx)
fake_subcloud.create_fake_subcloud(
self.ctx, group_id=subcloud_group.id
)
fake_subcloud.create_fake_subcloud(self.ctx, group_id=subcloud_group.id)

self.url = f"{self.API_PREFIX}/{subcloud_group.id}"

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to delete subcloud group", call_count=2
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to delete subcloud group",
call_count=2,
)

@mock.patch.object(db_api, "subcloud_group_destroy")

@ -558,6 +573,7 @@ class TestSubcloudGroupDelete(BaseTestSubcloudGroupController, DeleteMixin):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to delete subcloud group"
response,
http.client.INTERNAL_SERVER_ERROR,
"Unable to delete subcloud group",
)

File diff suppressed because it is too large
File diff suppressed because it is too large

@ -90,12 +90,15 @@ class BaseTestSwUpdateOptionsController(DCManagerApiTest):
return {
"storage_apply_type": SW_UPDATE_OPTS_CONST_DEFAULT["storage-apply-type"],
"worker_apply_type": SW_UPDATE_OPTS_CONST_DEFAULT["worker-apply-type"],
"max_parallel_workers":
SW_UPDATE_OPTS_CONST_DEFAULT["max-parallel-workers"],
"alarm_restriction_type":
SW_UPDATE_OPTS_CONST_DEFAULT["alarm-restriction-type"],
"default_instance_action":
SW_UPDATE_OPTS_CONST_DEFAULT["default-instance-action"]
"max_parallel_workers": SW_UPDATE_OPTS_CONST_DEFAULT[
"max-parallel-workers"
],
"alarm_restriction_type": SW_UPDATE_OPTS_CONST_DEFAULT[
"alarm-restriction-type"
],
"default_instance_action": SW_UPDATE_OPTS_CONST_DEFAULT[
"default-instance-action"
],
}

def _create_sw_update_opts(self):

@ -128,9 +131,7 @@ class TestSwUpdateOptionsController(BaseTestSwUpdateOptionsController):
self.assertEqual(response.text, "null")


class TestSwUpdateOptionsGet(
BaseTestSwUpdateOptionsController, SwUpdateOptionsMixin
):
class TestSwUpdateOptionsGet(BaseTestSwUpdateOptionsController, SwUpdateOptionsMixin):
"""Test class for get requests"""

def setUp(self):

@ -141,12 +142,16 @@ class TestSwUpdateOptionsGet(
self._create_sw_update_opts()

self._mock_object(
db_api, 'sw_update_opts_get', 'mock_sw_update_opts',
db_api.sw_update_opts_get
db_api,
"sw_update_opts_get",
"mock_sw_update_opts",
db_api.sw_update_opts_get,
)
self._mock_object(
db_api, 'sw_update_opts_default_get', 'mock_sw_update_opts_default',
db_api.sw_update_opts_default_get
db_api,
"sw_update_opts_default_get",
"mock_sw_update_opts_default",
db_api.sw_update_opts_default_get,
)

def test_get_succeeds_without_subcloud_ref(self):

@ -180,8 +185,10 @@ class TestSwUpdateOptionsGet(
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.NOT_FOUND, "No options found for Subcloud with id "
f"{self.subcloud.id}, defaults will be used."
response,
http.client.NOT_FOUND,
f"No options found for Subcloud with id {self.subcloud.id}, "
"defaults will be used.",
)

@ -215,9 +222,7 @@ class TestSwUpdateOptionsPost(BaseTestSwUpdateOptionsPost):
)


class TestSwUpdateOptionsPostUpdate(
BaseTestSwUpdateOptionsPost, SwUpdateOptionsMixin
):
class TestSwUpdateOptionsPostUpdate(BaseTestSwUpdateOptionsPost, SwUpdateOptionsMixin):
"""Test class for post requests to update sw_update_opts

When a post request is performed for an existing sw_update_opts, it's updated.

@ -231,12 +236,16 @@ class TestSwUpdateOptionsPostUpdate(
self._create_sw_update_opts_default()

self._mock_object(
db_api, 'sw_update_opts_update', 'mock_sw_update_opts',
db_api.sw_update_opts_update
db_api,
"sw_update_opts_update",
"mock_sw_update_opts",
db_api.sw_update_opts_update,
)
self._mock_object(
db_api, 'sw_update_opts_default_update', 'mock_sw_update_opts_default',
db_api.sw_update_opts_default_update
db_api,
"sw_update_opts_default_update",
"mock_sw_update_opts_default",
db_api.sw_update_opts_default_update,
)

@mock.patch.object(db_api, "sw_update_opts_default_update")

@ -250,14 +259,15 @@ class TestSwUpdateOptionsPostUpdate(
mock_db_api.side_effect = FakeException()

self.assertRaises(
FakeException, self.method, self.url, headers=self.headers,
params=self.params
FakeException,
self.method,
self.url,
headers=self.headers,
params=self.params,
)


class TestSwUpdateOptionsPostCreate(
BaseTestSwUpdateOptionsPost, SwUpdateOptionsMixin
):
class TestSwUpdateOptionsPostCreate(BaseTestSwUpdateOptionsPost, SwUpdateOptionsMixin):
"""Test class for post requests to create sw_update_opts

When a post request is performed for an existing sw_update_opts, it's updated.

@ -268,12 +278,16 @@ class TestSwUpdateOptionsPostCreate(
super().setUp()

self._mock_object(
db_api, 'sw_update_opts_create', 'mock_sw_update_opts',
db_api.sw_update_opts_create
db_api,
"sw_update_opts_create",
"mock_sw_update_opts",
db_api.sw_update_opts_create,
)
self._mock_object(
db_api, 'sw_update_opts_default_create', 'mock_sw_update_opts_default',
db_api.sw_update_opts_default_create
db_api,
"sw_update_opts_default_create",
"mock_sw_update_opts_default",
db_api.sw_update_opts_default_create,
)

@mock.patch.object(db_api, "sw_update_opts_default_create")

@ -290,8 +304,11 @@ class TestSwUpdateOptionsPostCreate(
mock_db_api.side_effect = FakeException()

self.assertRaises(
FakeException, self.method, self.url, headers=self.headers,
params=self.params
FakeException,
self.method,
self.url,
headers=self.headers,
params=self.params,
)

@ -309,12 +326,16 @@ class TestSwUpdateOptionsDelete(
self._create_sw_update_opts_default()

self._mock_object(
db_api, 'sw_update_opts_destroy', 'mock_sw_update_opts',
db_api.sw_update_opts_destroy
db_api,
"sw_update_opts_destroy",
"mock_sw_update_opts",
db_api.sw_update_opts_destroy,
)
self._mock_object(
db_api, 'sw_update_opts_default_destroy', 'mock_sw_update_opts_default',
db_api.sw_update_opts_default_destroy
db_api,
"sw_update_opts_default_destroy",
"mock_sw_update_opts_default",
db_api.sw_update_opts_default_destroy,
)

def test_delete_succeeds_with_generic_exception_for_default_region_name(self):

@ -42,7 +42,7 @@ class BaseTestSwUpdateStrategyController(DCManagerApiTest):
def _mock_rpc_orchestrator_client(self):
"""Mock rpc's manager orchestrator client"""

mock_patch = mock.patch.object(rpc_client, 'ManagerOrchestratorClient')
mock_patch = mock.patch.object(rpc_client, "ManagerOrchestratorClient")
self.mock_rpc_orchestrator_client = mock_patch.start()
self.addCleanup(mock_patch.stop)

@ -84,8 +84,8 @@ class TestSwUpdateStrategyGet(BaseTestSwUpdateStrategyGet):
)

def _assert_response_payload(self, response):
self.assertEqual(response.json['type'], consts.SW_UPDATE_TYPE_PATCH)
self.assertEqual(response.json['state'], consts.SW_UPDATE_STATE_INITIAL)
self.assertEqual(response.json["type"], consts.SW_UPDATE_TYPE_PATCH)
self.assertEqual(response.json["state"], consts.SW_UPDATE_STATE_INITIAL)

def test_get_succeeds(self):
"""Test get succeeds"""

@ -140,8 +140,9 @@ class TestSwUpdateStrategyGet(BaseTestSwUpdateStrategyGet):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.NOT_FOUND,
f"Strategy of type '{consts.SW_UPDATE_TYPE_PATCH}' not found"
response,
http.client.NOT_FOUND,
f"Strategy of type '{consts.SW_UPDATE_TYPE_PATCH}' not found",
)


@ -165,8 +166,7 @@ class TestSwUpdateStrategyGetSteps(BaseTestSwUpdateStrategyGet):

self._assert_response(response)
self.assertEqual(
response.json['strategy-steps'][0]['state'],
consts.STRATEGY_STATE_INITIAL
response.json["strategy-steps"][0]["state"], consts.STRATEGY_STATE_INITIAL
)

def test_get_steps_succeeds_with_subcloud_name(self):

@ -177,7 +177,7 @@ class TestSwUpdateStrategyGetSteps(BaseTestSwUpdateStrategyGet):
response = self._send_request()

self._assert_response(response)
self.assertEqual(response.json['cloud'], self.subcloud.name)
self.assertEqual(response.json["cloud"], self.subcloud.name)

def test_get_steps_fails_with_inexistent_subcloud(self):
"""Test get steps fails with inexistent subcloud"""

@ -222,12 +222,15 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
"subcloud-apply-type": consts.SUBCLOUD_APPLY_TYPE_PARALLEL,
|
||||
"max-parallel-subclouds": "10",
|
||||
"stop-on-failure": "true",
|
||||
"release_id": "stx-10.0.0"
|
||||
"release_id": "stx-10.0.0",
|
||||
}
|
||||
|
||||
self.mock_rpc_orchestrator_client().\
|
||||
create_sw_update_strategy.return_value = (
|
||||
"create_sw_update_strategy", {"payload": self.params}
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.return_value = (
|
||||
"create_sw_update_strategy",
|
||||
{"payload": self.params},
|
||||
)
|
||||
self.create_update_strategy = (
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy
|
||||
)
|
||||
|
||||
def test_post_succeeds(self):
|
||||
@ -236,8 +239,7 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_response(response)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_called_once()
|
||||
self.create_update_strategy.assert_called_once()
|
||||
|
||||
def test_post_succeeds_with_force_option(self):
|
||||
"""Test post succeeds with force option"""
|
||||
@ -248,8 +250,7 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_response(response)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_called_once()
|
||||
self.create_update_strategy.assert_called_once()
|
||||
|
||||
def test_post_fails_with_invalid_type(self):
|
||||
"""Test post fails with invalid type"""
|
||||
@ -261,8 +262,7 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST, "type invalid"
|
||||
)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_not_called()
|
||||
self.create_update_strategy.assert_not_called()
|
||||
|
||||
def test_post_fails_with_invalid_subcloud_apply_type(self):
|
||||
"""Test post fails with invalid subcloud apply type"""
|
||||
@ -274,8 +274,7 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST, "subcloud-apply-type invalid"
|
||||
)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_not_called()
|
||||
self.create_update_strategy.assert_not_called()
|
||||
|
||||
def test_post_fails_with_invalid_max_parallel_subclouds(self):
|
||||
"""Test post fails with invalid max parallel subclouds"""
|
||||
@ -288,17 +287,20 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST,
|
||||
"max-parallel-subclouds invalid", call_count=index
|
||||
response,
|
||||
http.client.BAD_REQUEST,
|
||||
"max-parallel-subclouds invalid",
|
||||
call_count=index,
|
||||
)
|
||||
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_not_called()
|
||||
self.create_update_strategy.assert_not_called()
|
||||
|
||||
def test_post_fails_with_invalid_stop_on_failure(self):
|
||||
"""Test post fails with invalid stop on failure"""
|
||||
|
||||
invalid_values = ["fake", ]
|
||||
invalid_values = [
|
||||
"fake",
|
||||
]
|
||||
|
||||
for index, invalid_value in enumerate(invalid_values, start=1):
|
||||
self.params["stop-on-failure"] = invalid_value
|
||||
@ -306,12 +308,13 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST,
|
||||
"stop-on-failure invalid", call_count=index
|
||||
response,
|
||||
http.client.BAD_REQUEST,
|
||||
"stop-on-failure invalid",
|
||||
call_count=index,
|
||||
)
|
||||
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_not_called()
|
||||
self.create_update_strategy.assert_not_called()
|
||||
|
||||
def test_post_fails_with_invalid_force(self):
|
||||
"""Test post fails with invalid force"""
|
||||
@ -323,8 +326,7 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST, "force invalid"
|
||||
)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_not_called()
|
||||
self.create_update_strategy.assert_not_called()
|
||||
|
||||
def test_post_fails_with_force_without_cloud_name(self):
|
||||
"""Test post fails with force without cloud name"""
|
||||
@ -334,11 +336,12 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST, "The --force option can only be "
|
||||
"applied for a single subcloud. Please specify the subcloud name."
|
||||
response,
|
||||
http.client.BAD_REQUEST,
|
||||
"The --force option can only be "
|
||||
"applied for a single subcloud. Please specify the subcloud name.",
|
||||
)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_not_called()
|
||||
self.create_update_strategy.assert_not_called()
|
||||
|
||||
def test_post_succeeds_with_force_all_types(self):
|
||||
"""Test post succeeds with force all types
|
||||
@ -352,8 +355,7 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_response(response)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_called_once()
|
||||
self.create_update_strategy.assert_called_once()
|
||||
|
||||
def test_post_fails_with_inexistent_subcloud_group_name(self):
|
||||
"""Test post fails with inexistent subcloud group name"""
|
||||
@ -369,11 +371,9 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST, "Invalid group_id",
|
||||
call_count=index
|
||||
response, http.client.BAD_REQUEST, "Invalid group_id", call_count=index
|
||||
)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_not_called()
|
||||
self.create_update_strategy.assert_not_called()
|
||||
|
||||
def test_post_fails_with_cloud_name_and_subcloud_group(self):
|
||||
"""Test post fails with cloud name and subcloud group"""
|
||||
@ -390,11 +390,12 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST, "cloud_name and subcloud_group "
|
||||
"are mutually exclusive", call_count=index
|
||||
response,
|
||||
http.client.BAD_REQUEST,
|
||||
"cloud_name and subcloud_group are mutually exclusive",
|
||||
call_count=index,
|
||||
)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_not_called()
|
||||
self.create_update_strategy.assert_not_called()
|
||||
|
||||
def test_post_fails_with_subcloud_group_and_other_values(self):
|
||||
"""Test post fails with subcloud group and other values
|
||||
@ -411,12 +412,13 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST, "subcloud-apply-type and "
|
||||
"max-parallel-subclouds are not supported when subcloud_group is "
|
||||
"applied", call_count=index
|
||||
response,
|
||||
http.client.BAD_REQUEST,
|
||||
"subcloud-apply-type and max-parallel-subclouds "
|
||||
"are not supported when subcloud_group is applied",
|
||||
call_count=index,
|
||||
)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_not_called()
|
||||
self.create_update_strategy.assert_not_called()
|
||||
|
||||
def test_post_fails_without_params(self):
|
||||
"""Test post fails without params"""
|
||||
@ -428,8 +430,7 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST, "Body required"
|
||||
)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_not_called()
|
||||
self.create_update_strategy.assert_not_called()
|
||||
|
||||
def test_post_fails_without_type(self):
|
||||
"""Test post fails without type"""
|
||||
@ -441,37 +442,34 @@ class TestSwUpdateStrategyPost(BaseTestSwUpdateStrategyPost):
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.BAD_REQUEST, "type required"
|
||||
)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_not_called()
|
||||
self.create_update_strategy.assert_not_called()
|
||||
|
||||
def test_post_fails_with_rpc_remote_error(self):
|
||||
"""Test post fails with rpc remote error"""
|
||||
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.side_effect = \
|
||||
RemoteError("msg", "value")
|
||||
self.create_update_strategy.side_effect = RemoteError("msg", "value")
|
||||
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.UNPROCESSABLE_ENTITY, "Unable to create strategy "
|
||||
f"of type '{consts.SW_UPDATE_TYPE_SOFTWARE}': value"
|
||||
response,
|
||||
http.client.UNPROCESSABLE_ENTITY,
|
||||
"Unable to create strategy "
|
||||
f"of type '{consts.SW_UPDATE_TYPE_SOFTWARE}': value",
|
||||
)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_called_once()
|
||||
self.create_update_strategy.assert_called_once()
|
||||
|
||||
def test_post_fails_with_rpc_generic_exception(self):
|
||||
"""Test post fails with rpc generic exception"""
|
||||
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.side_effect = \
|
||||
Exception()
|
||||
self.create_update_strategy.side_effect = Exception()
|
||||
|
||||
response = self._send_request()
|
||||
|
||||
self._assert_pecan_and_response(
|
||||
response, http.client.INTERNAL_SERVER_ERROR, "Unable to create strategy"
|
||||
)
|
||||
self.mock_rpc_orchestrator_client().create_sw_update_strategy.\
|
||||
assert_called_once()
|
||||
self.create_update_strategy.assert_called_once()
|
||||
|
||||
|
||||
class TestSwUpdateStrategyPostActions(BaseTestSwUpdateStrategyPost):
@ -483,10 +481,18 @@ class TestSwUpdateStrategyPostActions(BaseTestSwUpdateStrategyPost):
self.url = f"{self.url}/actions"

self.mock_rpc_orchestrator_client().apply_sw_update_strategy.return_value = (
"apply_sw_update_strategy", {"update_type": None}
"apply_sw_update_strategy",
{"update_type": None},
)
self.mock_rpc_orchestrator_client().abort_sw_update_strategy.return_value = (
"abort_sw_update_strategy", {"update_type": None}
"abort_sw_update_strategy",
{"update_type": None},
)
self.apply_update_strategy = (
self.mock_rpc_orchestrator_client().apply_sw_update_strategy
)
self.abort_update_strategy = (
self.mock_rpc_orchestrator_client().abort_sw_update_strategy
)

def test_post_actions_succeeds(self):
@ -501,10 +507,8 @@ class TestSwUpdateStrategyPostActions(BaseTestSwUpdateStrategyPost):

self._assert_response(response)

self.mock_rpc_orchestrator_client().apply_sw_update_strategy.\
assert_called_once()
self.mock_rpc_orchestrator_client().abort_sw_update_strategy.\
assert_called_once()
self.apply_update_strategy.assert_called_once()
self.abort_update_strategy.assert_called_once()

def test_post_actions_succeeds_with_type(self):
"""Test post actions succeeds with type"""
@ -520,10 +524,8 @@ class TestSwUpdateStrategyPostActions(BaseTestSwUpdateStrategyPost):

self._assert_response(response)

self.mock_rpc_orchestrator_client().apply_sw_update_strategy.\
assert_called_once()
self.mock_rpc_orchestrator_client().abort_sw_update_strategy.\
assert_called_once()
self.apply_update_strategy.assert_called_once()
self.abort_update_strategy.assert_called_once()

def test_post_actions_succeeds_with_inexistent_action(self):
"""Test post actions succeeds with inexistent action
@ -536,10 +538,8 @@ class TestSwUpdateStrategyPostActions(BaseTestSwUpdateStrategyPost):
response = self._send_request()

self._assert_response(response)
self.mock_rpc_orchestrator_client().apply_sw_update_strategy.\
assert_not_called()
self.mock_rpc_orchestrator_client().abort_sw_update_strategy.\
assert_not_called()
self.apply_update_strategy.assert_not_called()
self.abort_update_strategy.assert_not_called()

def test_post_actions_fails_without_action(self):
"""Test post actions fails without action"""
@ -551,18 +551,14 @@ class TestSwUpdateStrategyPostActions(BaseTestSwUpdateStrategyPost):
self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "action required"
)
self.mock_rpc_orchestrator_client().apply_sw_update_strategy.\
assert_not_called()
self.mock_rpc_orchestrator_client().abort_sw_update_strategy.\
assert_not_called()
self.apply_update_strategy.assert_not_called()
self.abort_update_strategy.assert_not_called()

def test_post_actions_fails_with_rpc_remote_error(self):
"""Test post actions fails with rpc remote error"""

self.mock_rpc_orchestrator_client().apply_sw_update_strategy.side_effect = \
RemoteError("msg", "value")
self.mock_rpc_orchestrator_client().abort_sw_update_strategy.side_effect = \
RemoteError("msg", "value")
self.apply_update_strategy.side_effect = RemoteError("msg", "value")
self.abort_update_strategy.side_effect = RemoteError("msg", "value")

actions = [consts.SW_UPDATE_ACTION_APPLY, consts.SW_UPDATE_ACTION_ABORT]

@ -572,22 +568,20 @@ class TestSwUpdateStrategyPostActions(BaseTestSwUpdateStrategyPost):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.UNPROCESSABLE_ENTITY, f"Unable to {action} "
f"strategy of type 'None': value", call_count=index
response,
http.client.UNPROCESSABLE_ENTITY,
f"Unable to {action} strategy of type 'None': value",
call_count=index,
)

self.mock_rpc_orchestrator_client().apply_sw_update_strategy.\
assert_called_once()
self.mock_rpc_orchestrator_client().abort_sw_update_strategy.\
assert_called_once()
self.apply_update_strategy.assert_called_once()
self.abort_update_strategy.assert_called_once()

def test_post_actions_fails_with_rpc_generic_exception(self):
"""Test post actions fails with rpc generic exception"""

self.mock_rpc_orchestrator_client().apply_sw_update_strategy.side_effect = \
Exception()
self.mock_rpc_orchestrator_client().abort_sw_update_strategy.side_effect = \
Exception()
self.apply_update_strategy.side_effect = Exception()
self.abort_update_strategy.side_effect = Exception()

actions = [consts.SW_UPDATE_ACTION_APPLY, consts.SW_UPDATE_ACTION_ABORT]

@ -597,14 +591,14 @@ class TestSwUpdateStrategyPostActions(BaseTestSwUpdateStrategyPost):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
f"Unable to {action} strategy", call_count=index
response,
http.client.INTERNAL_SERVER_ERROR,
f"Unable to {action} strategy",
call_count=index,
)

self.mock_rpc_orchestrator_client().apply_sw_update_strategy.\
assert_called_once()
self.mock_rpc_orchestrator_client().abort_sw_update_strategy.\
assert_called_once()
self.apply_update_strategy.assert_called_once()
self.abort_update_strategy.assert_called_once()

class TestSwUpdateStrategyDelete(BaseTestSwUpdateStrategyController):
@ -615,8 +609,13 @@ class TestSwUpdateStrategyDelete(BaseTestSwUpdateStrategyController):

self.method = self.app.delete

self.mock_rpc_orchestrator_client().delete_sw_update_strategy.\
return_value = ("delete_sw_update_strategy", {"update_type": None})
self.mock_rpc_orchestrator_client().delete_sw_update_strategy.return_value = (
"delete_sw_update_strategy",
{"update_type": None},
)
self.delete_update_strategy = (
self.mock_rpc_orchestrator_client().delete_sw_update_strategy
)

def test_delete_succeeds(self):
"""Test delete succeeds"""
@ -624,8 +623,7 @@ class TestSwUpdateStrategyDelete(BaseTestSwUpdateStrategyController):
response = self._send_request()

self._assert_response(response)
self.mock_rpc_orchestrator_client().delete_sw_update_strategy.\
assert_called_once()
self.delete_update_strategy.assert_called_once()

def test_delete_succeeds_with_type(self):
"""Test delete succeeds with type"""
@ -635,34 +633,30 @@ class TestSwUpdateStrategyDelete(BaseTestSwUpdateStrategyController):
response = self._send_request()

self._assert_response(response)
self.mock_rpc_orchestrator_client().delete_sw_update_strategy.\
assert_called_once()
self.delete_update_strategy.assert_called_once()

def test_delete_fails_with_rpc_remote_error(self):
"""Test delete fails with rpc remote error"""

self.mock_rpc_orchestrator_client().delete_sw_update_strategy.side_effect = \
RemoteError("msg", "value")
self.delete_update_strategy.side_effect = RemoteError("msg", "value")

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.UNPROCESSABLE_ENTITY,
"Unable to delete strategy of type 'None': value"
response,
http.client.UNPROCESSABLE_ENTITY,
"Unable to delete strategy of type 'None': value",
)
self.mock_rpc_orchestrator_client().delete_sw_update_strategy.\
assert_called_once()
self.delete_update_strategy.assert_called_once()

def test_delete_fails_with_rpc_generic_exception(self):
"""Test delete fails with rpc generic exception"""

self.mock_rpc_orchestrator_client().delete_sw_update_strategy.side_effect = \
Exception()
self.delete_update_strategy.side_effect = Exception()

response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR, "Unable to delete strategy"
)
self.mock_rpc_orchestrator_client().delete_sw_update_strategy.\
assert_called_once()
self.delete_update_strategy.assert_called_once()
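The strategy-controller hunks above repeatedly swap backslash-continued mock chains for a short alias bound once in setUp. A self-contained sketch of the same pattern using only unittest.mock (the class and names here are hypothetical, for illustration):

import unittest
from unittest import mock


class AliasPatternTest(unittest.TestCase):
    def setUp(self):
        self.client = mock.MagicMock()
        # MagicMock memoizes return_value, so self.client() returns the same
        # child mock on every call; binding the chain once keeps assertions
        # under the line limit with no backslash continuations.
        self.create_update_strategy = self.client().create_sw_update_strategy

    def test_alias_tracks_calls(self):
        self.client().create_sw_update_strategy("payload")
        self.create_update_strategy.assert_called_once_with("payload")


if __name__ == "__main__":
    unittest.main()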
@ -22,76 +22,90 @@ from dcmanager.tests.unit.api.v1.controllers.mixins import UpdateMixin
from dcmanager.tests.unit.common import fake_subcloud

SAMPLE_SYSTEM_PEER_UUID = str(uuid.uuid4())
SAMPLE_SYSTEM_PEER_NAME = 'SystemPeer1'
SAMPLE_MANAGER_ENDPOINT = 'http://127.0.0.1:5000'
SAMPLE_MANAGER_USERNAME = 'admin'
SAMPLE_MANAGER_PASSWORD = 'password'
SAMPLE_ADMINISTRATIVE_STATE = 'enabled'
SAMPLE_SYSTEM_PEER_NAME = "SystemPeer1"
SAMPLE_MANAGER_ENDPOINT = "http://127.0.0.1:5000"
SAMPLE_MANAGER_USERNAME = "admin"
SAMPLE_MANAGER_PASSWORD = "password"
SAMPLE_ADMINISTRATIVE_STATE = "enabled"
SAMPLE_HEARTBEAT_INTERVAL = 10
SAMPLE_HEARTBEAT_FAILURE_THRESHOLD = 3
SAMPLE_HEARTBEAT_FAILURES_POLICY = 'alarm'
SAMPLE_HEARTBEAT_FAILURES_POLICY = "alarm"
SAMPLE_HEARTBEAT_MAINTENANCE_TIMEOUT = 600
SAMPLE_PEER_CONTROLLER_GATEWAY_IP = '128.128.128.1'
SAMPLE_PEER_CONTROLLER_GATEWAY_IP = "128.128.128.1"


class SystemPeersAPIMixin(APIMixin):
API_PREFIX = '/v1.0/system-peers'
RESULT_KEY = 'system_peers'
API_PREFIX = "/v1.0/system-peers"
RESULT_KEY = "system_peers"
EXPECTED_FIELDS = [
'id', 'peer-uuid', 'peer-name', 'manager-endpoint', 'manager-username',
'peer-controller-gateway-address', 'administrative-state',
'heartbeat-interval', 'heartbeat-failure-threshold',
'heartbeat-failure-policy', 'heartbeat-maintenance-timeout', 'created-at',
'updated-at'
"id",
"peer-uuid",
"peer-name",
"manager-endpoint",
"manager-username",
"peer-controller-gateway-address",
"administrative-state",
"heartbeat-interval",
"heartbeat-failure-threshold",
"heartbeat-failure-policy",
"heartbeat-maintenance-timeout",
"created-at",
"updated-at",
]

def _get_test_system_peer_dict(self, data_type, **kw):
# id should not be part of the structure
system_peer = {
'peer_uuid': kw.get('peer_uuid', SAMPLE_SYSTEM_PEER_UUID),
'peer_name': kw.get('peer_name', SAMPLE_SYSTEM_PEER_NAME),
'administrative_state': kw.get(
'administrative_state', SAMPLE_ADMINISTRATIVE_STATE
"peer_uuid": kw.get("peer_uuid", SAMPLE_SYSTEM_PEER_UUID),
"peer_name": kw.get("peer_name", SAMPLE_SYSTEM_PEER_NAME),
"administrative_state": kw.get(
"administrative_state", SAMPLE_ADMINISTRATIVE_STATE
),
'heartbeat_interval': kw.get(
'heartbeat_interval', SAMPLE_HEARTBEAT_INTERVAL
"heartbeat_interval": kw.get(
"heartbeat_interval", SAMPLE_HEARTBEAT_INTERVAL
),
'heartbeat_failure_threshold': kw.get(
'heartbeat_failure_threshold', SAMPLE_HEARTBEAT_FAILURE_THRESHOLD
"heartbeat_failure_threshold": kw.get(
"heartbeat_failure_threshold", SAMPLE_HEARTBEAT_FAILURE_THRESHOLD
),
'heartbeat_failure_policy': kw.get(
'heartbeat_failure_policy', SAMPLE_HEARTBEAT_FAILURES_POLICY
"heartbeat_failure_policy": kw.get(
"heartbeat_failure_policy", SAMPLE_HEARTBEAT_FAILURES_POLICY
),
"heartbeat_maintenance_timeout": kw.get(
"heartbeat_maintenance_timeout", SAMPLE_HEARTBEAT_MAINTENANCE_TIMEOUT
),
'heartbeat_maintenance_timeout': kw.get(
'heartbeat_maintenance_timeout', SAMPLE_HEARTBEAT_MAINTENANCE_TIMEOUT
)
}

if data_type == 'db':
system_peer['endpoint'] = \
kw.get('manager_endpoint', SAMPLE_MANAGER_ENDPOINT)
system_peer['username'] = \
kw.get('manager_username', SAMPLE_MANAGER_USERNAME)
system_peer['password'] = \
kw.get('manager_password', SAMPLE_MANAGER_PASSWORD)
system_peer['gateway_ip'] = kw.get(
'peer_controller_gateway_ip', SAMPLE_PEER_CONTROLLER_GATEWAY_IP
if data_type == "db":
system_peer["endpoint"] = kw.get(
"manager_endpoint", SAMPLE_MANAGER_ENDPOINT
)
system_peer["username"] = kw.get(
"manager_username", SAMPLE_MANAGER_USERNAME
)
system_peer["password"] = kw.get(
"manager_password", SAMPLE_MANAGER_PASSWORD
)
system_peer["gateway_ip"] = kw.get(
"peer_controller_gateway_ip", SAMPLE_PEER_CONTROLLER_GATEWAY_IP
)
else:
system_peer['manager_endpoint'] = \
kw.get('manager_endpoint', SAMPLE_MANAGER_ENDPOINT)
system_peer['manager_username'] = \
kw.get('manager_username', SAMPLE_MANAGER_USERNAME)
system_peer['manager_password'] = \
kw.get('manager_password', SAMPLE_MANAGER_PASSWORD)
system_peer['peer_controller_gateway_address'] = kw.get(
'peer_controller_gateway_ip', SAMPLE_PEER_CONTROLLER_GATEWAY_IP
system_peer["manager_endpoint"] = kw.get(
"manager_endpoint", SAMPLE_MANAGER_ENDPOINT
)
system_peer["manager_username"] = kw.get(
"manager_username", SAMPLE_MANAGER_USERNAME
)
system_peer["manager_password"] = kw.get(
"manager_password", SAMPLE_MANAGER_PASSWORD
)
system_peer["peer_controller_gateway_address"] = kw.get(
"peer_controller_gateway_ip", SAMPLE_PEER_CONTROLLER_GATEWAY_IP
)

return system_peer

def _post_get_test_system_peer(self, **kw):
return self._get_test_system_peer_dict('dict', **kw)
return self._get_test_system_peer_dict("dict", **kw)

# The following methods are required for subclasses of APIMixin
def get_api_prefix(self):
@ -107,14 +121,14 @@ class SystemPeersAPIMixin(APIMixin):
return []

def _create_db_object(self, context, **kw):
creation_fields = self._get_test_system_peer_dict('db', **kw)
creation_fields = self._get_test_system_peer_dict("db", **kw)
return db_api.system_peer_create(context, **creation_fields)

def get_post_object(self):
return self._post_get_test_system_peer()

def get_update_object(self):
return {'peer_controller_gateway_address': '192.168.205.1'}
return {"peer_controller_gateway_address": "192.168.205.1"}


class SystemPeersPropertiesValidationMixin(object):
@ -142,7 +156,7 @@ class SystemPeersPropertiesValidationMixin(object):
response, http.client.BAD_REQUEST, "Body required"
)

@mock.patch.object(json, 'loads')
@mock.patch.object(json, "loads")
def test_request_fails_with_json_loads_exception(self, mock_json_loads):
"""Test request fails with json loads exception"""

@ -205,8 +219,9 @@ class SystemPeersPropertiesValidationMixin(object):
"""Test request fails with invalid manager endpoint"""

invalid_values = [
"", "ftp://somepath",
"a" * system_peers.MAX_SYSTEM_PEER_MANAGER_ENDPOINT_LEN
"",
"ftp://somepath",
"a" * system_peers.MAX_SYSTEM_PEER_MANAGER_ENDPOINT_LEN,
]
self._remove_empty_string_in_patch_request(invalid_values)

@ -216,16 +231,16 @@ class SystemPeersPropertiesValidationMixin(object):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Invalid peer manager_endpoint",
call_count=index
response,
http.client.BAD_REQUEST,
"Invalid peer manager_endpoint",
call_count=index,
)

def test_request_fails_with_invalid_manager_username(self):
"""Test request fails with invalid manager username"""

invalid_values = [
"", "a" * system_peers.MAX_SYSTEM_PEER_MANAGER_USERNAME_LEN
]
invalid_values = ["", "a" * system_peers.MAX_SYSTEM_PEER_MANAGER_USERNAME_LEN]
self._remove_empty_string_in_patch_request(invalid_values)

for index, invalid_value in enumerate(invalid_values, start=1):
@ -234,16 +249,16 @@ class SystemPeersPropertiesValidationMixin(object):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Invalid peer manager_username",
call_count=index
response,
http.client.BAD_REQUEST,
"Invalid peer manager_username",
call_count=index,
)

def test_request_fails_with_invalid_manager_password(self):
"""Test request fails with invalid manager password"""

invalid_values = [
"", "a" * system_peers.MAX_SYSTEM_PEER_MANAGER_PASSWORD_LEN
]
invalid_values = ["", "a" * system_peers.MAX_SYSTEM_PEER_MANAGER_PASSWORD_LEN]
self._remove_empty_string_in_patch_request(invalid_values)

for index, invalid_value in enumerate(invalid_values, start=1):
@ -252,16 +267,19 @@ class SystemPeersPropertiesValidationMixin(object):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Invalid peer manager_password",
call_count=index
response,
http.client.BAD_REQUEST,
"Invalid peer manager_password",
call_count=index,
)

def test_request_fails_with_invalid_peer_controller_gateway_address(self):
"""Test request fails with invalid peer controller gateway address"""

invalid_values = [
"", "a" * system_peers.MAX_SYSTEM_PEER_STRING_DEFAULT_LEN,
"192.168.0.0.1"
"",
"a" * system_peers.MAX_SYSTEM_PEER_STRING_DEFAULT_LEN,
"192.168.0.0.1",
]
self._remove_empty_string_in_patch_request(invalid_values)

@ -271,8 +289,10 @@ class SystemPeersPropertiesValidationMixin(object):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Invalid peer peer_controller_gateway_address", call_count=index
response,
http.client.BAD_REQUEST,
"Invalid peer peer_controller_gateway_address",
call_count=index,
)

def test_request_fails_with_invalid_administrative_state(self):
@ -298,7 +318,8 @@ class SystemPeersPropertiesValidationMixin(object):
invalid_values = [
system_peers.MIN_SYSTEM_PEER_HEARTBEAT_INTERVAL - 1,
system_peers.MAX_SYSTEM_PEER_HEARTBEAT_INTERVAL + 1,
-1, "fake"
-1,
"fake",
]

for index, invalid_value in enumerate(invalid_values, start=1):
@ -307,8 +328,10 @@ class SystemPeersPropertiesValidationMixin(object):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST, "Invalid peer heartbeat_interval",
call_count=index
response,
http.client.BAD_REQUEST,
"Invalid peer heartbeat_interval",
call_count=index,
)

def test_request_fails_with_invalid_heartbeat_failure_threshold(self):
@ -317,7 +340,8 @@ class SystemPeersPropertiesValidationMixin(object):
invalid_values = [
system_peers.MIN_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD - 1,
system_peers.MAX_SYSTEM_PEER_HEARTBEAT_FAILURE_THRESHOLD + 1,
-1, "fake"
-1,
"fake",
]

# When the request method is patch, the invalid_value 0 results in the if
@ -334,8 +358,10 @@ class SystemPeersPropertiesValidationMixin(object):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Invalid peer heartbeat_failure_threshold", call_count=index
response,
http.client.BAD_REQUEST,
"Invalid peer heartbeat_failure_threshold",
call_count=index,
)

def test_request_fails_with_invalid_heartbeat_failure_policy(self):
@ -349,8 +375,7 @@ class SystemPeersPropertiesValidationMixin(object):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Invalid peer heartbeat_failure_policy"
response, http.client.BAD_REQUEST, "Invalid peer heartbeat_failure_policy"
)

def test_request_fails_with_invalid_heartbeat_maintenance_timeout(self):
@ -359,7 +384,8 @@ class SystemPeersPropertiesValidationMixin(object):
invalid_values = [
system_peers.MIN_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT - 1,
system_peers.MAX_SYSTEM_PEER_HEARTBEAT_MAINTENACE_TIMEOUT + 1,
-1, "fake"
-1,
"fake",
]

for index, invalid_value in enumerate(invalid_values, start=1):
@ -368,8 +394,10 @@ class SystemPeersPropertiesValidationMixin(object):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Invalid peer heartbeat_maintenance_timeout", call_count=index
response,
http.client.BAD_REQUEST,
"Invalid peer heartbeat_maintenance_timeout",
call_count=index,
)


@ -467,8 +495,7 @@ class TestSystemPeersGet(BaseTestSystemPeersController, GetMixin):


class TestSystemPeersPost(
BaseTestSystemPeersController, SystemPeersPropertiesValidationMixin,
PostJSONMixin
BaseTestSystemPeersController, SystemPeersPropertiesValidationMixin, PostJSONMixin
):
"""Test class for post requests"""

@ -488,8 +515,9 @@ class TestSystemPeersPost(
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.CONFLICT,
"A system peer with this UUID already exists"
response,
http.client.CONFLICT,
"A system peer with this UUID already exists",
)

@mock.patch.object(db_api, "system_peer_create")
@ -513,8 +541,7 @@ class TestSystemPeersPost(
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to create system peer"
response, http.client.INTERNAL_SERVER_ERROR, "Unable to create system peer"
)


@ -549,7 +576,7 @@ class TestSystemPeersPatch(BaseTestSystemPeersPatch, UpdateMixin):
# Overrides validate_updated_fields from UpdateMixin
def validate_updated_fields(self, sub_dict, full_obj):
for key, value in sub_dict.items():
key = key.replace('_', '-')
key = key.replace("_", "-")
self.assertEqual(value, full_obj.get(key))

def test_patch_fails_with_inexistent_system_peer(self):
@ -606,8 +633,7 @@ class TestSystemPeersPatch(BaseTestSystemPeersPatch, UpdateMixin):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to update system peer"
response, http.client.INTERNAL_SERVER_ERROR, "Unable to update system peer"
)


@ -650,9 +676,13 @@ class TestSystemPeersDelete(BaseTestSystemPeersController, DeleteMixin):
subcloud = fake_subcloud.create_fake_subcloud(self.ctx)

db_api.peer_group_association_create(
self.ctx, subcloud.peer_group_id, system_peer.id,
consts.PEER_GROUP_PRIMARY_PRIORITY, consts.ASSOCIATION_TYPE_PRIMARY,
consts.ASSOCIATION_SYNC_STATUS_IN_SYNC, "None"
self.ctx,
subcloud.peer_group_id,
system_peer.id,
consts.PEER_GROUP_PRIMARY_PRIORITY,
consts.ASSOCIATION_TYPE_PRIMARY,
consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
"None",
)

self.url = f"{self.url}/{system_peer.peer_uuid}"
@ -660,8 +690,9 @@ class TestSystemPeersDelete(BaseTestSystemPeersController, DeleteMixin):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.BAD_REQUEST,
"Cannot delete a system peer which is associated with peer group."
response,
http.client.BAD_REQUEST,
"Cannot delete a system peer which is associated with peer group.",
)

@mock.patch.object(db_api, "system_peer_destroy")
@ -693,6 +724,5 @@ class TestSystemPeersDelete(BaseTestSystemPeersController, DeleteMixin):
response = self._send_request()

self._assert_pecan_and_response(
response, http.client.INTERNAL_SERVER_ERROR,
"Unable to delete system peer"
response, http.client.INTERNAL_SERVER_ERROR, "Unable to delete system peer"
)
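Most of the system-peer hunks above are Black's string normalization at work: single-quoted literals become double-quoted unless switching would force extra escapes. A short sketch of the rule (values are illustrative):

name = 'subcloud1'    # before
name = "subcloud1"    # after: Black prefers double quotes
quip = 'say "done"'   # left alone: double quotes would need escaping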
@ -1,4 +1,4 @@
# Copyright (c) 2021 Wind River Systems, Inc.
# Copyright (c) 2021, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -70,15 +70,17 @@ class FakeFmClientSubcloud3(object):

class FakeFmClientSubcloud4(object):
def get_alarm_summary(self):
raise exceptions.SubcloudNotFound(subcloud_id='subcloud4')
raise exceptions.SubcloudNotFound(subcloud_id="subcloud4")


class FakeOpenStackDriver(object):
def __init__(self, region_name='RegionOne'):
fm_clients = {'subcloud1': FakeFmClientSubcloud1,
'subcloud2': FakeFmClientSubcloud2,
'subcloud3': FakeFmClientSubcloud3,
'subcloud4': FakeFmClientSubcloud4}
def __init__(self, region_name="RegionOne"):
fm_clients = {
"subcloud1": FakeFmClientSubcloud1,
"subcloud2": FakeFmClientSubcloud2,
"subcloud3": FakeFmClientSubcloud3,
"subcloud4": FakeFmClientSubcloud4,
}
self.fm_client = fm_clients[region_name]()


@ -94,67 +96,78 @@ class TestAlarmAggregation(base.DCManagerTestCase):

@staticmethod
def alarms_to_dict(alarms):
return {'critical_alarms': alarms.critical_alarms,
'major_alarms': alarms.major_alarms,
'minor_alarms': alarms.minor_alarms,
'warnings': alarms.warnings,
'cloud_status': alarms.cloud_status,
}
return {
"critical_alarms": alarms.critical_alarms,
"major_alarms": alarms.major_alarms,
"minor_alarms": alarms.minor_alarms,
"warnings": alarms.warnings,
"cloud_status": alarms.cloud_status,
}

@mock.patch.object(alarm_aggregation, 'LOG')
@mock.patch.object(sdk, 'OpenStackDriver')
def test_update_alarm_summary(self, mock_openstack_driver,
mock_logging):
@mock.patch.object(alarm_aggregation, "LOG")
@mock.patch.object(sdk, "OpenStackDriver")
def test_update_alarm_summary(self, mock_openstack_driver, mock_logging):
mock_openstack_driver.side_effect = FakeOpenStackDriver
aam = alarm_aggregation.AlarmAggregation(self.ctxt)

fake_openstackdriver = FakeOpenStackDriver('subcloud1')
db_api.subcloud_alarms_create(self.ctx, 'subcloud1', values={})
fake_openstackdriver = FakeOpenStackDriver("subcloud1")
db_api.subcloud_alarms_create(self.ctx, "subcloud1", values={})
alarms_summary = aam.get_alarm_summary(
fake_openstackdriver.fm_client, 'subcloud1'
fake_openstackdriver.fm_client, "subcloud1"
)
aam.update_alarm_summary("subcloud1", alarms_summary)
alarms = db_api.subcloud_alarms_get(self.ctx, "subcloud1")
self.assertEqual(
self.alarms_to_dict(alarms),
{
"critical_alarms": 1,
"major_alarms": 2,
"minor_alarms": 3,
"warnings": 4,
"cloud_status": "critical",
},
)
aam.update_alarm_summary('subcloud1', alarms_summary)
alarms = db_api.subcloud_alarms_get(self.ctx, 'subcloud1')
self.assertEqual(self.alarms_to_dict(alarms),
{'critical_alarms': 1,
'major_alarms': 2,
'minor_alarms': 3,
'warnings': 4,
'cloud_status': 'critical'}
)

fake_openstackdriver = FakeOpenStackDriver('subcloud2')
db_api.subcloud_alarms_create(self.ctx, 'subcloud2', values={})
fake_openstackdriver = FakeOpenStackDriver("subcloud2")
db_api.subcloud_alarms_create(self.ctx, "subcloud2", values={})
alarms_summary = aam.get_alarm_summary(
fake_openstackdriver.fm_client, 'subcloud2'
fake_openstackdriver.fm_client, "subcloud2"
)
aam.update_alarm_summary("subcloud2", alarms_summary)
alarms = db_api.subcloud_alarms_get(self.ctx, "subcloud2")
self.assertEqual(
self.alarms_to_dict(alarms),
{
"critical_alarms": 0,
"major_alarms": 1,
"minor_alarms": 2,
"warnings": 3,
"cloud_status": "degraded",
},
)
aam.update_alarm_summary('subcloud2', alarms_summary)
alarms = db_api.subcloud_alarms_get(self.ctx, 'subcloud2')
self.assertEqual(self.alarms_to_dict(alarms),
{'critical_alarms': 0,
'major_alarms': 1,
'minor_alarms': 2,
'warnings': 3,
'cloud_status': 'degraded'}
)

fake_openstackdriver = FakeOpenStackDriver('subcloud3')
db_api.subcloud_alarms_create(self.ctx, 'subcloud3', values={})
fake_openstackdriver = FakeOpenStackDriver("subcloud3")
db_api.subcloud_alarms_create(self.ctx, "subcloud3", values={})
alarms_summary = aam.get_alarm_summary(
fake_openstackdriver.fm_client, 'subcloud3'
fake_openstackdriver.fm_client, "subcloud3"
)
aam.update_alarm_summary("subcloud3", alarms_summary)
alarms = db_api.subcloud_alarms_get(self.ctx, "subcloud3")
self.assertEqual(
self.alarms_to_dict(alarms),
{
"critical_alarms": 0,
"major_alarms": 0,
"minor_alarms": 0,
"warnings": 1,
"cloud_status": "OK",
},
)
aam.update_alarm_summary('subcloud3', alarms_summary)
alarms = db_api.subcloud_alarms_get(self.ctx, 'subcloud3')
self.assertEqual(self.alarms_to_dict(alarms),
{'critical_alarms': 0,
'major_alarms': 0,
'minor_alarms': 0,
'warnings': 1,
'cloud_status': 'OK'}
)

fake_openstackdriver = FakeOpenStackDriver('subcloud4')
aam.update_alarm_summary('subcloud4', alarms_summary)
mock_logging.error.assert_called_with("Failed to update alarms for "
"subcloud4. Error: Subcloud with "
"name subcloud4 doesn't exist.")
fake_openstackdriver = FakeOpenStackDriver("subcloud4")
aam.update_alarm_summary("subcloud4", alarms_summary)
mock_logging.error.assert_called_with(
"Failed to update alarms for "
"subcloud4. Error: Subcloud with "
"name subcloud4 doesn't exist."
)
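The firmware-audit hunks below reflow test methods that sit under five stacked mock.patch.object decorators. The mock parameters arrive in reverse decorator order because decorators apply bottom-up: the decorator nearest the function supplies the first argument. A self-contained sketch (Target and its methods are made up for illustration):

import unittest
from unittest import mock


class Target:
    @staticmethod
    def first():
        return "first"

    @staticmethod
    def second():
        return "second"


class StackedPatchTest(unittest.TestCase):
    # Bottom decorator -> first mock argument, matching how
    # mock_fw_openstack_driver leads the parameter lists below.
    @mock.patch.object(Target, "second")
    @mock.patch.object(Target, "first")
    def test_order(self, mock_first, mock_second):
        Target.first()
        Target.second()
        mock_first.assert_called_once()
        mock_second.assert_called_once()


if __name__ == "__main__":
    unittest.main()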
@ -28,9 +28,7 @@ CONF = cfg.CONF


class PCIDevice(object):
def __init__(self, uuid, name,
pciaddr, pvendor_id,
pdevice_id, enabled):
def __init__(self, uuid, name, pciaddr, pvendor_id, pdevice_id, enabled):
self.uuid = uuid
self.name = name
self.pciaddr = pciaddr
@ -68,8 +66,7 @@ class DeviceImage(object):


class DeviceImageState(object):
def __init__(self, pcidevice_uuid, image_uuid,
status):
def __init__(self, pcidevice_uuid, image_uuid, status):
self.pcidevice_uuid = pcidevice_uuid
self.image_uuid = image_uuid
self.status = status
@ -82,47 +79,33 @@ class Host(object):


class DeviceLabels(object):
def __init__(self, pcidevice_uuid,
label_key, label_value):
def __init__(self, pcidevice_uuid, label_key, label_value):
self.pcidevice_uuid = pcidevice_uuid
self.label_key = label_key
self.label_value = label_value


HOST1 = Host('04ae0e01-13b6-4105',
'controller-0')
HOST1 = Host("04ae0e01-13b6-4105", "controller-0")

# Device not enabled
PCI_DEVICE1 = PCIDevice('06789e01-13b6-2345',
'pci_0000_00_01_0',
'0000:00:02.0',
'1111',
'2222',
False)
PCI_DEVICE1 = PCIDevice(
"06789e01-13b6-2345", "pci_0000_00_01_0", "0000:00:02.0", "1111", "2222", False
)

# Device not enabled
PCI_DEVICE2 = PCIDevice('06789e01-13b6-2346',
'pci_0000_00_02_0',
'0000:00:03.0',
'1111',
'2222',
False)
PCI_DEVICE2 = PCIDevice(
"06789e01-13b6-2346", "pci_0000_00_02_0", "0000:00:03.0", "1111", "2222", False
)

# Device enabled
PCI_DEVICE3 = PCIDevice('06789e01-13b6-2347',
'pci_0000_00_03_0',
'0000:00:04.0',
'1111',
'2222',
True)
PCI_DEVICE3 = PCIDevice(
"06789e01-13b6-2347", "pci_0000_00_03_0", "0000:00:04.0", "1111", "2222", True
)

# Device enabled
PCI_DEVICE4 = PCIDevice('06789e01-13b6-2347',
'pci_0000_00_03_0',
'0000:00:04.0',
'1000',
'2000',
True)
PCI_DEVICE4 = PCIDevice(
"06789e01-13b6-2347", "pci_0000_00_03_0", "0000:00:04.0", "1000", "2000", True
)

# Device image has been applied
DEVICE_IMAGE1 = DeviceImage(
@ -169,20 +152,18 @@ DEVICE_IMAGE3 = DeviceImage(
[{"key1": "value1"}],
)

DEVICE_LABEL1 = DeviceLabels('06789e01-13b6-2347',
'key1',
'value1')
DEVICE_LABEL1 = DeviceLabels("06789e01-13b6-2347", "key1", "value1")

# Device image state where image is written to device
DEVICE_IMAGE_STATE1 = DeviceImageState(PCI_DEVICE4.uuid,
'04ae0e01-13b6-4105',
'completed')
DEVICE_IMAGE_STATE1 = DeviceImageState(
PCI_DEVICE4.uuid, "04ae0e01-13b6-4105", "completed"
)

# Device image state where image is applied but not written
# to the device
DEVICE_IMAGE_STATE2 = DeviceImageState(PCI_DEVICE4.uuid,
'04ae0e01-13b6-4105',
'pending')
DEVICE_IMAGE_STATE2 = DeviceImageState(
PCI_DEVICE4.uuid, "04ae0e01-13b6-4105", "pending"
)


class FakeSysinvClientNoEnabledDevices(object):
@ -423,8 +404,9 @@ class TestFirmwareAudit(base.DCManagerTestCase):
self._mock_rpc_api_manager_audit_worker_client()
self._mock_sysinv_client(subcloud_audit_worker_manager)
self._mock_subcloud_audit_manager_context()
self.mock_subcloud_audit_manager_context.get_admin_context.\
return_value = self.ctx
self.mock_subcloud_audit_manager_context.get_admin_context.return_value = (
self.ctx
)

self.fm = firmware_audit.FirmwareAudit()
self.am = subcloud_audit_manager.SubcloudAuditManager()
@ -438,30 +420,35 @@ class TestFirmwareAudit(base.DCManagerTestCase):
return dict_results

def get_fw_audit_data(self):
(_, firmware_audit_data, _, _, _) = \
self.am._get_audit_data(True, True, True, True, True)
(_, firmware_audit_data, _, _, _) = self.am._get_audit_data(
True, True, True, True, True
)

# Convert to dict like what would happen calling via RPC
firmware_audit_data = self._rpc_convert(firmware_audit_data)
return firmware_audit_data

@mock.patch.object(patch_audit, 'SysinvClient')
@mock.patch.object(patch_audit, 'PatchingClient')
@mock.patch.object(patch_audit, 'OpenStackDriver')
@mock.patch.object(firmware_audit, 'SysinvClient')
@mock.patch.object(firmware_audit, 'OpenStackDriver')
def test_no_firmware_audit_data_to_sync(self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client):
@mock.patch.object(patch_audit, "SysinvClient")
@mock.patch.object(patch_audit, "PatchingClient")
@mock.patch.object(patch_audit, "OpenStackDriver")
@mock.patch.object(firmware_audit, "SysinvClient")
@mock.patch.object(firmware_audit, "OpenStackDriver")
def test_no_firmware_audit_data_to_sync(
self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client,
):

mock_fw_sysinv_client.side_effect = FakeSysinvClientNoAuditData
firmware_audit_data = self.get_fw_audit_data()

subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
subclouds = {
base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
}
for name, region in subclouds.items():
response = self.fm.subcloud_firmware_audit(
self.mock_sysinv_client(), name, firmware_audit_data
@ -469,23 +456,27 @@ class TestFirmwareAudit(base.DCManagerTestCase):

self.assertEqual(response, dccommon_consts.SYNC_STATUS_IN_SYNC)

@mock.patch.object(patch_audit, 'SysinvClient')
@mock.patch.object(patch_audit, 'PatchingClient')
@mock.patch.object(patch_audit, 'OpenStackDriver')
@mock.patch.object(firmware_audit, 'SysinvClient')
@mock.patch.object(firmware_audit, 'OpenStackDriver')
def test_no_enabled_devices_on_subcloud(self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client):
@mock.patch.object(patch_audit, "SysinvClient")
@mock.patch.object(patch_audit, "PatchingClient")
@mock.patch.object(patch_audit, "OpenStackDriver")
@mock.patch.object(firmware_audit, "SysinvClient")
@mock.patch.object(firmware_audit, "OpenStackDriver")
def test_no_enabled_devices_on_subcloud(
self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client,
):

mock_fw_sysinv_client.side_effect = FakeSysinvClientNoEnabledDevices
firmware_audit_data = self.get_fw_audit_data()

subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
subclouds = {
base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
}
for name, region in subclouds.items():
response = self.fm.subcloud_firmware_audit(
self.mock_sysinv_client(), name, firmware_audit_data
@ -493,22 +484,26 @@ class TestFirmwareAudit(base.DCManagerTestCase):

self.assertEqual(response, dccommon_consts.SYNC_STATUS_IN_SYNC)

@mock.patch.object(patch_audit, 'SysinvClient')
@mock.patch.object(patch_audit, 'PatchingClient')
@mock.patch.object(patch_audit, 'OpenStackDriver')
@mock.patch.object(firmware_audit, 'SysinvClient')
@mock.patch.object(firmware_audit, 'OpenStackDriver')
def test_apply_image_to_all_devices(self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client):
@mock.patch.object(patch_audit, "SysinvClient")
@mock.patch.object(patch_audit, "PatchingClient")
@mock.patch.object(patch_audit, "OpenStackDriver")
@mock.patch.object(firmware_audit, "SysinvClient")
@mock.patch.object(firmware_audit, "OpenStackDriver")
def test_apply_image_to_all_devices(
self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client,
):
mock_fw_sysinv_client.side_effect = FakeSysinvClientImageWithoutLabels
firmware_audit_data = self.get_fw_audit_data()

subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
subclouds = {
base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
}
for name, region in subclouds.items():
response = self.fm.subcloud_firmware_audit(
self.mock_sysinv_client(), name, firmware_audit_data
@ -516,23 +511,27 @@ class TestFirmwareAudit(base.DCManagerTestCase):

self.assertEqual(response, dccommon_consts.SYNC_STATUS_IN_SYNC)

@mock.patch.object(patch_audit, 'SysinvClient')
@mock.patch.object(patch_audit, 'PatchingClient')
@mock.patch.object(patch_audit, 'OpenStackDriver')
@mock.patch.object(firmware_audit, 'SysinvClient')
@mock.patch.object(firmware_audit, 'OpenStackDriver')
def test_image_not_applied(self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client):
@mock.patch.object(patch_audit, "SysinvClient")
@mock.patch.object(patch_audit, "PatchingClient")
@mock.patch.object(patch_audit, "OpenStackDriver")
@mock.patch.object(firmware_audit, "SysinvClient")
@mock.patch.object(firmware_audit, "OpenStackDriver")
def test_image_not_applied(
self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client,
):
mock_fw_sysinv_client.side_effect = FakeSysinvClientImageNotApplied
self.mock_sysinv_client.side_effect = FakeSysinvClientImageNotApplied
firmware_audit_data = self.get_fw_audit_data()

subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
subclouds = {
base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
}
for name, region in subclouds.items():
response = self.fm.subcloud_firmware_audit(
self.mock_sysinv_client(), name, firmware_audit_data
@ -540,23 +539,27 @@ class TestFirmwareAudit(base.DCManagerTestCase):

self.assertEqual(response, dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)

@mock.patch.object(patch_audit, 'SysinvClient')
@mock.patch.object(patch_audit, 'PatchingClient')
@mock.patch.object(patch_audit, 'OpenStackDriver')
@mock.patch.object(firmware_audit, 'SysinvClient')
@mock.patch.object(firmware_audit, 'OpenStackDriver')
def test_image_not_written(self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client):
@mock.patch.object(patch_audit, "SysinvClient")
@mock.patch.object(patch_audit, "PatchingClient")
@mock.patch.object(patch_audit, "OpenStackDriver")
@mock.patch.object(firmware_audit, "SysinvClient")
@mock.patch.object(firmware_audit, "OpenStackDriver")
def test_image_not_written(
self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client,
):
mock_fw_sysinv_client.side_effect = FakeSysinvClientImageNotWritten
self.mock_sysinv_client.side_effect = FakeSysinvClientImageNotWritten
firmware_audit_data = self.get_fw_audit_data()

subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
subclouds = {
base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
}
for name, region in subclouds.items():
response = self.fm.subcloud_firmware_audit(
self.mock_sysinv_client(), name, firmware_audit_data
@ -564,22 +567,26 @@ class TestFirmwareAudit(base.DCManagerTestCase):

self.assertEqual(response, dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)

@mock.patch.object(patch_audit, 'SysinvClient')
@mock.patch.object(patch_audit, 'PatchingClient')
@mock.patch.object(patch_audit, 'OpenStackDriver')
@mock.patch.object(firmware_audit, 'SysinvClient')
@mock.patch.object(firmware_audit, 'OpenStackDriver')
def test_image_with_labels(self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client):
@mock.patch.object(patch_audit, "SysinvClient")
@mock.patch.object(patch_audit, "PatchingClient")
@mock.patch.object(patch_audit, "OpenStackDriver")
@mock.patch.object(firmware_audit, "SysinvClient")
@mock.patch.object(firmware_audit, "OpenStackDriver")
def test_image_with_labels(
self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client,
):
mock_fw_sysinv_client.side_effect = FakeSysinvClientImageWithLabels
firmware_audit_data = self.get_fw_audit_data()

subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
subclouds = {
base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
}
for name, region in subclouds.items():
response = self.fm.subcloud_firmware_audit(
self.mock_sysinv_client(), name, firmware_audit_data
@ -587,22 +594,26 @@ class TestFirmwareAudit(base.DCManagerTestCase):

self.assertEqual(response, dccommon_consts.SYNC_STATUS_IN_SYNC)

@mock.patch.object(patch_audit, 'SysinvClient')
@mock.patch.object(patch_audit, 'PatchingClient')
@mock.patch.object(patch_audit, 'OpenStackDriver')
@mock.patch.object(firmware_audit, 'SysinvClient')
@mock.patch.object(firmware_audit, 'OpenStackDriver')
def test_no_matching_label_for_device_on_subcloud(self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client):
@mock.patch.object(patch_audit, "SysinvClient")
@mock.patch.object(patch_audit, "PatchingClient")
@mock.patch.object(patch_audit, "OpenStackDriver")
@mock.patch.object(firmware_audit, "SysinvClient")
@mock.patch.object(firmware_audit, "OpenStackDriver")
def test_no_matching_label_for_device_on_subcloud(
self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client,
):
mock_fw_sysinv_client.side_effect = FakeSysinvClientNoMatchingDeviceLabel
firmware_audit_data = self.get_fw_audit_data()

subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
subclouds = {
base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
}
for name, region in subclouds.items():
response = self.fm.subcloud_firmware_audit(
self.mock_sysinv_client(), name, firmware_audit_data
@ -610,22 +621,26 @@ class TestFirmwareAudit(base.DCManagerTestCase):

self.assertEqual(response, dccommon_consts.SYNC_STATUS_IN_SYNC)

@mock.patch.object(patch_audit, 'SysinvClient')
@mock.patch.object(patch_audit, 'PatchingClient')
@mock.patch.object(patch_audit, 'OpenStackDriver')
@mock.patch.object(firmware_audit, 'SysinvClient')
@mock.patch.object(firmware_audit, 'OpenStackDriver')
def test_no_matching_device_id_on_subcloud(self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client):
@mock.patch.object(patch_audit, "SysinvClient")
@mock.patch.object(patch_audit, "PatchingClient")
@mock.patch.object(patch_audit, "OpenStackDriver")
@mock.patch.object(firmware_audit, "SysinvClient")
@mock.patch.object(firmware_audit, "OpenStackDriver")
def test_no_matching_device_id_on_subcloud(
self,
mock_fw_openstack_driver,
mock_fw_sysinv_client,
mock_openstack_driver,
mock_patching_client,
mock_sysinv_client,
):
mock_fw_sysinv_client.side_effect = FakeSysinvClientNoMatchingDeviceId
firmware_audit_data = self.get_fw_audit_data()
|
||||
|
||||
subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
|
||||
base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
|
||||
subclouds = {
|
||||
base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
|
||||
base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
|
||||
}
|
||||
for name, region in subclouds.items():
|
||||
response = self.fm.subcloud_firmware_audit(
|
||||
self.mock_sysinv_client(), name, firmware_audit_data
|
||||
|
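The hunks above all follow the same mechanical rewrite: Black normalizes string literals to double quotes, reflows anything past its default 88-column limit, and, once it has to split a collection or signature, puts one element per line with a trailing comma. A minimal sketch that reproduces the subclouds dict transformation (illustrative only, assuming the black package is installed; it is not part of this change):

    # Reproduce the reflow Black applied to the subclouds dicts above.
    import black

    src = (
        "subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'], "
        "base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}\n"
    )
    # format_str applies the same rules the black CLI uses on files.
    print(black.format_str(src, mode=black.FileMode()))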
@ -22,16 +22,14 @@ from dcmanager.audit import subcloud_audit_manager
from dcmanager.audit import subcloud_audit_worker_manager
from dcmanager.tests import base

PREVIOUS_KUBE_VERSION = 'v1.2.3'
UPGRADED_KUBE_VERSION = 'v1.2.3-a'
PREVIOUS_KUBE_VERSION = "v1.2.3"
UPGRADED_KUBE_VERSION = "v1.2.3-a"


class FakeKubeVersion(object):
    def __init__(self,
                 obj_id=1,
                 version=UPGRADED_KUBE_VERSION,
                 target=True,
                 state='active'):
    def __init__(
        self, obj_id=1, version=UPGRADED_KUBE_VERSION, target=True, state="active"
    ):
        self.id = obj_id
        self.uuid = str(uuid.uuid4())
        self.version = version
@ -72,15 +70,18 @@ class TestKubernetesAudit(base.DCManagerTestCase):
        self.am.kubernetes_audit = self.audit

    def get_kube_audit_data(self):
        (_, _, kubernetes_audit_data, _, _) = \
            self.am._get_audit_data(True, True, True, True, True)
        (_, _, kubernetes_audit_data, _, _) = self.am._get_audit_data(
            True, True, True, True, True
        )
        return kubernetes_audit_data

    def test_no_kubernetes_audit_data_to_sync(self):
        kubernetes_audit_data = self.get_kube_audit_data()

        subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
                     base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
        subclouds = {
            base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
            base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
        }
        for name, region in subclouds.items():
            response = self.audit.subcloud_kubernetes_audit(
                self.mock_sysinv_client(), name, kubernetes_audit_data
@ -96,8 +97,10 @@ class TestKubernetesAudit(base.DCManagerTestCase):
        ]
        kubernetes_audit_data = self.get_kube_audit_data()

        subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
                     base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
        subclouds = {
            base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
            base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
        }
        for name, region in subclouds.items():
            # return different kube versions in the subclouds
            self.kube_sysinv_client().get_kube_versions.return_value = [
@ -117,8 +120,10 @@ class TestKubernetesAudit(base.DCManagerTestCase):
        ]
        kubernetes_audit_data = self.get_kube_audit_data()

        subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
                     base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
        subclouds = {
            base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
            base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
        }
        for name, region in subclouds.items():
            # return different kube versions in the subclouds
            self.kube_sysinv_client().get_kube_versions.return_value = [
@ -141,8 +146,10 @@ class TestKubernetesAudit(base.DCManagerTestCase):
        ]
        kubernetes_audit_data = self.get_kube_audit_data()

        subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
                     base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
        subclouds = {
            base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
            base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
        }
        for name, region in subclouds.items():
            # return same kube versions in the subclouds
            self.kube_sysinv_client().get_kube_versions.return_value = [
@ -159,17 +166,17 @@ class TestKubernetesAudit(base.DCManagerTestCase):
        # even if the kube versions match

        # mock that there is a kube upgrade (only queried in subclouds)
        self.kube_sysinv_client().get_kube_upgrades.return_value = [
            FakeKubeUpgrade()
        ]
        self.kube_sysinv_client().get_kube_upgrades.return_value = [FakeKubeUpgrade()]
        # Set the region one data as being the upgraded version
        self.kube_sysinv_client().get_kube_versions.return_value = [
            FakeKubeVersion(version=UPGRADED_KUBE_VERSION),
        ]
        kubernetes_audit_data = self.get_kube_audit_data()

        subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
                     base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
        subclouds = {
            base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
            base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
        }
        for name, region in subclouds.items():
            # return same kube versions in the subclouds
            self.kube_sysinv_client().get_kube_versions.return_value = [
@ -24,9 +24,9 @@ class FakeAlarm(object):

class FakeSubcloudObj(object):
    def __init__(self, subcloud_dict):
        self.name = subcloud_dict['name']
        self.region_name = subcloud_dict['region_name']
        self.software_version = subcloud_dict['software_version']
        self.name = subcloud_dict["name"]
        self.region_name = subcloud_dict["region_name"]
        self.software_version = subcloud_dict["software_version"]


class TestKubeRootcaUpdateAudit(base.DCManagerTestCase):
@ -43,29 +43,38 @@ class TestKubeRootcaUpdateAudit(base.DCManagerTestCase):
        self._mock_subcloud_audit_manager_context()

        # Set the Kubernetes Root CA cert identifier as cert1 for all regions
        self.mock_region_one_sysinv_client().get_kube_rootca_cert_id.return_value = \
            True, FakeKubeRootcaData("cert1", "")
        self.kube_rootca_cert_id = (
            self.mock_region_one_sysinv_client().get_kube_rootca_cert_id
        )
        self.kube_rootca_cert_id.return_value = (
            True,
            FakeKubeRootcaData("cert1", ""),
        )

        # Set get_alarms_by_ids returns none by default
        self.mock_fm_client().get_alarms_by_ids.return_value = None

        self.mock_subcloud_audit_manager_context.\
            get_admin_context.return_value = self.ctx
        self.mock_subcloud_audit_manager_context.get_admin_context.return_value = (
            self.ctx
        )

        self.audit = kube_rootca_update_audit.KubeRootcaUpdateAudit()
        self.am = SubcloudAuditManager()
        self.am.kube_rootca_update_audit = self.audit

    def get_rootca_audit_data(self):
        (_, _, _, kube_rootca_audit_data, _) = \
            self.am._get_audit_data(True, True, True, True, True)
        (_, _, _, kube_rootca_audit_data, _) = self.am._get_audit_data(
            True, True, True, True, True
        )

        return kube_rootca_audit_data

    def test_no_kube_rootca_update_audit_data_to_sync(self):
        # Set the region one data
        self.mock_region_one_sysinv_client().get_kube_rootca_cert_id.return_value = \
            True, FakeKubeRootcaData("", "error")
        self.kube_rootca_cert_id.return_value = (
            True,
            FakeKubeRootcaData("", "error"),
        )
        kube_rootca_update_audit_data = self.get_rootca_audit_data()

        subclouds = [base.SUBCLOUD_1, base.SUBCLOUD_2]
@ -76,15 +85,17 @@ class TestKubeRootcaUpdateAudit(base.DCManagerTestCase):
                self.mock_sysinv_client(),
                self.mock_fm_client(),
                subcloud,
                kube_rootca_update_audit_data
                kube_rootca_update_audit_data,
            )

            self.assertEqual(response, dccommon_consts.SYNC_STATUS_IN_SYNC)

    def test_kube_rootca_update_audit_in_sync_cert_based(self):
        # Set the region one data
        self.mock_region_one_sysinv_client().get_kube_rootca_cert_id.return_value = \
            True, FakeKubeRootcaData("cert1", "")
        self.kube_rootca_cert_id.return_value = (
            True,
            FakeKubeRootcaData("cert1", ""),
        )
        kube_rootca_update_audit_data = self.get_rootca_audit_data()

        subclouds = [base.SUBCLOUD_1, base.SUBCLOUD_2]
@ -92,24 +103,29 @@ class TestKubeRootcaUpdateAudit(base.DCManagerTestCase):
            subcloud = FakeSubcloudObj(subcloud_dict)

            # return same kube root ca ID in the subclouds
            self.mock_region_one_sysinv_client().get_kube_rootca_cert_id.\
                return_value = True, FakeKubeRootcaData("cert1", "")
            self.mock_sysinv_client().get_kube_rootca_cert_id.return_value = \
                True, FakeKubeRootcaData("cert1", "")
            self.kube_rootca_cert_id.return_value = True, FakeKubeRootcaData(
                "cert1", ""
            )
            self.mock_sysinv_client().get_kube_rootca_cert_id.return_value = (
                True,
                FakeKubeRootcaData("cert1", ""),
            )

            response = self.audit.subcloud_kube_rootca_audit(
                self.mock_sysinv_client(),
                self.mock_fm_client(),
                subcloud,
                kube_rootca_update_audit_data
                kube_rootca_update_audit_data,
            )

            self.assertEqual(response, dccommon_consts.SYNC_STATUS_IN_SYNC)

    def test_kube_rootca_update_audit_out_of_sync_cert_based(self):
        # Set the region one data
        self.mock_region_one_sysinv_client().get_kube_rootca_cert_id.return_value = \
            True, FakeKubeRootcaData("cert1", "")
        self.kube_rootca_cert_id.return_value = (
            True,
            FakeKubeRootcaData("cert1", ""),
        )
        kube_rootca_update_audit_data = self.get_rootca_audit_data()

        subclouds = [base.SUBCLOUD_1, base.SUBCLOUD_2]
@ -117,23 +133,28 @@ class TestKubeRootcaUpdateAudit(base.DCManagerTestCase):
            subcloud = FakeSubcloudObj(subcloud_dict)

            # return different kube root ca ID in the subclouds
            self.mock_region_one_sysinv_client().get_kube_rootca_cert_id.\
                return_value = True, FakeKubeRootcaData("cert2", "")
            self.mock_sysinv_client().get_kube_rootca_cert_id.return_value = \
                True, FakeKubeRootcaData("cert2", "")
            self.kube_rootca_cert_id.return_value = True, FakeKubeRootcaData(
                "cert2", ""
            )
            self.mock_sysinv_client().get_kube_rootca_cert_id.return_value = (
                True,
                FakeKubeRootcaData("cert2", ""),
            )
            response = self.audit.subcloud_kube_rootca_audit(
                self.mock_sysinv_client(),
                self.mock_fm_client(),
                subcloud,
                kube_rootca_update_audit_data
                kube_rootca_update_audit_data,
            )

            self.assertEqual(response, dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)

    def test_kube_rootca_update_audit_in_sync_alarm_based(self):
        # Set the region one data
        self.mock_region_one_sysinv_client().get_kube_rootca_cert_id.return_value = \
            True, FakeKubeRootcaData("cert1", "")
        self.kube_rootca_cert_id.return_value = (
            True,
            FakeKubeRootcaData("cert1", ""),
        )
        kube_rootca_update_audit_data = self.get_rootca_audit_data()

        subclouds = [base.SUBCLOUD_1, base.SUBCLOUD_2]
@ -141,25 +162,28 @@ class TestKubeRootcaUpdateAudit(base.DCManagerTestCase):
            subcloud = FakeSubcloudObj(subcloud_dict)

            # return API cert ID request failed
            self.mock_region_one_sysinv_client().get_kube_rootca_cert_id.\
                return_value = False, None
            self.mock_sysinv_client().get_kube_rootca_cert_id.return_value = \
                False, None
            self.kube_rootca_cert_id.return_value = (
                False,
                None,
            )
            self.mock_sysinv_client().get_kube_rootca_cert_id.return_value = False, None
            self.mock_fm_client().get_alarms_by_ids.return_value = None

            response = self.audit.subcloud_kube_rootca_audit(
                self.mock_sysinv_client(),
                self.mock_fm_client(),
                subcloud,
                kube_rootca_update_audit_data
                kube_rootca_update_audit_data,
            )

            self.assertEqual(response, dccommon_consts.SYNC_STATUS_IN_SYNC)

    def test_kube_rootca_update_audit_out_of_sync_alarm_based(self):
        # Set the region one data
        self.mock_region_one_sysinv_client().get_kube_rootca_cert_id.return_value = \
            True, FakeKubeRootcaData("cert1", "")
        self.kube_rootca_cert_id.return_value = (
            True,
            FakeKubeRootcaData("cert1", ""),
        )
        kube_rootca_update_audit_data = self.get_rootca_audit_data()

        subclouds = [base.SUBCLOUD_1, base.SUBCLOUD_2]
@ -167,26 +191,30 @@ class TestKubeRootcaUpdateAudit(base.DCManagerTestCase):
            subcloud = FakeSubcloudObj(subcloud_dict)

            # return API cert ID request failed
            self.mock_region_one_sysinv_client().get_kube_rootca_cert_id.\
                return_value = False, None
            self.mock_sysinv_client().get_kube_rootca_cert_id.return_value = \
                False, None
            self.mock_fm_client().get_alarms_by_ids.return_value = \
                [FakeAlarm('system.certificate.kubernetes-root-ca'), ]
            self.kube_rootca_cert_id.return_value = (
                False,
                None,
            )
            self.mock_sysinv_client().get_kube_rootca_cert_id.return_value = False, None
            self.mock_fm_client().get_alarms_by_ids.return_value = [
                FakeAlarm("system.certificate.kubernetes-root-ca"),
            ]

            response = self.audit.subcloud_kube_rootca_audit(
                self.mock_sysinv_client(),
                self.mock_fm_client(),
                subcloud,
                kube_rootca_update_audit_data
                kube_rootca_update_audit_data,
            )

            self.assertEqual(response, dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)

    def test_kube_rootca_update_audit_fail_to_get_audit_data(self):
        # Set the region one data
        self.mock_region_one_sysinv_client().get_kube_rootca_cert_id.return_value = \
            True, FakeKubeRootcaData("cert1", "")
        self.kube_rootca_cert_id.return_value = (
            True,
            FakeKubeRootcaData("cert1", ""),
        )
        kube_rootca_update_audit_data = self.get_rootca_audit_data()

        subclouds = [base.SUBCLOUD_3, base.SUBCLOUD_4]
@ -194,14 +222,15 @@ class TestKubeRootcaUpdateAudit(base.DCManagerTestCase):
            subcloud = FakeSubcloudObj(subcloud_dict)

            # return API cert ID request failed
            self.mock_sysinv_client().get_kube_rootca_cert_id.\
                return_value = base.FakeException("API cert ID request failed")
            self.mock_sysinv_client().get_kube_rootca_cert_id.return_value = (
                base.FakeException("API cert ID request failed")
            )

            response = self.audit.subcloud_kube_rootca_audit(
                self.mock_sysinv_client(),
                self.mock_fm_client(),
                subcloud,
                kube_rootca_update_audit_data
                kube_rootca_update_audit_data,
            )

            self.assertEqual(response, None)
@ -49,40 +49,62 @@ class FakePatchingClientInSync(object):
        self.endpoint = endpoint

    def query(self):
        if self.region == 'RegionOne':
            return {'DC.1': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'DC.2': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'DC.3': {'sw_version': '17.07',
                             'repostate': 'Committed',
                             'patchstate': 'Committed'},
                    'DC.4': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    # This patch won't make us out of sync because it is for
                    # a different release.
                    'OTHER_REL_DC.1': {'sw_version': '17.08',
                                       'repostate': 'Applied',
                                       'patchstate': 'Applied'},
                    }
        elif self.region in [base.SUBCLOUD_1['region_name'],
                             base.SUBCLOUD_2['region_name']]:
            return {'DC.1': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'DC.2': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'DC.3': {'sw_version': '17.07',
                             'repostate': 'Committed',
                             'patchstate': 'Committed'},
                    'DC.4': {'sw_version': '17.07',
                             'repostate': 'Committed',
                             'patchstate': 'Committed'},
                    }
        if self.region == "RegionOne":
            return {
                "DC.1": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "DC.2": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "DC.3": {
                    "sw_version": "17.07",
                    "repostate": "Committed",
                    "patchstate": "Committed",
                },
                "DC.4": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                # This patch won't make us out of sync because it is for
                # a different release.
                "OTHER_REL_DC.1": {
                    "sw_version": "17.08",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
            }
        elif self.region in [
            base.SUBCLOUD_1["region_name"],
            base.SUBCLOUD_2["region_name"],
        ]:
            return {
                "DC.1": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "DC.2": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "DC.3": {
                    "sw_version": "17.07",
                    "repostate": "Committed",
                    "patchstate": "Committed",
                },
                "DC.4": {
                    "sw_version": "17.07",
                    "repostate": "Committed",
                    "patchstate": "Committed",
                },
            }
        else:
            return {}

@ -94,38 +116,66 @@ class FakePatchingClientOutOfSync(object):
        self.endpoint = endpoint

    def query(self):
        if self.region == 'RegionOne':
            return {'DC.1': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Partial-Apply'},
                    'DC.2': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'}}
        elif self.region == base.SUBCLOUD_1['region_name']:
            return {'DC.1': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'DC.2': {'sw_version': '17.07',
                             'repostate': 'Available',
                             'patchstate': 'Available'}}
        elif self.region == base.SUBCLOUD_2['region_name']:
            return {'DC.1': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'}}
        elif self.region == base.SUBCLOUD_3['region_name']:
            return {'DC.1': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'DC.2': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'}}
        elif self.region == base.SUBCLOUD_4['region_name']:
            return {'DC.1': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'DC.2': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Partial-Apply'}}
        if self.region == "RegionOne":
            return {
                "DC.1": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Partial-Apply",
                },
                "DC.2": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
            }
        elif self.region == base.SUBCLOUD_1["region_name"]:
            return {
                "DC.1": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "DC.2": {
                    "sw_version": "17.07",
                    "repostate": "Available",
                    "patchstate": "Available",
                },
            }
        elif self.region == base.SUBCLOUD_2["region_name"]:
            return {
                "DC.1": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                }
            }
        elif self.region == base.SUBCLOUD_3["region_name"]:
            return {
                "DC.1": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "DC.2": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
            }
        elif self.region == base.SUBCLOUD_4["region_name"]:
            return {
                "DC.1": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "DC.2": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Partial-Apply",
                },
            }
        else:
            return {}

@ -137,33 +187,55 @@ class FakePatchingClientExtraPatches(object):
        self.endpoint = endpoint

    def query(self):
        if self.region == 'RegionOne':
            return {'DC.1': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'DC.2': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'}}
        elif self.region == 'subcloud1':
            return {'DC.1': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'DC.2': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'DC.3': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'}}
        elif self.region == 'subcloud2':
            return {'DC.1': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'DC.2': {'sw_version': '17.07',
                             'repostate': 'Applied',
                             'patchstate': 'Applied'},
                    'OTHER_REL_DC.1': {'sw_version': '17.08',
                                       'repostate': 'Applied',
                                       'patchstate': 'Applied'}}
        if self.region == "RegionOne":
            return {
                "DC.1": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "DC.2": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
            }
        elif self.region == "subcloud1":
            return {
                "DC.1": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "DC.2": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "DC.3": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
            }
        elif self.region == "subcloud2":
            return {
                "DC.1": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "DC.2": {
                    "sw_version": "17.07",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
                "OTHER_REL_DC.1": {
                    "sw_version": "17.08",
                    "repostate": "Applied",
                    "patchstate": "Applied",
                },
            }
        else:
            return {}
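The fake patching clients above key their query() results off the region name given at construction, which is how a single test run drives a different sync state per subcloud. A small illustrative usage, assuming the constructors take (region, session, endpoint) as the attribute assignments in these hunks suggest; this snippet is not part of the change:

    # Hypothetical use of the fakes defined above.
    client = FakePatchingClientOutOfSync("RegionOne", session=None, endpoint=None)
    # RegionOne reports DC.1 as Partial-Apply, so the audit sees out-of-sync.
    assert client.query()["DC.1"]["patchstate"] == "Partial-Apply"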
@ -173,9 +245,9 @@ class FakeSysinvClientOneLoad(object):
        self.region = region
        self.session = session
        self.endpoint = endpoint
        self.loads = [Load('17.07', 'active')]
        self.loads = [Load("17.07", "active")]
        self.upgrades = []
        self.system = System('17.07')
        self.system = System("17.07")

    def get_loads(self):
        return self.loads
@ -192,9 +264,9 @@ class FakeSysinvClientOneLoadUnmatchedSoftwareVersion(object):
        self.region = region
        self.session = session
        self.endpoint = endpoint
        self.loads = [Load('17.07', 'active')]
        self.loads = [Load("17.07", "active")]
        self.upgrades = []
        self.system = System('17.07')
        self.system = System("17.07")

    def get_loads(self):
        return self.loads
@ -203,8 +275,8 @@ class FakeSysinvClientOneLoadUnmatchedSoftwareVersion(object):
        return self.upgrades

    def get_system(self):
        if self.region == base.SUBCLOUD_2['region_name']:
            return System('17.06')
        if self.region == base.SUBCLOUD_2["region_name"]:
            return System("17.06")
        else:
            return self.system

@ -214,16 +286,16 @@ class FakeSysinvClientOneLoadUpgradeInProgress(object):
        self.region = region
        self.session = session
        self.endpoint = endpoint
        self.loads = [Load('17.07', 'active')]
        self.loads = [Load("17.07", "active")]
        self.upgrades = []
        self.system = System('17.07')
        self.system = System("17.07")

    def get_loads(self):
        return self.loads

    def get_upgrades(self):
        if self.region == base.SUBCLOUD_2['region_name']:
            return [Upgrade('started')]
        if self.region == base.SUBCLOUD_2["region_name"]:
            return [Upgrade("started")]
        else:
            return self.upgrades

@ -240,41 +312,48 @@ class TestPatchAudit(base.DCManagerTestCase):
        self._mock_sysinv_client(subcloud_audit_worker_manager)
        self._mock_subcloud_audit_manager_context()

        self.mock_subcloud_audit_manager_context.get_admin_context.\
            return_value = self.ctx
        self.mock_subcloud_audit_manager_context.get_admin_context.return_value = (
            self.ctx
        )

        self.pm = patch_audit.PatchAudit(self.ctx)
        self.am = subcloud_audit_manager.SubcloudAuditManager()
        self.am.patch_audit = self.pm

    def get_patch_audit_data(self):
        (patch_audit_data, _, _, _, _) = \
            self.am._get_audit_data(True, True, True, True, True)
        (patch_audit_data, _, _, _, _) = self.am._get_audit_data(
            True, True, True, True, True
        )
        # Convert to dict like what would happen calling via RPC
        patch_audit_data = patch_audit_data.to_dict()
        return patch_audit_data

    @mock.patch.object(patch_audit, 'SysinvClient')
    @mock.patch.object(patch_audit, 'PatchingClient')
    @mock.patch.object(patch_audit, 'OpenStackDriver')
    def test_periodic_patch_audit_in_sync(self,
                                          mock_openstack_driver,
                                          mock_patching_client,
                                          mock_sysinv_client):
    @mock.patch.object(patch_audit, "SysinvClient")
    @mock.patch.object(patch_audit, "PatchingClient")
    @mock.patch.object(patch_audit, "OpenStackDriver")
    def test_periodic_patch_audit_in_sync(
        self, mock_openstack_driver, mock_patching_client, mock_sysinv_client
    ):
        mock_patching_client.side_effect = FakePatchingClientInSync
        mock_sysinv_client.side_effect = FakeSysinvClientOneLoad
        self.mock_sysinv_client.side_effect = FakeSysinvClientOneLoad
        patch_audit_data = self.get_patch_audit_data()

        subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
                     base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
        subclouds = {
            base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
            base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
        }

        for index, subcloud in enumerate(subclouds.keys(), start=2):
            subcloud_region = subclouds[subcloud]

            patch_response = self.pm.subcloud_patch_audit(
                mock.MagicMock(), self.mock_sysinv_client(subcloud_region),
                f"192.168.1.{index}", subcloud, subcloud_region, patch_audit_data
                mock.MagicMock(),
                self.mock_sysinv_client(subcloud_region),
                f"192.168.1.{index}",
                subcloud,
                subcloud_region,
                patch_audit_data,
            )
            load_response = self.pm.subcloud_load_audit(
                self.mock_sysinv_client(subcloud_region), subcloud, patch_audit_data
@ -283,29 +362,34 @@ class TestPatchAudit(base.DCManagerTestCase):
        self.assertEqual(patch_response, dccommon_consts.SYNC_STATUS_IN_SYNC)
        self.assertEqual(load_response, dccommon_consts.SYNC_STATUS_IN_SYNC)

    @mock.patch.object(patch_audit, 'SysinvClient')
    @mock.patch.object(patch_audit, 'PatchingClient')
    @mock.patch.object(patch_audit, 'OpenStackDriver')
    def test_periodic_patch_audit_out_of_sync(self,
                                              mock_openstack_driver,
                                              mock_patching_client,
                                              mock_sysinv_client):
    @mock.patch.object(patch_audit, "SysinvClient")
    @mock.patch.object(patch_audit, "PatchingClient")
    @mock.patch.object(patch_audit, "OpenStackDriver")
    def test_periodic_patch_audit_out_of_sync(
        self, mock_openstack_driver, mock_patching_client, mock_sysinv_client
    ):
        mock_patching_client.side_effect = FakePatchingClientOutOfSync
        mock_sysinv_client.side_effect = FakeSysinvClientOneLoad
        self.mock_sysinv_client.side_effect = FakeSysinvClientOneLoad

        patch_audit_data = self.get_patch_audit_data()

        subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
                     base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name'],
                     base.SUBCLOUD_3['name']: base.SUBCLOUD_3['region_name'],
                     base.SUBCLOUD_4['name']: base.SUBCLOUD_4['region_name']}
        subclouds = {
            base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
            base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
            base.SUBCLOUD_3["name"]: base.SUBCLOUD_3["region_name"],
            base.SUBCLOUD_4["name"]: base.SUBCLOUD_4["region_name"],
        }
        for index, subcloud in enumerate(subclouds.keys(), start=2):
            subcloud_region = subclouds[subcloud]

            patch_response = self.pm.subcloud_patch_audit(
                mock.MagicMock(), self.mock_sysinv_client(subcloud_region),
                f"192.168.1.{index}", subcloud, subcloud_region, patch_audit_data
                mock.MagicMock(),
                self.mock_sysinv_client(subcloud_region),
                f"192.168.1.{index}",
                subcloud,
                subcloud_region,
                patch_audit_data,
            )
            load_response = self.pm.subcloud_load_audit(
                self.mock_sysinv_client(subcloud_region), subcloud, patch_audit_data
@ -317,27 +401,32 @@ class TestPatchAudit(base.DCManagerTestCase):
        self.assertEqual(patch_response, expected_patch_response)
        self.assertEqual(load_response, dccommon_consts.SYNC_STATUS_IN_SYNC)

    @mock.patch.object(patch_audit, 'SysinvClient')
    @mock.patch.object(patch_audit, 'PatchingClient')
    @mock.patch.object(patch_audit, 'OpenStackDriver')
    def test_periodic_patch_audit_extra_patches(self,
                                                mock_openstack_driver,
                                                mock_patching_client,
                                                mock_sysinv_client):
    @mock.patch.object(patch_audit, "SysinvClient")
    @mock.patch.object(patch_audit, "PatchingClient")
    @mock.patch.object(patch_audit, "OpenStackDriver")
    def test_periodic_patch_audit_extra_patches(
        self, mock_openstack_driver, mock_patching_client, mock_sysinv_client
    ):
        mock_patching_client.side_effect = FakePatchingClientExtraPatches
        mock_sysinv_client.side_effect = FakeSysinvClientOneLoad
        self.mock_sysinv_client.side_effect = FakeSysinvClientOneLoad

        patch_audit_data = self.get_patch_audit_data()

        subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
                     base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
        subclouds = {
            base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
            base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
        }
        for index, subcloud in enumerate(subclouds.keys(), start=2):
            subcloud_region = subclouds[subcloud]

            patch_response = self.pm.subcloud_patch_audit(
                mock.MagicMock(), self.mock_sysinv_client(subcloud_region),
                f"192.168.1.{index}", subcloud, subcloud_region, patch_audit_data
                mock.MagicMock(),
                self.mock_sysinv_client(subcloud_region),
                f"192.168.1.{index}",
                subcloud,
                subcloud_region,
                patch_audit_data,
            )
            load_response = self.pm.subcloud_load_audit(
                self.mock_sysinv_client(subcloud_region), subcloud, patch_audit_data
@ -346,30 +435,34 @@ class TestPatchAudit(base.DCManagerTestCase):
        self.assertEqual(patch_response, dccommon_consts.SYNC_STATUS_OUT_OF_SYNC)
        self.assertEqual(load_response, dccommon_consts.SYNC_STATUS_IN_SYNC)

    @mock.patch.object(patch_audit, 'SysinvClient')
    @mock.patch.object(patch_audit, 'PatchingClient')
    @mock.patch.object(patch_audit, 'OpenStackDriver')
    @mock.patch.object(patch_audit, "SysinvClient")
    @mock.patch.object(patch_audit, "PatchingClient")
    @mock.patch.object(patch_audit, "OpenStackDriver")
    def test_periodic_patch_audit_unmatched_software_version(
        self,
        mock_openstack_driver,
        mock_patching_client,
        mock_sysinv_client):
        self, mock_openstack_driver, mock_patching_client, mock_sysinv_client
    ):
        mock_patching_client.side_effect = FakePatchingClientInSync
        mock_sysinv_client.side_effect = (
            FakeSysinvClientOneLoadUnmatchedSoftwareVersion)
        self.mock_sysinv_client.side_effect = \
        mock_sysinv_client.side_effect = FakeSysinvClientOneLoadUnmatchedSoftwareVersion
        self.mock_sysinv_client.side_effect = (
            FakeSysinvClientOneLoadUnmatchedSoftwareVersion
        )

        patch_audit_data = self.get_patch_audit_data()

        subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
                     base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
        subclouds = {
            base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
            base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
        }
        for index, subcloud in enumerate(subclouds.keys(), start=2):
            subcloud_region = subclouds[subcloud]

            patch_response = self.pm.subcloud_patch_audit(
                mock.MagicMock(), self.mock_sysinv_client(subcloud_region),
                f"192.168.1.{index}", subcloud, subcloud_region, patch_audit_data
                mock.MagicMock(),
                self.mock_sysinv_client(subcloud_region),
                f"192.168.1.{index}",
                subcloud,
                subcloud_region,
                patch_audit_data,
            )
            load_response = self.pm.subcloud_load_audit(
                self.mock_sysinv_client(subcloud_region), subcloud, patch_audit_data
@ -383,30 +476,33 @@ class TestPatchAudit(base.DCManagerTestCase):
        self.assertEqual(patch_response, dccommon_consts.SYNC_STATUS_IN_SYNC)
        self.assertEqual(load_response, expected_load_response)

    @mock.patch.object(patch_audit, 'SysinvClient')
    @mock.patch.object(patch_audit, 'PatchingClient')
    @mock.patch.object(patch_audit, 'OpenStackDriver')
    @mock.patch.object(patch_audit, "SysinvClient")
    @mock.patch.object(patch_audit, "PatchingClient")
    @mock.patch.object(patch_audit, "OpenStackDriver")
    def test_periodic_patch_audit_upgrade_in_progress(
        self,
        mock_openstack_driver,
        mock_patching_client,
        mock_sysinv_client):
        self, mock_openstack_driver, mock_patching_client, mock_sysinv_client
    ):

        mock_patching_client.side_effect = FakePatchingClientInSync
        mock_sysinv_client.side_effect = FakeSysinvClientOneLoadUpgradeInProgress
        self.mock_sysinv_client.side_effect = \
            FakeSysinvClientOneLoadUpgradeInProgress
        self.mock_sysinv_client.side_effect = FakeSysinvClientOneLoadUpgradeInProgress

        patch_audit_data = self.get_patch_audit_data()

        subclouds = {base.SUBCLOUD_1['name']: base.SUBCLOUD_1['region_name'],
                     base.SUBCLOUD_2['name']: base.SUBCLOUD_2['region_name']}
        subclouds = {
            base.SUBCLOUD_1["name"]: base.SUBCLOUD_1["region_name"],
            base.SUBCLOUD_2["name"]: base.SUBCLOUD_2["region_name"],
        }
        for index, subcloud in enumerate(subclouds.keys(), start=2):
            subcloud_region = subclouds[subcloud]

            patch_response = self.pm.subcloud_patch_audit(
                mock.MagicMock(), self.mock_sysinv_client(subcloud_region),
                f"192.168.1.{index}", subcloud, subcloud_region, patch_audit_data
                mock.MagicMock(),
                self.mock_sysinv_client(subcloud_region),
                f"192.168.1.{index}",
                subcloud,
                subcloud_region,
                patch_audit_data,
            )
            load_response = self.pm.subcloud_load_audit(
                self.mock_sysinv_client(subcloud_region), subcloud, patch_audit_data

@ -1,4 +1,4 @@
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -39,43 +39,56 @@ class ManagerRpcAuditAPITestCase(base.DCManagerTestCase):
        transport = messaging.get_transport()
        transport._send = mock.Mock()

        fake_endpoints = {'service': 'fake_ip', 'service2': 'other_fake_ip'}
        fake_endpoints = {"service": "fake_ip", "service2": "other_fake_ip"}

        rpcapi.update_subcloud_endpoints(
            self.context, 'subcloud', fake_endpoints)
        rpcapi.update_subcloud_endpoints(self.context, "subcloud", fake_endpoints)

        exp_msg = {'method': 'update_subcloud_endpoints',
                   'args': {'subcloud_name': 'subcloud',
                            'endpoints': fake_endpoints},
                   'version': '1.0'}
        exp_msg = {
            "method": "update_subcloud_endpoints",
            "args": {"subcloud_name": "subcloud", "endpoints": fake_endpoints},
            "version": "1.0",
        }

        # With fanout a new target is created
        new_target = oslo_messaging.Target(
            fanout=True, version=rpcapi.BASE_RPC_API_VERSION,
            topic=consts.TOPIC_DC_MANAGER_AUDIT_WORKER)
        transport._send.assert_called_with(new_target,
                                           mock.ANY,
                                           exp_msg,
                                           retry=None,
                                           transport_options=None)
            fanout=True,
            version=rpcapi.BASE_RPC_API_VERSION,
            topic=consts.TOPIC_DC_MANAGER_AUDIT_WORKER,
        )
        transport._send.assert_called_with(
            new_target, mock.ANY, exp_msg, retry=None, transport_options=None
        )

        # Without fanout the target is the same
        rpcapi.audit_subclouds(
            self.context, ['subcloud1', 'subcloud2'],
            True, False, True, True, False, False)
            self.context,
            ["subcloud1", "subcloud2"],
            True,
            False,
            True,
            True,
            False,
            False,
        )

        exp_msg2 = {'method': 'audit_subclouds',
                    'args': {'subcloud_ids': ['subcloud1', 'subcloud2'],
                             'patch_audit_data': True,
                             'firmware_audit_data': False,
                             'kubernetes_audit_data': True,
                             'do_openstack_audit': True,
                             'kube_rootca_update_audit_data': False,
                             'software_audit_data': False},
                    'version': '1.0'}
        exp_msg2 = {
            "method": "audit_subclouds",
            "args": {
                "subcloud_ids": ["subcloud1", "subcloud2"],
                "patch_audit_data": True,
                "firmware_audit_data": False,
                "kubernetes_audit_data": True,
                "do_openstack_audit": True,
                "kube_rootca_update_audit_data": False,
                "software_audit_data": False,
            },
            "version": "1.0",
        }

        transport._send.assert_called_with(rpcapi._client.target,
                                           mock.ANY,
                                           exp_msg2,
                                           retry=None,
                                           transport_options=None)
        transport._send.assert_called_with(
            rpcapi._client.target,
            mock.ANY,
            exp_msg2,
            retry=None,
            transport_options=None,
        )
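Since every hunk in this change is formatting-only, the code before and after must parse to the same abstract syntax tree; Black itself performs an equivalence check of this kind when it rewrites a file. A short sketch of the idea (illustrative, not part of the change):

    import ast

    def same_ast(old_src: str, new_src: str) -> bool:
        # ast.dump ignores formatting, so quote style and line breaks vanish.
        return ast.dump(ast.parse(old_src)) == ast.dump(ast.parse(new_src))

    old = "exp_msg = {'method': 'update_subcloud_endpoints', 'version': '1.0'}\n"
    new = 'exp_msg = {\n    "method": "update_subcloud_endpoints",\n    "version": "1.0",\n}\n'
    assert same_ast(old, new)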
@ -28,15 +28,14 @@ class TestDCManagerAuditService(base.DCManagerTestCase):

    def setUp(self):
        super(TestDCManagerAuditService, self).setUp()
        self.tenant_id = 'fake_admin'
        self.tenant_id = "fake_admin"
        self.thm = scheduler.ThreadGroupManager()
        self.context = utils.dummy_context(user='test_user',
                                           tenant=self.tenant_id)
        self.context = utils.dummy_context(user="test_user", tenant=self.tenant_id)
        self.service_obj = service.DCManagerAuditService()

    def test_init(self):
        self.assertEqual(self.service_obj.host, 'localhost')
        self.assertEqual(self.service_obj.topic, 'dcmanager-audit')
        self.assertEqual(self.service_obj.host, "localhost")
        self.assertEqual(self.service_obj.topic, "dcmanager-audit")

    def test_init_tgm(self):
        self.service_obj.init_tgm()

@ -24,7 +24,7 @@ from dcmanager.common import consts
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.tests import base

sys.modules['fm_core'] = mock.Mock()
sys.modules["fm_core"] = mock.Mock()


class FakeAuditWorkerAPI(object):
@ -60,8 +60,17 @@ class FakeKubeRootcaUpdateAudit(object):


class FakeServiceGroup(object):
    def __init__(self, status, desired_state, service_group_name, uuid,
                 node_name, state, condition, name):
    def __init__(
        self,
        status,
        desired_state,
        service_group_name,
        uuid,
        node_name,
        state,
        condition,
        name,
    ):
        self.status = status
        self.desired_state = desired_state
        self.service_group_name = service_group_name
@ -73,8 +82,9 @@ class FakeServiceGroup(object):


class FakeApplication(object):
    def __init__(self, status, name, manifest_name, active, progress,
                 app_version, manifest_file):
    def __init__(
        self, status, name, manifest_name, active, progress, app_version, manifest_file
    ):
        self.status = status
        self.name = name
        self.manifest_name = manifest_name
@ -85,103 +95,127 @@ class FakeApplication(object):


FAKE_SERVICE_GROUPS = [
    FakeServiceGroup("",
                     "active",
                     "distributed-cloud-services",
                     "b00fd252-5bd7-44b5-bbde-7d525e7125c7",
                     "controller-0",
                     "active",
                     "",
                     "controller"),
    FakeServiceGroup("",
                     "active",
                     "storage-monitoring-services",
                     "5a14a1d1-dac1-48b0-9598-3702e0b0338a",
                     "controller-0",
                     "active",
                     "",
                     "controller"),
    FakeServiceGroup("",
                     "active",
                     "storage-services",
                     "5cbfa903-379f-4329-81b4-2e88acdfa215",
                     "controller-0",
                     "active",
                     "",
                     "controller"),
    FakeServiceGroup("",
                     "active",
                     "web-services",
                     "42829858-008f-4931-94e1-4b86fe31ce3c",
                     "controller-0",
                     "active",
                     "",
                     "controller"),
    FakeServiceGroup("",
                     "active",
                     "directory-services",
                     "74225295-2601-4376-a52c-7cbd149146f6",
                     "controller-0",
                     "active",
                     "",
                     "controller"),
    FakeServiceGroup("",
                     "active",
                     "patching-services",
                     "6870c079-e1c3-4402-b88b-63a5ef06a77a",
                     "controller-0",
                     "active",
                     "",
                     "controller"),
    FakeServiceGroup("",
                     "active",
                     "vim-services",
                     "d8367a52-316e-418b-9211-a13331e073ef",
                     "controller-0",
                     "active",
                     "",
                     "controller"),
    FakeServiceGroup("",
                     "active",
                     "cloud-services",
                     "12682dc0-cef5-427a-b1a6-145cf950b49c",
                     "controller-0",
                     "active",
                     "",
                     "controller"),
    FakeServiceGroup("",
                     "active",
                     "controller-services",
                     "daac63fb-24b3-4cd1-b895-260a32e356ae",
                     "controller-0",
                     "active",
                     "",
                     "controller"),
    FakeServiceGroup("",
                     "active",
                     "oam-services",
                     "4b66913d-98ba-4a4a-86c3-168625f629eb",
                     "controller-0",
                     "active",
                     "",
                     "controller"),
    FakeServiceGroup(
        "",
        "active",
        "distributed-cloud-services",
        "b00fd252-5bd7-44b5-bbde-7d525e7125c7",
        "controller-0",
        "active",
        "",
        "controller",
    ),
    FakeServiceGroup(
        "",
        "active",
        "storage-monitoring-services",
        "5a14a1d1-dac1-48b0-9598-3702e0b0338a",
        "controller-0",
        "active",
        "",
        "controller",
    ),
    FakeServiceGroup(
        "",
        "active",
        "storage-services",
        "5cbfa903-379f-4329-81b4-2e88acdfa215",
        "controller-0",
        "active",
        "",
        "controller",
    ),
    FakeServiceGroup(
        "",
        "active",
        "web-services",
        "42829858-008f-4931-94e1-4b86fe31ce3c",
        "controller-0",
        "active",
        "",
        "controller",
    ),
    FakeServiceGroup(
        "",
        "active",
        "directory-services",
        "74225295-2601-4376-a52c-7cbd149146f6",
        "controller-0",
        "active",
        "",
        "controller",
    ),
    FakeServiceGroup(
        "",
        "active",
        "patching-services",
        "6870c079-e1c3-4402-b88b-63a5ef06a77a",
        "controller-0",
        "active",
        "",
        "controller",
    ),
    FakeServiceGroup(
        "",
        "active",
        "vim-services",
        "d8367a52-316e-418b-9211-a13331e073ef",
        "controller-0",
        "active",
        "",
        "controller",
    ),
    FakeServiceGroup(
        "",
        "active",
        "cloud-services",
        "12682dc0-cef5-427a-b1a6-145cf950b49c",
        "controller-0",
        "active",
        "",
        "controller",
    ),
    FakeServiceGroup(
        "",
        "active",
        "controller-services",
        "daac63fb-24b3-4cd1-b895-260a32e356ae",
        "controller-0",
        "active",
        "",
        "controller",
    ),
    FakeServiceGroup(
        "",
        "active",
        "oam-services",
        "4b66913d-98ba-4a4a-86c3-168625f629eb",
        "controller-0",
        "active",
        "",
        "controller",
    ),
]

FAKE_APPLICATIONS = [
    FakeApplication("applied",
                    "platform-integ-apps",
                    "platform-integration-manifest",
                    True,
                    "completed",
                    "1.0-8",
                    "manifest.yaml"),
    FakeApplication("applied",
                    "stx-openstack",
                    "stx-openstack-manifest",
                    True,
                    "completed",
                    "1.0-8",
                    "manifest.yaml"),
    FakeApplication(
        "applied",
        "platform-integ-apps",
        "platform-integration-manifest",
        True,
        "completed",
        "1.0-8",
        "manifest.yaml",
    ),
    FakeApplication(
        "applied",
        "stx-openstack",
        "stx-openstack-manifest",
        True,
        "completed",
        "1.0-8",
        "manifest.yaml",
    ),
]


@ -207,7 +241,7 @@ class FakeFmClient(object):
class FakeOpenStackDriver(object):

    def __init__(self, region_name):
        self.sysinv_client = FakeSysinvClient('fake_region', 'fake_session')
        self.sysinv_client = FakeSysinvClient("fake_region", "fake_session")
        self.fm_client = FakeFmClient()


@ -217,70 +251,66 @@ class TestAuditManager(base.DCManagerTestCase):

        # Mock the Audit Worker API
        self.fake_audit_worker_api = FakeAuditWorkerAPI()
        p = mock.patch('dcmanager.audit.rpcapi.ManagerAuditWorkerClient')
        p = mock.patch("dcmanager.audit.rpcapi.ManagerAuditWorkerClient")
        self.mock_audit_worker_api = p.start()
        self.mock_audit_worker_api.return_value = self.fake_audit_worker_api
        self.addCleanup(p.stop)

        # Mock the context
        p = mock.patch.object(subcloud_audit_manager, 'context')
        p = mock.patch.object(subcloud_audit_manager, "context")
        self.mock_context = p.start()
        self.mock_context.get_admin_context.return_value = self.ctx
        self.addCleanup(p.stop)

        # Mock patch audit
        self.fake_patch_audit = FakePatchAudit()
        p = mock.patch.object(subcloud_audit_manager,
                              'patch_audit')
        p = mock.patch.object(subcloud_audit_manager, "patch_audit")
        self.mock_patch_audit = p.start()
        self.mock_patch_audit.PatchAudit.return_value = \
            self.fake_patch_audit
        self.mock_patch_audit.PatchAudit.return_value = self.fake_patch_audit
        self.addCleanup(p.stop)

        # Mock firmware audit
        self.fake_firmware_audit = FakeFirmwareAudit()
        p = mock.patch.object(subcloud_audit_manager,
                              'firmware_audit')
        p = mock.patch.object(subcloud_audit_manager, "firmware_audit")
        self.mock_firmware_audit = p.start()
        self.mock_firmware_audit.FirmwareAudit.return_value = \
            self.fake_firmware_audit
        self.mock_firmware_audit.FirmwareAudit.return_value = self.fake_firmware_audit
        self.addCleanup(p.stop)

        # Mock kubernetes audit
        self.fake_kubernetes_audit = FakeKubernetesAudit()
        p = mock.patch.object(subcloud_audit_manager,
                              'kubernetes_audit')
        p = mock.patch.object(subcloud_audit_manager, "kubernetes_audit")
        self.mock_kubernetes_audit = p.start()
        self.mock_kubernetes_audit.KubernetesAudit.return_value = \
        self.mock_kubernetes_audit.KubernetesAudit.return_value = (
            self.fake_kubernetes_audit
        )
        self.addCleanup(p.stop)

        # Mock kube rootca update audit
        self.fake_kube_rootca_update_audit = FakeKubeRootcaUpdateAudit()
        p = mock.patch.object(subcloud_audit_manager,
                              'kube_rootca_update_audit')
        p = mock.patch.object(subcloud_audit_manager, "kube_rootca_update_audit")
        self.mock_kube_rootca_update_audit = p.start()
        self.mock_kubernetes_audit.KubeRootcaUpdateAudit.return_value = \
        self.mock_kubernetes_audit.KubeRootcaUpdateAudit.return_value = (
            self.fake_kube_rootca_update_audit
        )
        self.addCleanup(p.stop)

    @staticmethod
    def create_subcloud_static(ctxt, **kwargs):
        values = {
            'name': "subcloud1",
            'description': "This is a subcloud",
            'location': "This is the location of the subcloud",
            'software_version': "10.04",
            'management_subnet': "192.168.101.0/24",
            'management_gateway_ip': "192.168.101.1",
            'management_start_ip': "192.168.101.2",
            'management_end_ip': "192.168.101.50",
            'systemcontroller_gateway_ip': "192.168.204.101",
            'deploy_status': "not-deployed",
            'error_description': 'No errors present',
            'region_name': base.SUBCLOUD_1['region_name'],
            'openstack_installed': False,
            'group_id': 1,
            "name": "subcloud1",
            "description": "This is a subcloud",
            "location": "This is the location of the subcloud",
            "software_version": "10.04",
            "management_subnet": "192.168.101.0/24",
            "management_gateway_ip": "192.168.101.1",
            "management_start_ip": "192.168.101.2",
            "management_end_ip": "192.168.101.50",
            "systemcontroller_gateway_ip": "192.168.204.101",
            "deploy_status": "not-deployed",
            "error_description": "No errors present",
            "region_name": base.SUBCLOUD_1["region_name"],
            "openstack_installed": False,
            "group_id": 1,
        }
        values.update(kwargs)
        return db_api.subcloud_create(ctxt, **values)
@ -288,24 +318,25 @@ class TestAuditManager(base.DCManagerTestCase):
    def test_init(self):
        am = subcloud_audit_manager.SubcloudAuditManager()
        self.assertIsNotNone(am)
        self.assertEqual('subcloud_audit_manager', am.service_name)
        self.assertEqual('localhost', am.host)
        self.assertEqual("subcloud_audit_manager", am.service_name)
        self.assertEqual("localhost", am.host)
        self.assertEqual(self.ctx, am.context)

    def test_periodic_subcloud_audit(self):
        am = subcloud_audit_manager.SubcloudAuditManager()
        am._periodic_subcloud_audit_loop()

    @mock.patch.object(subcloud_audit_manager.db_api,
                       'subcloud_audits_bulk_end_audit')
    @mock.patch.object(subcloud_audit_manager.db_api, "subcloud_audits_bulk_end_audit")
    def test_skip_subcloud_audit(self, mock_subcloud_audits_bulk_end_audit):
        subcloud = self.create_subcloud_static(self.ctx)
        am = subcloud_audit_manager.SubcloudAuditManager()
        subcloud = db_api.subcloud_update(
            self.ctx, subcloud.id,
            management_state='unmanaged',
            self.ctx,
            subcloud.id,
            management_state="unmanaged",
            availability_status=dccommon_consts.AVAILABILITY_OFFLINE,
            deploy_status=consts.DEPLOY_STATE_CREATED)
            deploy_status=consts.DEPLOY_STATE_CREATED,
        )
        am._periodic_subcloud_audit_loop()
        # Verify that the audit is skipped
        mock_subcloud_audits_bulk_end_audit.assert_called_once()
@ -316,25 +347,27 @@ class TestAuditManager(base.DCManagerTestCase):
        am.trigger_subcloud_audits(self.ctx, subcloud.id, None)
        # Subaudits should be requested.
        result = db_api.subcloud_audits_get(self.ctx, subcloud.id)
        self.assertEqual(result['patch_audit_requested'], True)
        self.assertEqual(result['firmware_audit_requested'], True)
        self.assertEqual(result['load_audit_requested'], True)
        self.assertEqual(result['kubernetes_audit_requested'], True)
        self.assertEqual(result['kube_rootca_update_audit_requested'], True)
        self.assertEqual(result["patch_audit_requested"], True)
        self.assertEqual(result["firmware_audit_requested"], True)
        self.assertEqual(result["load_audit_requested"], True)
        self.assertEqual(result["kubernetes_audit_requested"], True)
        self.assertEqual(result["kube_rootca_update_audit_requested"], True)

    def test_audit_one_subcloud_exclude_endpoints(self):
        subcloud = self.create_subcloud_static(self.ctx)
        am = subcloud_audit_manager.SubcloudAuditManager()
        exclude_endpoints = [dccommon_consts.ENDPOINT_TYPE_PATCHING,
                             dccommon_consts.ENDPOINT_TYPE_LOAD]
        exclude_endpoints = [
            dccommon_consts.ENDPOINT_TYPE_PATCHING,
            dccommon_consts.ENDPOINT_TYPE_LOAD,
        ]
        am.trigger_subcloud_audits(self.ctx, subcloud.id, exclude_endpoints)
        # Verify subaudits be requested.
        result = db_api.subcloud_audits_get(self.ctx, subcloud.id)
        self.assertEqual(result['patch_audit_requested'], False)
        self.assertEqual(result['firmware_audit_requested'], True)
        self.assertEqual(result['load_audit_requested'], False)
        self.assertEqual(result['kubernetes_audit_requested'], True)
        self.assertEqual(result['kube_rootca_update_audit_requested'], True)
        self.assertEqual(result["patch_audit_requested"], False)
        self.assertEqual(result["firmware_audit_requested"], True)
        self.assertEqual(result["load_audit_requested"], False)
        self.assertEqual(result["kubernetes_audit_requested"], True)
        self.assertEqual(result["kube_rootca_update_audit_requested"], True)

    def test_trigger_load_audit(self):
        subcloud = self.create_subcloud_static(self.ctx)
@ -342,8 +375,8 @@ class TestAuditManager(base.DCManagerTestCase):
        am.trigger_load_audit(self.ctx)
        # Load audit should be requested.
        result = db_api.subcloud_audits_get(self.ctx, subcloud.id)
        self.assertEqual(result['patch_audit_requested'], False)
        self.assertEqual(result['load_audit_requested'], True)
        self.assertEqual(result["patch_audit_requested"], False)
        self.assertEqual(result["load_audit_requested"], True)

    def test_trigger_one_subcloud_patch_load_audits(self):
        subcloud = self.create_subcloud_static(self.ctx)
@ -351,9 +384,9 @@ class TestAuditManager(base.DCManagerTestCase):
        am.trigger_subcloud_patch_load_audits(self.ctx, subcloud.id)
        # Subcloud patch and load audits should be requested.
        result = db_api.subcloud_audits_get(self.ctx, subcloud.id)
        self.assertEqual(result['patch_audit_requested'], True)
        self.assertEqual(result['load_audit_requested'], True)
        self.assertEqual(result["patch_audit_requested"], True)
        self.assertEqual(result["load_audit_requested"], True)
        # Other audits should not be requested
        self.assertEqual(result['firmware_audit_requested'], False)
        self.assertEqual(result['kubernetes_audit_requested'], False)
        self.assertEqual(result['kube_rootca_update_audit_requested'], False)
        self.assertEqual(result["firmware_audit_requested"], False)
        self.assertEqual(result["kubernetes_audit_requested"], False)
        self.assertEqual(result["kube_rootca_update_audit_requested"], False)
File diff suppressed because it is too large
@ -5,6 +5,6 @@
#

# Content-type
TEXT_PLAIN = 'text/plain'
TEXT_HTML = 'text/html'
APPLICATION_JSON = 'application/json'
TEXT_PLAIN = "text/plain"
TEXT_HTML = "text/html"
APPLICATION_JSON = "application/json"

@ -1,5 +1,5 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
# Copyright (c) 2020, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -9,30 +9,32 @@ from dcmanager.db.sqlalchemy import api as db_api

def create_fake_strategy(
ctxt,
strategy_type,
subcloud_apply_type=consts.SUBCLOUD_APPLY_TYPE_PARALLEL,
state=consts.SW_UPDATE_STATE_INITIAL,
max_parallel_subclouds=2,
stop_on_failure=True,
extra_args=None):
ctxt,
strategy_type,
subcloud_apply_type=consts.SUBCLOUD_APPLY_TYPE_PARALLEL,
state=consts.SW_UPDATE_STATE_INITIAL,
max_parallel_subclouds=2,
stop_on_failure=True,
extra_args=None,
):
values = {
"type": strategy_type,
"subcloud_apply_type": subcloud_apply_type,
"max_parallel_subclouds": max_parallel_subclouds,
"stop_on_failure": stop_on_failure,
"state": state,
"extra_args": extra_args
"extra_args": extra_args,
}
return db_api.sw_update_strategy_create(ctxt, **values)

def create_fake_strategy_step(
ctxt,
state=consts.STRATEGY_STATE_INITIAL,
subcloud_id=1,
stage=1,
details="Dummy details"):
ctxt,
state=consts.STRATEGY_STATE_INITIAL,
subcloud_id=1,
stage=1,
details="Dummy details",
):
values = {
"subcloud_id": subcloud_id,
"stage": stage,

@ -1,5 +1,5 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
# Copyright (c) 2020, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -13,40 +13,46 @@ from dcmanager.tests import base
from dcmanager.tests import utils

FAKE_TENANT = utils.UUID1
FAKE_ID = '1'
FAKE_URL = '/v1.0/subclouds'
WRONG_URL = '/v1.0/wrong'
FAKE_ID = "1"
FAKE_URL = "/v1.0/subclouds"
WRONG_URL = "/v1.0/wrong"

FAKE_SOFTWARE_VERSION = '18.03'
FAKE_SOFTWARE_VERSION = "18.03"

FAKE_HEADERS = {'X-Tenant-Id': FAKE_TENANT, 'X_ROLE': 'admin,member,reader',
'X-Identity-Status': 'Confirmed', 'X-Project-Name': 'admin'}
FAKE_HEADERS = {
"X-Tenant-Id": FAKE_TENANT,
"X_ROLE": "admin,member,reader",
"X-Identity-Status": "Confirmed",
"X-Project-Name": "admin",
}

FAKE_SUBCLOUD_DATA = {"id": FAKE_ID,
"name": "subcloud1",
"description": "subcloud1 description",
"location": "subcloud1 location",
"system_mode": "duplex",
"management_subnet": "192.168.101.0/24",
"management_start_address": "192.168.101.2",
"management_end_address": "192.168.101.50",
"management_gateway_address": "192.168.101.1",
"systemcontroller_gateway_address": "192.168.204.101",
"deploy_status": consts.DEPLOY_STATE_DONE,
'error_description': consts.ERROR_DESC_EMPTY,
'region_name': base.SUBCLOUD_1['region_name'],
"external_oam_subnet": "10.10.10.0/24",
"external_oam_gateway_address": "10.10.10.1",
"external_oam_floating_address": "10.10.10.12",
"availability-status": "disabled"}
FAKE_SUBCLOUD_DATA = {
"id": FAKE_ID,
"name": "subcloud1",
"description": "subcloud1 description",
"location": "subcloud1 location",
"system_mode": "duplex",
"management_subnet": "192.168.101.0/24",
"management_start_address": "192.168.101.2",
"management_end_address": "192.168.101.50",
"management_gateway_address": "192.168.101.1",
"systemcontroller_gateway_address": "192.168.204.101",
"deploy_status": consts.DEPLOY_STATE_DONE,
"error_description": consts.ERROR_DESC_EMPTY,
"region_name": base.SUBCLOUD_1["region_name"],
"external_oam_subnet": "10.10.10.0/24",
"external_oam_gateway_address": "10.10.10.1",
"external_oam_floating_address": "10.10.10.12",
"availability-status": "disabled",
}

FAKE_BOOTSTRAP_VALUE = {
'bootstrap-address': '10.10.10.12',
'sysadmin_password': base64.b64encode('testpass'.encode("utf-8"))
"bootstrap-address": "10.10.10.12",
"sysadmin_password": base64.b64encode("testpass".encode("utf-8")),
}

FAKE_SUBCLOUD_BOOTSTRAP_PAYLOAD = {
'bootstrap-address': '10.10.10.12',
"bootstrap-address": "10.10.10.12",
"system_mode": "simplex",
"name": "subcloud1",
"description": "subcloud1 description",
@ -59,13 +65,12 @@ FAKE_SUBCLOUD_BOOTSTRAP_PAYLOAD = {
"external_oam_subnet": "10.10.10.0/24",
"external_oam_gateway_address": "10.10.10.1",
"external_oam_floating_address": "10.10.10.12",
'sysadmin_password':
(base64.b64encode('testpass'.encode("utf-8"))).decode('ascii'),
"sysadmin_password": (base64.b64encode("testpass".encode("utf-8"))).decode("ascii"),
}

FAKE_BOOTSTRAP_FILE_DATA = {
"system_mode": "simplex",
"name": "fake subcloud1",
"name": "fake_subcloud1",
"management_subnet": "192.168.101.0/24",
"management_start_address": "192.168.101.2",
"management_end_address": "192.168.101.50",
@ -116,13 +121,16 @@ FAKE_SUBCLOUD_INSTALL_VALUES_WITH_PERSISTENT_SIZE = {
"persistent_size": 40000,
}

FAKE_UPGRADES_METADATA = '''
FAKE_UPGRADES_METADATA = (
"""
<build>\n<version>0.1</version>\n<supported_upgrades>
\n<upgrade>\n<version>%s</version>\n</upgrade>
\n<upgrade>\n<version>21.12</version>\n</upgrade>
\n<upgrade>\n<version>22.12</version>\n</upgrade>
\n</supported_upgrades>\n</build>
''' % FAKE_SOFTWARE_VERSION
"""
% FAKE_SOFTWARE_VERSION
)

def create_fake_subcloud(ctxt, **kwargs):
@ -130,18 +138,18 @@ def create_fake_subcloud(ctxt, **kwargs):
"name": "subcloud1",
"description": "subcloud1 description",
"location": "subcloud1 location",
'software_version': FAKE_SOFTWARE_VERSION,
"software_version": FAKE_SOFTWARE_VERSION,
"management_subnet": "192.168.101.0/24",
"management_gateway_ip": "192.168.101.1",
"management_start_ip": "192.168.101.2",
"management_end_ip": "192.168.101.50",
"systemcontroller_gateway_ip": "192.168.204.101",
'deploy_status': consts.DEPLOY_STATE_DONE,
'error_description': consts.ERROR_DESC_EMPTY,
'region_name': base.SUBCLOUD_1['region_name'],
'openstack_installed': False,
'group_id': 1,
'data_install': 'data from install',
"deploy_status": consts.DEPLOY_STATE_DONE,
"error_description": consts.ERROR_DESC_EMPTY,
"region_name": base.SUBCLOUD_1["region_name"],
"openstack_installed": False,
"group_id": 1,
"data_install": "data from install",
}
values.update(kwargs)
return db_api.subcloud_create(ctxt, **values)

@ -44,50 +44,62 @@ class Subcloud(object):
self.data_upgrade = ""

@mock.patch.object(os, 'listdir')
@mock.patch.object(os, "listdir")
def test_check_deploy_files_in_alternate_location_with_all_file_exists(
self, mock_os_isdir, mock_os_listdir):
self, mock_os_isdir, mock_os_listdir
):
payload = {}
mock_os_isdir.return_value = True
mock_os_listdir.return_value = ['deploy-chart-fake-deployment-manager.tgz',
'deploy-overrides-fake-overrides-subcloud.yaml',
'deploy-playbook-fake-deployment-manager.yaml']
mock_os_listdir.return_value = [
"deploy-chart-fake-deployment-manager.tgz",
"deploy-overrides-fake-overrides-subcloud.yaml",
"deploy-playbook-fake-deployment-manager.yaml",
]

response = self.check_deploy_files_in_alternate_location(payload)
self.assertEqual(response, True)

def test_check_deploy_files_in_alternate_location_with_deploy_chart_not_exists(
self, mock_os_isdir, mock_os_listdir):
self, mock_os_isdir, mock_os_listdir
):
payload = {}
mock_os_isdir.return_value = True
mock_os_listdir.return_value = ['deploy-chart-fake.tgz',
'deploy-overrides-fake-overrides-subcloud.yaml',
'deploy-playbook-fake-deployment-manager.yaml']
mock_os_listdir.return_value = [
"deploy-chart-fake.tgz",
"deploy-overrides-fake-overrides-subcloud.yaml",
"deploy-playbook-fake-deployment-manager.yaml",
]

response = self.check_deploy_files_in_alternate_location(payload)
self.assertEqual(response, False)

def test_check_deploy_files_in_alternate_location_with_deploy_overrides_not_exists(
self, mock_os_isdir, mock_os_listdir):
self, mock_os_isdir, mock_os_listdir
):
payload = {}
mock_os_isdir.return_value = True
mock_os_listdir.return_value = ['deploy-chart-fake-deployment-manager.tgz',
'deploy-overrides.yaml',
'deploy-playbook-fake-deployment-manager.yaml']
mock_os_listdir.return_value = [
"deploy-chart-fake-deployment-manager.tgz",
"deploy-overrides.yaml",
"deploy-playbook-fake-deployment-manager.yaml",
]

response = self.check_deploy_files_in_alternate_location(payload)
self.assertEqual(response, False)

def test_check_deploy_files_in_alternate_location_with_deploy_playbook_not_exists(
self, mock_os_isdir, mock_os_listdir):
self, mock_os_isdir, mock_os_listdir
):
payload = {}
mock_os_isdir.return_value = True
mock_os_listdir.return_value = ['deploy-chart-fake-deployment-manager.tgz',
'deploy-overrides-fake-overrides-subcloud.yaml',
'deploy-playbook.yaml']
mock_os_listdir.return_value = [
"deploy-chart-fake-deployment-manager.tgz",
"deploy-overrides-fake-overrides-subcloud.yaml",
"deploy-playbook.yaml",
]

response = self.check_deploy_files_in_alternate_location(payload)
self.assertEqual(response, False)
@ -95,23 +107,19 @@ def test_check_deploy_files_in_alternate_location_with_deploy_playbook_not_exist

def test_get_config_file_path(self):
bootstrap_file = psd_common.get_config_file_path("subcloud1")
install_values = psd_common.get_config_file_path(
"subcloud1", "install_values"
)
deploy_config = psd_common.get_config_file_path(
"subcloud1", consts.DEPLOY_CONFIG
)
install_values = psd_common.get_config_file_path("subcloud1", "install_values")
deploy_config = psd_common.get_config_file_path("subcloud1", consts.DEPLOY_CONFIG)

self.assertEqual(
bootstrap_file, f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1.yml"
)
self.assertEqual(
install_values,
f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1/install_values.yml"
f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1/install_values.yml",
)
self.assertEqual(
deploy_config,
f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_deploy_config.yml"
f"{dccommon_consts.ANSIBLE_OVERRIDES_PATH}/subcloud1_deploy_config.yml",
)

@ -132,24 +140,22 @@ def test_format_ip_address(self):

fake_payload[consts.INSTALL_VALUES] = {}
for k, v in good_values.items():
fake_payload[consts.INSTALL_VALUES]['bmc_address'] = k
fake_payload[consts.INSTALL_VALUES]["bmc_address"] = k
psd_common.format_ip_address(fake_payload)
self.assertEqual(fake_payload[consts.INSTALL_VALUES]['bmc_address'], v)
self.assertEqual(fake_payload[consts.INSTALL_VALUES]["bmc_address"], v)

fake_payload['othervalues1'] = 'othervalues1'
fake_payload[consts.INSTALL_VALUES]['othervalues2'] = 'othervalues2'
fake_payload["othervalues1"] = "othervalues1"
fake_payload[consts.INSTALL_VALUES]["othervalues2"] = "othervalues2"
psd_common.format_ip_address(fake_payload)
self.assertEqual(fake_payload['othervalues1'], 'othervalues1')
self.assertEqual(fake_payload["othervalues1"], "othervalues1")
self.assertEqual(
fake_payload[consts.INSTALL_VALUES]['othervalues2'], 'othervalues2'
fake_payload[consts.INSTALL_VALUES]["othervalues2"], "othervalues2"
)

def test_get_subcloud_db_install_values(self):
install_data = copy.copy(fake_subcloud.FAKE_SUBCLOUD_INSTALL_VALUES)
encoded_password = base64.b64encode("bmc_password".encode("utf-8")).decode(
"utf-8"
)
encoded_password = base64.b64encode("bmc_password".encode("utf-8")).decode("utf-8")
install_data["bmc_password"] = encoded_password
test_subcloud = copy.copy(fake_subcloud.FAKE_SUBCLOUD_DATA)
subcloud_info = Subcloud(test_subcloud, False)
@ -158,8 +164,7 @@ def test_get_subcloud_db_install_values(self):
actual_result = psd_common.get_subcloud_db_install_values(subcloud_info)

self.assertEqual(
json.loads(json.dumps(install_data)),
json.loads(json.dumps(actual_result))
json.loads(json.dumps(install_data)), json.loads(json.dumps(actual_result))
)

@ -29,11 +29,13 @@ class DBAPISubcloudAlarm(base.DCManagerTestCase):

@staticmethod
def create_subcloud_alarms(ctxt, name):
values = {'critical_alarms': -1,
'major_alarms': -1,
'minor_alarms': -1,
'warnings': -1,
'cloud_status': consts.ALARMS_DISABLED}
values = {
"critical_alarms": -1,
"major_alarms": -1,
"minor_alarms": -1,
"warnings": -1,
"cloud_status": consts.ALARMS_DISABLED,
}
return db_api.subcloud_alarms_create(ctxt, name, values)

def setUp(self):
@ -41,68 +43,76 @@ class DBAPISubcloudAlarm(base.DCManagerTestCase):
# calling setUp for the superclass sets up the DB and context

def test_subcloud_alarms_create(self):
result = self.create_subcloud_alarms(self.ctx, 'subcloud1')
result = self.create_subcloud_alarms(self.ctx, "subcloud1")
self.assertIsNotNone(result)
self.assertEqual(result['name'], 'subcloud1')
self.assertEqual(result['cloud_status'], 'disabled')
self.assertEqual(result["name"], "subcloud1")
self.assertEqual(result["cloud_status"], "disabled")

def test_subcloud_alarms_create_duplicate(self):
result = self.create_subcloud_alarms(self.ctx, 'subcloud1')
result = self.create_subcloud_alarms(self.ctx, "subcloud1")
self.assertIsNotNone(result)
self.assertRaises(db_exception.DBDuplicateEntry,
self.create_subcloud_alarms,
self.ctx, 'subcloud1')
self.assertRaises(
db_exception.DBDuplicateEntry,
self.create_subcloud_alarms,
self.ctx,
"subcloud1",
)

def test_subcloud_alarms_get(self):
result = self.create_subcloud_alarms(self.ctx, 'subcloud1')
result = self.create_subcloud_alarms(self.ctx, "subcloud1")
self.assertIsNotNone(result)
subcloud = db_api.subcloud_alarms_get(self.ctx, 'subcloud1')
subcloud = db_api.subcloud_alarms_get(self.ctx, "subcloud1")
self.assertIsNotNone(subcloud)
self.assertEqual(subcloud['name'], 'subcloud1')
self.assertEqual(subcloud["name"], "subcloud1")

def test_subcloud_alarms_get_not_found(self):
result = self.create_subcloud_alarms(self.ctx, 'subcloud1')
result = self.create_subcloud_alarms(self.ctx, "subcloud1")
self.assertIsNotNone(result)
self.assertRaises(exception.SubcloudNameNotFound,
db_api.subcloud_alarms_get,
self.ctx, 'subcloud2')
self.assertRaises(
exception.SubcloudNameNotFound,
db_api.subcloud_alarms_get,
self.ctx,
"subcloud2",
)

def test_subcloud_alarms_get_all(self):
result = self.create_subcloud_alarms(self.ctx, 'subcloud1')
result = self.create_subcloud_alarms(self.ctx, "subcloud1")
self.assertIsNotNone(result)
result = self.create_subcloud_alarms(self.ctx, 'subcloud2')
result = self.create_subcloud_alarms(self.ctx, "subcloud2")
self.assertIsNotNone(result)
subclouds = db_api.subcloud_alarms_get_all(self.ctx)
self.assertEqual(len(subclouds), 2)
self.assertEqual(subclouds[0]['name'], 'subcloud2')
self.assertEqual(subclouds[1]['name'], 'subcloud1')
self.assertEqual(subclouds[0]["name"], "subcloud2")
self.assertEqual(subclouds[1]["name"], "subcloud1")

def test_subcloud_alarms_get_one(self):
result = self.create_subcloud_alarms(self.ctx, 'subcloud1')
result = self.create_subcloud_alarms(self.ctx, "subcloud1")
self.assertIsNotNone(result)
result = self.create_subcloud_alarms(self.ctx, 'subcloud2')
result = self.create_subcloud_alarms(self.ctx, "subcloud2")
self.assertIsNotNone(result)
subclouds = db_api.subcloud_alarms_get_all(self.ctx, 'subcloud1')
self.assertEqual(subclouds[0]['name'], 'subcloud1')
subclouds = db_api.subcloud_alarms_get_all(self.ctx, "subcloud1")
self.assertEqual(subclouds[0]["name"], "subcloud1")

def test_subcloud_alarms_update(self):
result = self.create_subcloud_alarms(self.ctx, 'subcloud1')
result = self.create_subcloud_alarms(self.ctx, "subcloud1")
self.assertIsNotNone(result)
values = {'critical_alarms': 0,
'major_alarms': 1,
'minor_alarms': 2,
'warnings': 3,
'cloud_status': consts.ALARM_DEGRADED_STATUS}
result = db_api.subcloud_alarms_update(self.ctx, 'subcloud1', values)
values = {
"critical_alarms": 0,
"major_alarms": 1,
"minor_alarms": 2,
"warnings": 3,
"cloud_status": consts.ALARM_DEGRADED_STATUS,
}
result = db_api.subcloud_alarms_update(self.ctx, "subcloud1", values)
self.assertIsNotNone(result)
self.assertEqual(result['major_alarms'], 1)
subcloud = db_api.subcloud_alarms_get(self.ctx, 'subcloud1')
self.assertEqual(result["major_alarms"], 1)
subcloud = db_api.subcloud_alarms_get(self.ctx, "subcloud1")
self.assertIsNotNone(subcloud)
self.assertEqual(subcloud['major_alarms'], 1)
self.assertEqual(subcloud["major_alarms"], 1)

def test_subcloud_alarms_delete(self):
result = self.create_subcloud_alarms(self.ctx, 'subcloud1')
result = self.create_subcloud_alarms(self.ctx, "subcloud1")
self.assertIsNotNone(result)
db_api.subcloud_alarms_delete(self.ctx, 'subcloud1')
db_api.subcloud_alarms_delete(self.ctx, "subcloud1")
subclouds = db_api.subcloud_alarms_get_all(self.ctx)
self.assertEqual(len(subclouds), 0)

@ -31,20 +31,20 @@ class DBAPISubcloudAuditsTest(base.DCManagerTestCase):
@staticmethod
def create_subcloud(ctxt, name, **kwargs):
values = {
'name': name,
'description': "This is a subcloud",
'location': "This is the location of the subcloud",
'software_version': "10.04",
'management_subnet': "192.168.101.0/24",
'management_gateway_ip': "192.168.101.1",
'management_start_ip': "192.168.101.2",
'management_end_ip': "192.168.101.50",
'systemcontroller_gateway_ip': "192.168.204.101",
'deploy_status': "not-deployed",
'error_description': 'No errors present',
'region_name': uuidutils.generate_uuid().replace("-", ""),
'openstack_installed': False,
'group_id': 1,
"name": name,
"description": "This is a subcloud",
"location": "This is the location of the subcloud",
"software_version": "10.04",
"management_subnet": "192.168.101.0/24",
"management_gateway_ip": "192.168.101.1",
"management_start_ip": "192.168.101.2",
"management_end_ip": "192.168.101.50",
"systemcontroller_gateway_ip": "192.168.204.101",
"deploy_status": "not-deployed",
"error_description": "No errors present",
"region_name": uuidutils.generate_uuid().replace("-", ""),
"openstack_installed": False,
"group_id": 1,
}
values.update(kwargs)
return db_api.subcloud_create(ctxt, **values)
@ -61,12 +61,8 @@ class DBAPISubcloudAuditsTest(base.DCManagerTestCase):
# Test the SubcloudAudits created when we created subcloud2 in setup.
result = db_api.subcloud_audits_get(self.ctx, 2)
self.assertEqual(result["subcloud_id"], 2)
self.assertEqual(
result["audit_started_at"], datetime.datetime(1, 1, 1, 0, 0)
)
self.assertEqual(
result["audit_finished_at"], datetime.datetime(1, 1, 1, 0, 0)
)
self.assertEqual(result["audit_started_at"], datetime.datetime(1, 1, 1, 0, 0))
self.assertEqual(result["audit_finished_at"], datetime.datetime(1, 1, 1, 0, 0))
self.assertEqual(result["patch_audit_requested"], False)
self.assertEqual(result["load_audit_requested"], False)
self.assertEqual(result["firmware_audit_requested"], False)
@ -221,7 +217,10 @@ class DBAPISubcloudAuditsTest(base.DCManagerTestCase):
subcloud_audits1 = db_api.subcloud_audits_get(self.ctx, 1)
subcloud_audits2 = db_api.subcloud_audits_get(self.ctx, 2)
subcloud_audits3 = db_api.subcloud_audits_get(self.ctx, 3)
self.assertEqual(subcloud_audits1["audit_finished_at"],
subcloud_audits2["audit_finished_at"])
self.assertTrue(subcloud_audits1["audit_finished_at"] >
subcloud_audits3["audit_finished_at"])
self.assertEqual(
subcloud_audits1["audit_finished_at"], subcloud_audits2["audit_finished_at"]
)
self.assertTrue(
subcloud_audits1["audit_finished_at"]
> subcloud_audits3["audit_finished_at"]
)

@ -43,20 +43,20 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
@staticmethod
def create_subcloud_static(ctxt, **kwargs):
values = {
'name': "subcloud1",
'description': "This is a subcloud",
'location': "This is the location of the subcloud",
'software_version': "10.04",
'management_subnet': "192.168.101.0/24",
'management_gateway_ip': "192.168.101.1",
'management_start_ip': "192.168.101.2",
'management_end_ip': "192.168.101.50",
'systemcontroller_gateway_ip': "192.168.204.101",
'deploy_status': "not-deployed",
'error_description': 'No errors present',
'region_name': base.SUBCLOUD_1['region_name'],
'openstack_installed': False,
'group_id': 1,
"name": "subcloud1",
"description": "This is a subcloud",
"location": "This is the location of the subcloud",
"software_version": "10.04",
"management_subnet": "192.168.101.0/24",
"management_gateway_ip": "192.168.101.1",
"management_start_ip": "192.168.101.2",
"management_end_ip": "192.168.101.50",
"systemcontroller_gateway_ip": "192.168.204.101",
"deploy_status": "not-deployed",
"error_description": "No errors present",
"region_name": base.SUBCLOUD_1["region_name"],
"openstack_installed": False,
"group_id": 1,
}
values.update(kwargs)
return db_api.subcloud_create(ctxt, **values)
@ -64,21 +64,20 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
@staticmethod
def create_subcloud(ctxt, data):
values = {
'name': data['name'],
'description': data['description'],
'location': data['location'],
'software_version': data['software-version'],
'management_subnet': data['management_subnet'],
'management_gateway_ip': data['management_gateway_address'],
'management_start_ip': data['management_start_address'],
'management_end_ip': data['management_end_address'],
'systemcontroller_gateway_ip': data[
'systemcontroller_gateway_address'],
'deploy_status': "not-deployed",
'error_description': 'No errors present',
'region_name': data['region_name'],
'openstack_installed': False,
'group_id': 1,
"name": data["name"],
"description": data["description"],
"location": data["location"],
"software_version": data["software-version"],
"management_subnet": data["management_subnet"],
"management_gateway_ip": data["management_gateway_address"],
"management_start_ip": data["management_start_address"],
"management_end_ip": data["management_end_address"],
"systemcontroller_gateway_ip": data["systemcontroller_gateway_address"],
"deploy_status": "not-deployed",
"error_description": "No errors present",
"region_name": data["region_name"],
"openstack_installed": False,
"group_id": 1,
}
return db_api.subcloud_create(ctxt, **values)

@ -143,29 +142,33 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
)

def test_create_multiple_subclouds(self):
name1 = 'testname1'
region1 = base.SUBCLOUD_1['region_name']
name2 = 'testname2'
region2 = base.SUBCLOUD_2['region_name']
name3 = 'testname3'
region3 = base.SUBCLOUD_3['region_name']
subcloud = self.create_subcloud_static(self.ctx,
name=name1,
region_name=region1)
name1 = "testname1"
region1 = base.SUBCLOUD_1["region_name"]
name2 = "testname2"
region2 = base.SUBCLOUD_2["region_name"]
name3 = "testname3"
region3 = base.SUBCLOUD_3["region_name"]
subcloud = self.create_subcloud_static(
self.ctx, name=name1, region_name=region1
)
self.assertIsNotNone(subcloud)

subcloud2 = self.create_subcloud_static(self.ctx,
name=name2,
region_name=region2,
management_start_ip="2.3.4.6",
management_end_ip="2.3.4.7")
subcloud2 = self.create_subcloud_static(
self.ctx,
name=name2,
region_name=region2,
management_start_ip="2.3.4.6",
management_end_ip="2.3.4.7",
)
self.assertIsNotNone(subcloud2)

subcloud3 = self.create_subcloud_static(self.ctx,
name=name3,
region_name=region3,
management_start_ip="3.3.4.6",
management_end_ip="3.3.4.7")
subcloud3 = self.create_subcloud_static(
self.ctx,
name=name3,
region_name=region3,
management_start_ip="3.3.4.6",
management_end_ip="3.3.4.7",
)
self.assertIsNotNone(subcloud3)

new_subclouds = db_api.subcloud_get_all(self.ctx)
@ -183,13 +186,13 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
subcloud = self.create_subcloud(self.ctx, fake_subcloud)
self.assertIsNotNone(subcloud)

management_state = 'testmanagementstate'
availability_status = 'testavailabilitystatus'
software_version = 'testversion'
admin_subnet = '192.168.102.0/24'
admin_start_ip = '192.168.102.5'
admin_end_ip = '192.168.102.49'
admin_gateway_ip = '192.168.102.1'
management_state = "testmanagementstate"
availability_status = "testavailabilitystatus"
software_version = "testversion"
admin_subnet = "192.168.102.0/24"
admin_start_ip = "192.168.102.5"
admin_end_ip = "192.168.102.49"
admin_gateway_ip = "192.168.102.1"
rehomed = True
updated = db_api.subcloud_update(
self.ctx,
@ -201,7 +204,8 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
management_start_ip=admin_start_ip,
management_end_ip=admin_end_ip,
management_gateway_ip=admin_gateway_ip,
rehomed=rehomed)
rehomed=rehomed,
)
self.assertIsNotNone(updated)
self.assertEqual(management_state, updated.management_state)
self.assertEqual(availability_status, updated.availability_status)
@ -213,22 +217,14 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
self.assertEqual(rehomed, updated.rehomed)

updated_subcloud = db_api.subcloud_get(self.ctx, subcloud.id)
self.assertEqual(management_state,
updated_subcloud.management_state)
self.assertEqual(availability_status,
updated_subcloud.availability_status)
self.assertEqual(software_version,
updated_subcloud.software_version)
self.assertEqual(admin_subnet,
updated_subcloud.management_subnet)
self.assertEqual(admin_start_ip,
updated_subcloud.management_start_ip)
self.assertEqual(admin_end_ip,
updated_subcloud.management_end_ip)
self.assertEqual(admin_gateway_ip,
updated_subcloud.management_gateway_ip)
self.assertEqual(rehomed,
updated_subcloud.rehomed)
self.assertEqual(management_state, updated_subcloud.management_state)
self.assertEqual(availability_status, updated_subcloud.availability_status)
self.assertEqual(software_version, updated_subcloud.software_version)
self.assertEqual(admin_subnet, updated_subcloud.management_subnet)
self.assertEqual(admin_start_ip, updated_subcloud.management_start_ip)
self.assertEqual(admin_end_ip, updated_subcloud.management_end_ip)
self.assertEqual(admin_gateway_ip, updated_subcloud.management_gateway_ip)
self.assertEqual(rehomed, updated_subcloud.rehomed)

def test_delete_subcloud(self):
fake_subcloud = utils.create_subcloud_dict(base.SUBCLOUD_SAMPLE_DATA_0)
@ -313,9 +309,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase):

new_subcloud_statuses = db_api.subcloud_status_get_all(self.ctx, subcloud.id)
self.assertIsNotNone(new_subcloud_statuses)
self.assertEqual(
num_default_subcloud_statuses + 3, len(new_subcloud_statuses)
)
self.assertEqual(num_default_subcloud_statuses + 3, len(new_subcloud_statuses))
self.assertEqual(
endpoint_type1,
new_subcloud_statuses[num_default_subcloud_statuses].endpoint_type,
@ -521,13 +515,9 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
)
self.assertIsNotNone(subcloud_status3)

new_subcloud_statuses = db_api.subcloud_status_get_all_by_name(
self.ctx, name
)
new_subcloud_statuses = db_api.subcloud_status_get_all_by_name(self.ctx, name)
self.assertIsNotNone(new_subcloud_statuses)
self.assertEqual(
num_default_subcloud_statuses + 3, len(new_subcloud_statuses)
)
self.assertEqual(num_default_subcloud_statuses + 3, len(new_subcloud_statuses))
self.assertEqual(
endpoint_type1,
new_subcloud_statuses[num_default_subcloud_statuses].endpoint_type,
@ -589,9 +579,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase):
)
self.assertEqual(42, new_sw_update_strategy.max_parallel_subclouds)
self.assertEqual(False, new_sw_update_strategy.stop_on_failure)
self.assertEqual(
consts.SW_UPDATE_STATE_APPLYING, new_sw_update_strategy.state
)
self.assertEqual(consts.SW_UPDATE_STATE_APPLYING, new_sw_update_strategy.state)

def test_create_sw_update_strategy_duplicate(self):
sw_update_strategy = self.create_sw_update_strategy(self.ctx)
@ -619,9 +607,7 @@ class DBAPISubcloudTest(base.DCManagerTestCase):

db_api.sw_update_strategy_destroy(self.ctx)

self.assertRaises(
exceptions.NotFound, db_api.sw_update_strategy_get, self.ctx
)
self.assertRaises(exceptions.NotFound, db_api.sw_update_strategy_get, self.ctx)

def test_create_strategy_step(self):
name = "testname"

@ -1,4 +1,4 @@
# Copyright (c) 2017-2021 Wind River Systems, Inc.
# Copyright (c) 2017-2021, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -16,9 +16,9 @@ import uuid
from oslo_utils import timeutils

# VIM constants for Strategy
APPLY_TYPE_SERIAL = 'serial'
INSTANCE_ACTION_STOP_START = 'stop-start'
ALARM_RESTRICTIONS_STRICT = 'strict'
APPLY_TYPE_SERIAL = "serial"
INSTANCE_ACTION_STOP_START = "stop-start"
ALARM_RESTRICTIONS_STRICT = "strict"

class FakeVimClient(object):
@ -29,24 +29,26 @@ class FakeVimClient(object):
class FakeVimStrategy(object):
"""Represents a VIM Strategy object defined in:

starlingx/nfv/nfv-client/nfv_client/openstack/sw_update.py
starlingx/nfv/nfv-client/nfv_client/openstack/sw_update.py
"""

def __init__(self,
name="VIM Strategy",
controller_apply_type=APPLY_TYPE_SERIAL,
storage_apply_type=APPLY_TYPE_SERIAL,
swift_apply_type=APPLY_TYPE_SERIAL,
worker_apply_type=APPLY_TYPE_SERIAL,
max_parallel_worker_hosts=2,
default_instance_action=INSTANCE_ACTION_STOP_START,
alarm_restrictions=ALARM_RESTRICTIONS_STRICT,
current_phase=None,
current_phase_completion_percentage=0,
state=None,
build_phase=None,
apply_phase=None,
abort_phase=None):
def __init__(
self,
name="VIM Strategy",
controller_apply_type=APPLY_TYPE_SERIAL,
storage_apply_type=APPLY_TYPE_SERIAL,
swift_apply_type=APPLY_TYPE_SERIAL,
worker_apply_type=APPLY_TYPE_SERIAL,
max_parallel_worker_hosts=2,
default_instance_action=INSTANCE_ACTION_STOP_START,
alarm_restrictions=ALARM_RESTRICTIONS_STRICT,
current_phase=None,
current_phase_completion_percentage=0,
state=None,
build_phase=None,
apply_phase=None,
abort_phase=None,
):
self.uuid = str(uuid.uuid4())
self.name = name
self.controller_apply_type = controller_apply_type
@ -57,8 +59,7 @@ class FakeVimStrategy(object):
self.default_instance_action = default_instance_action
self.alarm_restrictions = alarm_restrictions
self.current_phase = current_phase
self.current_phase_completion_percentage =\
current_phase_completion_percentage
self.current_phase_completion_percentage = current_phase_completion_percentage
self.state = state
self.build_phase = build_phase
self.apply_phase = apply_phase
@ -79,13 +80,13 @@ class FakeVimStrategyPhase(object):
class SwUpdateStrategy(object):
def __init__(self, id, data):
self.id = id
self.type = data['type']
self.subcloud_apply_type = data['subcloud-apply-type']
self.max_parallel_subclouds = int(data['max-parallel-subclouds'])
if data['stop-on-failure'] == 'true':
self.type = data["type"]
self.subcloud_apply_type = data["subcloud-apply-type"]
self.max_parallel_subclouds = int(data["max-parallel-subclouds"])
if data["stop-on-failure"] == "true":
self.stop_on_failure = True
else:
self.stop_on_failure = False
self.state = data['state']
self.state = data["state"]
self.created_at = timeutils.utcnow()
self.updated_at = timeutils.utcnow()

@ -22,12 +22,12 @@ from dcmanager.tests.unit.manager import test_system_peer_manager as tsm

# FAKE SUBCLOUD PEER GROUP DATA (SITE1)
FAKE_SITE1_PEER_GROUP_ID = 9
FAKE_SITE1_PEER_GROUP_NAME = 'PeerGroup2'
FAKE_SITE1_PEER_GROUP_NAME = "PeerGroup2"
FAKE_SITE1_PEER_GROUP_SYSTEM_LEADER_ID = tpm.FAKE_SYSTEM_PEER_UUID  # SITE1 UUID
FAKE_SITE1_PEER_GROUP_SYSTEM_LEADER_NAME = tpm.FAKE_SYSTEM_PEER_NAME  # SITE1 NAME
FAKE_SITE1_PEER_GROUP_MAX_SUBCLOUDS_REHOMING = 20
FAKE_SITE1_PEER_GROUP_PRIORITY = 1
FAKE_SITE1_PEER_GROUP_STATE = 'enabled'
FAKE_SITE1_PEER_GROUP_STATE = "enabled"
FAKE_SITE1_PEER_GROUP_MIGRATION_STATUS = consts.PEER_GROUP_MIGRATION_COMPLETE
FAKE_SITE1_PEER_GROUP_DATA = {
"peer_group_name": FAKE_SITE1_PEER_GROUP_NAME,
@ -36,46 +36,46 @@ FAKE_SITE1_PEER_GROUP_DATA = {
"group_priority": FAKE_SITE1_PEER_GROUP_PRIORITY,
"group_state": FAKE_SITE1_PEER_GROUP_STATE,
"max_subcloud_rehoming": FAKE_SITE1_PEER_GROUP_MAX_SUBCLOUDS_REHOMING,
"migration_status": FAKE_SITE1_PEER_GROUP_MIGRATION_STATUS
"migration_status": FAKE_SITE1_PEER_GROUP_MIGRATION_STATUS,
}

# FAKE SUBCLOUD PEER GROUP DATA (SITE2)
FAKE_SITE2_PEER_GROUP_PRIORITY = 0
FAKE_SITE2_PEER_GROUP_DATA = {
"peer_group_name": 'PeerGroup3',
"peer_group_name": "PeerGroup3",
"system_leader_id": FAKE_SITE1_PEER_GROUP_SYSTEM_LEADER_ID,
"system_leader_name": FAKE_SITE1_PEER_GROUP_SYSTEM_LEADER_NAME,
"group_priority": FAKE_SITE2_PEER_GROUP_PRIORITY,
"group_state": FAKE_SITE1_PEER_GROUP_STATE,
"max_subcloud_rehoming": FAKE_SITE1_PEER_GROUP_MAX_SUBCLOUDS_REHOMING,
"migration_status": consts.PEER_GROUP_MIGRATING
"migration_status": consts.PEER_GROUP_MIGRATING,
}

# FAKE SUBCLOUD
FAKE_SUBCLOUD1_REGION_NAME = str(uuid.uuid4())
FAKE_SUBCLOUD1_NAME = 'subcloud1'
FAKE_SUBCLOUD1_NAME = "subcloud1"
FAKE_SUBCLOUD2_REGION_NAME = str(uuid.uuid4())
FAKE_SUBCLOUD2_NAME = 'subcloud2'
FAKE_SUBCLOUD2_NAME = "subcloud2"
FAKE_SUBCLOUD3_REGION_NAME = str(uuid.uuid4())
FAKE_SUBCLOUD3_NAME = 'subcloud3'
FAKE_SUBCLOUD3_NAME = "subcloud3"

# FAKE SUBCLOUD REHOME DATA
FAKE_REHOME_DATA1 = {
"saved_payload": {
"bootstrap-address": "192.168.10.11",
"systemcontroller_gateway_address": "192.168.204.101"
"systemcontroller_gateway_address": "192.168.204.101",
}
}
FAKE_REHOME_DATA2 = {
"saved_payload": {
"bootstrap-address": "192.168.10.12",
"systemcontroller_gateway_address": "192.168.204.101"
"systemcontroller_gateway_address": "192.168.204.101",
}
}
FAKE_REHOME_DATA3 = {
"saved_payload": {
"bootstrap-address": "192.168.10.13",
"systemcontroller_gateway_address": "192.168.204.101"
"systemcontroller_gateway_address": "192.168.204.101",
}
}

@ -92,7 +92,7 @@ FAKE_SITE1_SUBCLOUD1_DATA = {
"deploy-status": FAKE_SITE1_SUBCLOUD1_DEPLOY_STATUS,
"management-state": FAKE_SITE1_SUBCLOUD1_MANAGEMENT_STATE,
"peer_group_id": FAKE_SITE1_SUBCLOUD1_PEER_GROUP_ID,
"rehome_data": json.dumps(FAKE_REHOME_DATA1)
"rehome_data": json.dumps(FAKE_REHOME_DATA1),
}
FAKE_SITE1_SUBCLOUD2_ID = 12
FAKE_SITE1_SUBCLOUD2_REGION_NAME = FAKE_SUBCLOUD2_REGION_NAME
@ -108,7 +108,7 @@ FAKE_SITE1_SUBCLOUD2_DATA = {
"peer_group_id": FAKE_SITE1_SUBCLOUD2_PEER_GROUP_ID,
# To test syncing rehome_data from site1(remote) to site0(local),
# we set the rehome_data to data3 instead of data2 for remote subcloud2
"rehome_data": json.dumps(FAKE_REHOME_DATA3)
"rehome_data": json.dumps(FAKE_REHOME_DATA3),
}

@ -121,23 +121,24 @@ class TestPeerGroupAudit(DCManagerTestCase):
self._mock_peer_monitor_manager_get_peer_dc_client()

self.pm = peer_group_audit_manager.PeerGroupAuditManager(
self.mock_subcloud_manager, FAKE_SITE1_PEER_GROUP_ID)
self.mock_subcloud_manager, FAKE_SITE1_PEER_GROUP_ID
)

def _mock_peer_monitor_manager_get_peer_dc_client(self):
"""Mock peer_monitor_manager's get_peer_dc_client"""

mock_patch = mock.patch.object(
peer_group_audit_manager.SystemPeerManager, 'get_peer_dc_client')
peer_group_audit_manager.SystemPeerManager, "get_peer_dc_client"
)
self.mock_get_peer_dc_client = mock_patch.start()
self.addCleanup(mock_patch.stop)

self.peer = tpm.TestPeerMonitor.create_system_peer_static(
self.ctx,
peer_name='SystemPeer1')
self.peer_group = tsm.TestSystemPeerManager. \
create_subcloud_peer_group_static(
self.ctx,
peer_group_name='SubcloudPeerGroup1')
self.ctx, peer_name="SystemPeer1"
)
self.peer_group = tsm.TestSystemPeerManager.create_subcloud_peer_group_static(
self.ctx, peer_group_name="SubcloudPeerGroup1"
)
# Create local dc subcloud1 mock data in database
self.subcloud1 = tsm.TestSystemPeerManager.create_subcloud_with_pg_static(
self.ctx,
@ -145,7 +146,8 @@ class TestPeerGroupAudit(DCManagerTestCase):
rehome_data=json.dumps(FAKE_REHOME_DATA1),
name=FAKE_SUBCLOUD1_NAME,
region_name=FAKE_SUBCLOUD1_REGION_NAME,
deploy_status=consts.DEPLOY_STATE_REHOME_PENDING)
deploy_status=consts.DEPLOY_STATE_REHOME_PENDING,
)
# Create local dc subcloud2 mock data in database
self.subcloud2 = tsm.TestSystemPeerManager.create_subcloud_with_pg_static(
self.ctx,
@ -153,7 +155,8 @@ class TestPeerGroupAudit(DCManagerTestCase):
rehome_data=json.dumps(FAKE_REHOME_DATA2),
name=FAKE_SUBCLOUD2_NAME,
region_name=FAKE_SUBCLOUD2_REGION_NAME,
deploy_status=consts.DEPLOY_STATE_REHOME_PENDING)
deploy_status=consts.DEPLOY_STATE_REHOME_PENDING,
)
# Create local dc subcloud3 mock data in database
self.subcloud3 = tsm.TestSystemPeerManager.create_subcloud_with_pg_static(
self.ctx,
@ -161,7 +164,8 @@ class TestPeerGroupAudit(DCManagerTestCase):
rehome_data=json.dumps(FAKE_REHOME_DATA3),
name=FAKE_SUBCLOUD3_NAME,
region_name=FAKE_SUBCLOUD3_REGION_NAME,
deploy_status=consts.DEPLOY_STATE_REHOME_PENDING)
deploy_status=consts.DEPLOY_STATE_REHOME_PENDING,
)
# Remote subclouds
self.peer_subcloud1 = copy.deepcopy(FAKE_SITE1_SUBCLOUD1_DATA)
self.peer_subcloud2 = copy.deepcopy(FAKE_SITE1_SUBCLOUD2_DATA)
@ -170,9 +174,10 @@ class TestPeerGroupAudit(DCManagerTestCase):
self.remote_peer_group2 = copy.deepcopy(FAKE_SITE2_PEER_GROUP_DATA)

# Initialize mock objects
self.mock_update_sync_status = \
mock.patch.object(SystemPeerManager, 'update_sync_status').start()
mock_get_local_system = mock.patch.object(utils, 'get_local_system').start()
self.mock_update_sync_status = mock.patch.object(
SystemPeerManager, "update_sync_status"
).start()
mock_get_local_system = mock.patch.object(utils, "get_local_system").start()

# Cleanup mock objects after test finishes
self.addCleanup(self.mock_update_sync_status.stop())
@ -183,13 +188,19 @@ class TestPeerGroupAudit(DCManagerTestCase):
self.mock_dc_client = mock.MagicMock()
self.mock_get_peer_dc_client.return_value = self.mock_dc_client()
self.mock_dc_client().get_subcloud_list_by_peer_group.return_value = [
self.peer_subcloud1, self.peer_subcloud2]
self.peer_subcloud1,
self.peer_subcloud2,
]
self.mock_dc_client().get_system_peer.return_value = mock.MagicMock()
self.mock_dc_client().get_peer_group_association_with_peer_id_and_pg_id. \
return_value = {
"sync-status": consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC}
peer_group_assoc = (
self.mock_dc_client().get_peer_group_association_with_peer_id_and_pg_id
)
peer_group_assoc.return_value = {
"sync-status": consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC
}
self.pm._set_local_subcloud_to_secondary = mock.MagicMock(
wraps=self.pm._set_local_subcloud_to_secondary)
wraps=self.pm._set_local_subcloud_to_secondary
)
self.pm.audit(self.peer, remote_peer_group, self.peer_group)
return self.pm

@ -207,26 +218,33 @@ class TestPeerGroupAudit(DCManagerTestCase):
# from data2 to data3, syncing from the remote site subcloud2
self.assertEqual(
json.dumps(FAKE_REHOME_DATA3),
db_api.subcloud_get(self.ctx, self.subcloud2.id).rehome_data
db_api.subcloud_get(self.ctx, self.subcloud2.id).rehome_data,
)
# Verify that the subcloud3 is deleted because it doesn't
# exist in the peer site
self.mock_subcloud_manager.delete_subcloud.assert_called_with(
pm.context, self.subcloud3.id)
pm.context, self.subcloud3.id
)
# Verify that the system leader id is updated to the peer site uuid
self.assertEqual(
tpm.FAKE_SITE1_SYSTEM_UUID,
db_api.subcloud_peer_group_get(self.ctx, self.peer_group.id)
.system_leader_id
db_api.subcloud_peer_group_get(
self.ctx, self.peer_group.id
).system_leader_id,
)
# Verify that the migration status of the remote peer group is updated
# to None since the migration completed
self.mock_dc_client().update_subcloud_peer_group.assert_called_with(
self.remote_peer_group.get("peer_group_name"), migration_status=None)
self.remote_peer_group.get("peer_group_name"), migration_status=None
)
# Verify that the PGA sync status is updated to in-sync
self.mock_update_sync_status.assert_called_with(
pm.context, self.peer, consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
self.peer_group, self.remote_peer_group)
pm.context,
self.peer,
consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
self.peer_group,
self.remote_peer_group,
)

def test_audit_migration_complete_with_partial_failure(self):
# Remove local subcloud3
@ -242,18 +260,20 @@ class TestPeerGroupAudit(DCManagerTestCase):
# Verify that the local subcloud2 is also set to rehome-failed
self.assertEqual(
consts.DEPLOY_STATE_REHOME_FAILED,
db_api.subcloud_get(self.ctx, self.subcloud2.id).deploy_status
db_api.subcloud_get(self.ctx, self.subcloud2.id).deploy_status,
)
# Verify that the system leader id is updated to the peer site uuid
self.assertEqual(
tpm.FAKE_SITE1_SYSTEM_UUID,
db_api.subcloud_peer_group_get(self.ctx, self.peer_group.id)
.system_leader_id
db_api.subcloud_peer_group_get(
self.ctx, self.peer_group.id
).system_leader_id,
)
# Verify that the migration status of the remote peer group is updated
# to None since the migration completed
self.mock_dc_client().update_subcloud_peer_group.assert_called_with(
self.remote_peer_group.get('peer_group_name'), migration_status=None)
self.remote_peer_group.get("peer_group_name"), migration_status=None
)
# Verify that the PGA sync status remains out-of-sync due to rehome failure
self.mock_update_sync_status.assert_not_called()

@ -273,29 +293,29 @@ class TestPeerGroupAudit(DCManagerTestCase):
# Verify that the local subclouds are also set to rehome-failed
self.assertEqual(
consts.DEPLOY_STATE_REHOME_FAILED,
db_api.subcloud_get(self.ctx, self.subcloud1.id).deploy_status
db_api.subcloud_get(self.ctx, self.subcloud1.id).deploy_status,
)
self.assertEqual(
consts.DEPLOY_STATE_REHOME_FAILED,
db_api.subcloud_get(self.ctx, self.subcloud2.id).deploy_status
db_api.subcloud_get(self.ctx, self.subcloud2.id).deploy_status,
)
# Verify that the system leader id is updated to the peer site uuid
self.assertEqual(
tpm.FAKE_SITE1_SYSTEM_UUID,
db_api.subcloud_peer_group_get(self.ctx, self.peer_group.id)
.system_leader_id
db_api.subcloud_peer_group_get(
self.ctx, self.peer_group.id
).system_leader_id,
)
# Verify that the migration status of the remote peer group is updated
# to None since the migration completed
self.mock_dc_client().update_subcloud_peer_group.assert_called_with(
self.remote_peer_group.get('peer_group_name'), migration_status=None)
self.remote_peer_group.get("peer_group_name"), migration_status=None
)
# Verify that the PGA sync status remains out-of-sync due to rehome failure
self.mock_update_sync_status.assert_not_called()

def test_audit_subcloud_management_state_managed(self):
db_api.subcloud_update(
self.ctx, self.subcloud3.id, management_state='managed'
)
db_api.subcloud_update(self.ctx, self.subcloud3.id, management_state="managed")
self.run_audit(self.remote_peer_group2)
self.mock_subcloud_manager.update_subcloud.assert_called()
expected_calls = [
@ -317,7 +337,7 @@ class TestPeerGroupAudit(DCManagerTestCase):
self.subcloud3.id,
management_state=dccommon_consts.MANAGEMENT_UNMANAGED,
deploy_status=consts.DEPLOY_STATE_REHOME_PENDING,
)
),
]
self.mock_subcloud_manager.update_subcloud.assert_has_calls(expected_calls)

@ -326,21 +346,22 @@ class TestPeerGroupAudit(DCManagerTestCase):
self.mock_get_peer_dc_client.assert_called()
self.mock_get_peer_dc_client().audit_subcloud_peer_group.assert_called()
self.assertEqual(
response, self.mock_get_peer_dc_client().audit_subcloud_peer_group())
response, self.mock_get_peer_dc_client().audit_subcloud_peer_group()
)

def test_send_audit_peer_group_exception(self):
self.mock_get_peer_dc_client.side_effect = Exception('boom')
self.mock_get_peer_dc_client.side_effect = Exception("boom")
self.pm.send_audit_peer_group([self.peer], self.peer_group)
self.mock_log.exception.assert_called_once_with(
'Failed to send audit request for peer group '
f'{self.peer_group.peer_group_name} to DC: {self.peer.peer_name}'
"Failed to send audit request for peer group "
f"{self.peer_group.peer_group_name} to DC: {self.peer.peer_name}"
)

def test_audit_fail_to_unmanage(self):
self.mock_subcloud_manager.update_subcloud.side_effect = Exception('boom')
self.mock_subcloud_manager.update_subcloud.side_effect = Exception("boom")
self.assertRaisesRegex(
Exception, 'boom', self.run_audit,
self.remote_peer_group2)
Exception, "boom", self.run_audit, self.remote_peer_group2
)

@mock.patch.object(peer_group_audit_manager.PeerGroupAuditManager, "audit")
def test_audit_peer_group_from_system(self, mock_audit):
@ -351,32 +372,30 @@ class TestPeerGroupAudit(DCManagerTestCase):

def test_audit_update_subcloud_exception(self):
db_api.subcloud_update(
self.ctx, self.subcloud1.id,
management_state=dccommon_consts.MANAGEMENT_MANAGED
self.ctx,
self.subcloud1.id,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
)
self.mock_subcloud_manager.update_subcloud.side_effect = Exception('boom')
self.mock_subcloud_manager.update_subcloud.side_effect = Exception("boom")
self.assertRaisesRegex(
Exception, 'boom', self.run_audit, self.remote_peer_group)
Exception, "boom", self.run_audit, self.remote_peer_group
)

def test_audit_failed_to_get_system_peer(self):
self.mock_get_peer_dc_client().\
get_system_peer.side_effect = Exception('boom')
self.mock_get_peer_dc_client().get_system_peer.side_effect = Exception("boom")
self.pm.audit(self.peer, self.remote_peer_group, self.peer_group)
self.mock_log.exception.assert_called_once_with(
'Failed to get subclouds of peer group None from DC: '
f'{self.peer.peer_name}'
f"Failed to get subclouds of peer group None from DC: {self.peer.peer_name}"
)

def test_audit_clear_fault_exception(self):
self.pm.fm_api = mock.MagicMock()
self.pm.fm_api.clear_fault.side_effect = Exception('boom')
self.pm.fm_api.clear_fault.side_effect = Exception("boom")
self.remote_peer_group["group_priority"] = 0
self.run_audit()
self.mock_log.exception.assert_called_once_with(
'Problem clearing fault '
f'[peer_group={self.peer_group.peer_group_name},'
f'peer={self.peer.peer_uuid}],'
' alarm_id=280.005'
f"Problem clearing fault [peer_group={self.peer_group.peer_group_name},"
f"peer={self.peer.peer_uuid}], alarm_id=280.005"
)

def test_audit_set_fault(self):
@ -385,17 +404,18 @@ class TestPeerGroupAudit(DCManagerTestCase):
self.pm.fm_api.set_fault.assert_called()

def test_audit_quit(self):
self.peer_group = tsm.TestSystemPeerManager. \
create_subcloud_peer_group_static(
self.ctx,
peer_group_name='SubcloudPeerGroup2',
migration_status=consts.PEER_GROUP_MIGRATING)
self.peer_group = tsm.TestSystemPeerManager.create_subcloud_peer_group_static(
self.ctx,
peer_group_name="SubcloudPeerGroup2",
migration_status=consts.PEER_GROUP_MIGRATING,
)
self.run_audit()
self.mock_log.info.assert_called_with(
'Local peer group in migrating state, quit audit'
"Local peer group in migrating state, quit audit"
)

def test_audit_delete_subcloud_exception(self):
self.mock_subcloud_manager.delete_subcloud.side_effect = Exception('boom')
self.mock_subcloud_manager.delete_subcloud.side_effect = Exception("boom")
self.assertRaisesRegex(
Exception, 'boom', self.run_audit, self.remote_peer_group)
Exception, "boom", self.run_audit, self.remote_peer_group
)

@ -25,11 +25,11 @@ FAKE_SITE1_SYSTEM_UUID = str(uuid.uuid4())
# FAKE SYSTEM PEER DATA
FAKE_SYSTEM_PEER_ID = 1
FAKE_SYSTEM_PEER_UUID = FAKE_SITE1_SYSTEM_UUID
FAKE_SYSTEM_PEER_NAME = 'PeerSite1'
FAKE_MANAGER_ENDPOINT = 'http://128.128.128.128:5000/v3'
FAKE_MANAGER_USERNAME = 'admin'
FAKE_MANAGER_PASSWORD = 'cGFzc3dvcmQ='
FAKE_PEER_CONTROLLER_GATEWAY_IP = '128.128.1.1'
FAKE_SYSTEM_PEER_NAME = "PeerSite1"
FAKE_MANAGER_ENDPOINT = "http://128.128.128.128:5000/v3"
FAKE_MANAGER_USERNAME = "admin"
FAKE_MANAGER_PASSWORD = "cGFzc3dvcmQ="
FAKE_PEER_CONTROLLER_GATEWAY_IP = "128.128.1.1"

# FAKE SYSTEM PEER DATA (SITE1)
FAKE_SITE1_SYSTEM_PEER_ID = 10
@ -51,39 +51,44 @@ class TestPeerMonitor(base.DCManagerTestCase):
        self._mock_peer_monitor_manager_get_peer_dc_client()

        self.peer = self.create_system_peer_static(self.ctx)
        self.peer_group1 = test_system_peer_manager.TestSystemPeerManager.\
            create_subcloud_peer_group_static(
                self.ctx, peer_group_name='SubcloudPeerGroup1')
        self.peer_group2 = test_system_peer_manager.TestSystemPeerManager.\
            create_subcloud_peer_group_static(
                self.ctx, peer_group_name='SubcloudPeerGroup2')
        self.association = test_system_peer_manager.TestSystemPeerManager.\
            create_peer_group_association_static(
                self.ctx, system_peer_id=self.peer.id,
                peer_group_id=self.peer_group1.id)
        self.system_peer_manager = test_system_peer_manager.TestSystemPeerManager
        self.peer_group1 = self.system_peer_manager.create_subcloud_peer_group_static(
            self.ctx, peer_group_name="SubcloudPeerGroup1"
        )
        self.peer_group2 = self.system_peer_manager.create_subcloud_peer_group_static(
            self.ctx, peer_group_name="SubcloudPeerGroup2"
        )
        self.association = (
            self.system_peer_manager.create_peer_group_association_static(
                self.ctx, system_peer_id=self.peer.id, peer_group_id=self.peer_group1.id
            )
        )

        self.peer_monitor = peer_monitor_manager.PeerMonitor(
            self.peer, self.ctx, self.mock_subcloud_manager)
        self.peer_monitor_manager = peer_monitor_manager.\
            PeerMonitorManager(self.mock_subcloud_manager)
            self.peer, self.ctx, self.mock_subcloud_manager
        )
        self.peer_monitor_manager = peer_monitor_manager.PeerMonitorManager(
            self.mock_subcloud_manager
        )

    def _mock_peer_monitor_manager_get_peer_dc_client(self):
        """Mock peer_monitor_manager's get_peer_dc_client"""

        mock_patch = mock.patch.object(
            peer_monitor_manager.SystemPeerManager, 'get_peer_dc_client')
            peer_monitor_manager.SystemPeerManager, "get_peer_dc_client"
        )
        self.mock_get_peer_dc_client = mock_patch.start()
        self.addCleanup(mock_patch.stop)

    @staticmethod
    def create_system_peer_static(ctxt, **kwargs):
        values = {
            'peer_uuid': FAKE_SYSTEM_PEER_UUID,
            'peer_name': FAKE_SYSTEM_PEER_NAME,
            'endpoint': FAKE_MANAGER_ENDPOINT,
            'username': FAKE_MANAGER_USERNAME,
            'password': FAKE_MANAGER_PASSWORD,
            'gateway_ip': FAKE_PEER_CONTROLLER_GATEWAY_IP
            "peer_uuid": FAKE_SYSTEM_PEER_UUID,
            "peer_name": FAKE_SYSTEM_PEER_NAME,
            "endpoint": FAKE_MANAGER_ENDPOINT,
            "username": FAKE_MANAGER_USERNAME,
            "password": FAKE_MANAGER_PASSWORD,
            "gateway_ip": FAKE_PEER_CONTROLLER_GATEWAY_IP,
        }
        values.update(kwargs)
        return db_api.system_peer_create(ctxt, **values)
@ -92,194 +97,229 @@ class TestPeerMonitor(base.DCManagerTestCase):
        self.assertIsNotNone(self.peer_monitor)
        self.assertEqual(FAKE_SYSTEM_PEER_NAME, self.peer_monitor.peer.peer_name)

    def test_update_sync_status_when_secondary_site_becomes_unreachable(self):
        self.peer_monitor.\
            _update_sync_status_when_secondary_site_becomes_unreachable()
    def test_update_sync_status_secondary_site_becomes_unreachable(self):
        self.peer_monitor._update_sync_status_secondary_site_becomes_unreachable()
        association_new = db_api.peer_group_association_get(
            self.ctx, self.association.id)
        self.assertEqual(consts.ASSOCIATION_SYNC_STATUS_UNKNOWN,
                         association_new.sync_status)
            self.ctx, self.association.id
        )
        self.assertEqual(
            consts.ASSOCIATION_SYNC_STATUS_UNKNOWN, association_new.sync_status
        )

    def test_update_sync_status_and_association_is_non_primary(self):
        association = test_system_peer_manager.TestSystemPeerManager.\
            create_peer_group_association_static(
                self.ctx, system_peer_id=self.peer.id,
                peer_group_id=self.peer_group2.id,
                association_type=consts.ASSOCIATION_TYPE_NON_PRIMARY)
        self.mock_get_peer_dc_client().get_subcloud_peer_group.return_value = \
            {'id': FAKE_SITE1_PEER_GROUP_ID}
        association = self.system_peer_manager.create_peer_group_association_static(
            self.ctx,
            system_peer_id=self.peer.id,
            peer_group_id=self.peer_group2.id,
            association_type=consts.ASSOCIATION_TYPE_NON_PRIMARY,
        )
        self.mock_get_peer_dc_client().get_subcloud_peer_group.return_value = {
            "id": FAKE_SITE1_PEER_GROUP_ID
        }

        # Test the case where the association is non-primary
        self.peer_monitor._update_sync_status_when_secondary_site_becomes_reachable()
        self.mock_get_peer_dc_client().get_subcloud_peer_group.\
            assert_called_once_with(self.peer_group1.peer_group_name)
        association_new = db_api.peer_group_association_get(
            self.ctx, association.id)
        self.assertEqual(consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
                         association_new.sync_status)
        self.peer_monitor._update_sync_status_secondary_site_becomes_reachable()
        self.mock_get_peer_dc_client().get_subcloud_peer_group.assert_called_once_with(
            self.peer_group1.peer_group_name
        )
        association_new = db_api.peer_group_association_get(self.ctx, association.id)
        self.assertEqual(
            consts.ASSOCIATION_SYNC_STATUS_IN_SYNC, association_new.sync_status
        )

    def test_update_sync_status_when_secondary_site_becomes_reachable(self):
        self.mock_get_local_system.return_value = \
            test_system_peer_manager.FakeSystem(FAKE_SITE0_SYSTEM_UUID)
    def test_update_sync_status_secondary_site_becomes_reachable(self):
        self.mock_get_local_system.return_value = test_system_peer_manager.FakeSystem(
            FAKE_SITE0_SYSTEM_UUID
        )
        db_api.peer_group_association_update(
            self.ctx, self.association.id,
            sync_status=consts.ASSOCIATION_SYNC_STATUS_UNKNOWN)
        self.mock_get_peer_dc_client().get_subcloud_peer_group.return_value = \
            {'id': FAKE_SITE1_PEER_GROUP_ID}
        self.mock_get_peer_dc_client().get_system_peer.return_value = \
            {'id': FAKE_SITE1_SYSTEM_PEER_ID}
        self.mock_get_peer_dc_client().\
            get_peer_group_association_with_peer_id_and_pg_id.\
            return_value = {'id': FAKE_SITE1_ASSOCIATION_ID}
            self.ctx,
            self.association.id,
            sync_status=consts.ASSOCIATION_SYNC_STATUS_UNKNOWN,
        )
        self.mock_get_peer_dc_client().get_subcloud_peer_group.return_value = {
            "id": FAKE_SITE1_PEER_GROUP_ID
        }
        self.mock_get_peer_dc_client().get_system_peer.return_value = {
            "id": FAKE_SITE1_SYSTEM_PEER_ID
        }
        peer_dc_client = self.mock_get_peer_dc_client()
        peer_group_assoc = (
            peer_dc_client.get_peer_group_association_with_peer_id_and_pg_id
        )
        peer_group_assoc.return_value = {"id": FAKE_SITE1_ASSOCIATION_ID}

        # Test the case where the association sync_status is unknown
        self.peer_monitor._update_sync_status_when_secondary_site_becomes_reachable()
        self.peer_monitor._update_sync_status_secondary_site_becomes_reachable()
        self.mock_get_peer_dc_client().get_subcloud_peer_group.assert_called_once()
        self.mock_get_peer_dc_client().get_system_peer.assert_called_once_with(
            FAKE_SITE0_SYSTEM_UUID)
        self.mock_get_peer_dc_client().\
            get_peer_group_association_with_peer_id_and_pg_id.\
            assert_called_once_with(FAKE_SITE1_SYSTEM_PEER_ID,
                                    FAKE_SITE1_PEER_GROUP_ID)
        self.mock_get_peer_dc_client().update_peer_group_association_sync_status.\
            assert_called_once_with(FAKE_SITE1_ASSOCIATION_ID,
                                    consts.ASSOCIATION_SYNC_STATUS_IN_SYNC)
            FAKE_SITE0_SYSTEM_UUID
        )
        peer_group_assoc.assert_called_once_with(
            FAKE_SITE1_SYSTEM_PEER_ID, FAKE_SITE1_PEER_GROUP_ID
        )
        update_peer_group_association_sync_status = (
            self.mock_get_peer_dc_client().update_peer_group_association_sync_status
        )
        update_peer_group_association_sync_status.assert_called_once_with(
            FAKE_SITE1_ASSOCIATION_ID, consts.ASSOCIATION_SYNC_STATUS_IN_SYNC
        )

        association_new = db_api.peer_group_association_get(
            self.ctx, self.association.id)
        self.assertEqual(consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
                         association_new.sync_status)
            self.ctx, self.association.id
        )
        self.assertEqual(
            consts.ASSOCIATION_SYNC_STATUS_IN_SYNC, association_new.sync_status
        )

    def test_update_sync_status_unreachable_non_primary(self):
        association = test_system_peer_manager.TestSystemPeerManager.\
            create_peer_group_association_static(
                self.ctx, system_peer_id=self.peer.id,
                peer_group_id=self.peer_group2.id,
                association_type=consts.ASSOCIATION_TYPE_NON_PRIMARY)
        association = self.system_peer_manager.create_peer_group_association_static(
            self.ctx,
            system_peer_id=self.peer.id,
            peer_group_id=self.peer_group2.id,
            association_type=consts.ASSOCIATION_TYPE_NON_PRIMARY,
        )

        # Test the case where the association is non-primary
        self.peer_monitor.\
            _update_sync_status_when_secondary_site_becomes_unreachable()
        association_new = db_api.peer_group_association_get(
            self.ctx, association.id)
        self.assertEqual(consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
                         association_new.sync_status)
        self.peer_monitor._update_sync_status_secondary_site_becomes_unreachable()
        association_new = db_api.peer_group_association_get(self.ctx, association.id)
        self.assertEqual(
            consts.ASSOCIATION_SYNC_STATUS_IN_SYNC, association_new.sync_status
        )

    def test_update_sync_status_unreachable_sync_status(self):
        test_cases = [
            (consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC,
             consts.ASSOCIATION_SYNC_STATUS_FAILED),
            (consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
             consts.ASSOCIATION_SYNC_STATUS_UNKNOWN),
            (consts.ASSOCIATION_SYNC_STATUS_UNKNOWN,
             consts.ASSOCIATION_SYNC_STATUS_UNKNOWN),
            (consts.ASSOCIATION_SYNC_STATUS_SYNCING,
             consts.ASSOCIATION_SYNC_STATUS_FAILED),
            (consts.ASSOCIATION_SYNC_STATUS_FAILED,
             consts.ASSOCIATION_SYNC_STATUS_FAILED)
            (
                consts.ASSOCIATION_SYNC_STATUS_OUT_OF_SYNC,
                consts.ASSOCIATION_SYNC_STATUS_FAILED,
            ),
            (
                consts.ASSOCIATION_SYNC_STATUS_IN_SYNC,
                consts.ASSOCIATION_SYNC_STATUS_UNKNOWN,
            ),
            (
                consts.ASSOCIATION_SYNC_STATUS_UNKNOWN,
                consts.ASSOCIATION_SYNC_STATUS_UNKNOWN,
            ),
            (
                consts.ASSOCIATION_SYNC_STATUS_SYNCING,
                consts.ASSOCIATION_SYNC_STATUS_FAILED,
            ),
            (
                consts.ASSOCIATION_SYNC_STATUS_FAILED,
                consts.ASSOCIATION_SYNC_STATUS_FAILED,
            ),
        ]
        for initial_status, expected_status in test_cases:
            db_api.peer_group_association_update(
                self.ctx, self.association.id,
                sync_status=initial_status)
                self.ctx, self.association.id, sync_status=initial_status
            )

            self.peer_monitor.\
                _update_sync_status_when_secondary_site_becomes_unreachable()
            self.peer_monitor._update_sync_status_secondary_site_becomes_unreachable()
            association_new = db_api.peer_group_association_get(
                self.ctx, self.association.id)
                self.ctx, self.association.id
            )
            self.assertEqual(expected_status, association_new.sync_status)

    def test_peer_group_audit_notify_error(self):
        payload = {'peer_uuid': 100}
        payload = {"peer_uuid": 100}
        self.peer_monitor_manager.peer_group_audit_notify(
            self.ctx, self.peer_group1.peer_group_name, payload)
            self.ctx, self.peer_group1.peer_group_name, payload
        )
        self.mock_log.exception.assert_called_once_with(
            'Handling peer group audit notify error: System Peer with '
            f'peer_uuid {payload["peer_uuid"]} doesn\'t exist.')
            "Handling peer group audit notify error: "
            f"System Peer with peer_uuid {payload.get('peer_uuid')} doesn't exist."
        )

    def test_peer_group_audit_notify_skip_audit(self):
        payload = {'peer_uuid': self.peer.peer_uuid}
        payload = {"peer_uuid": self.peer.peer_uuid}
        self.peer_monitor_manager.peer_group_audit_notify(
            self.ctx, self.peer_group1.peer_group_name, payload)
        msg = f'System peer with UUID={self.peer.peer_uuid} '\
            'is not under monitoring. Skipping audit for peer group '\
            f'{self.peer_group1.peer_group_name}'
            self.ctx, self.peer_group1.peer_group_name, payload
        )
        msg = (
            f"System peer with UUID={self.peer.peer_uuid} is not under monitoring. "
            f"Skipping audit for peer group {self.peer_group1.peer_group_name}"
        )
        self.mock_log.warning.assert_called_once_with(msg)

    @mock.patch.object(threading, 'Thread')
    @mock.patch.object(threading, "Thread")
    def test_peer_monitor_notify(self, mock_thread_start):
        # Test to create peer monitor task
        self.peer_monitor_manager.peer_monitor_notify(self.ctx)
        Calls = [mock.call.debug('PeerMonitorManager initialization...'),
                 mock.call.info('Caught peer monitor notify...'),
                 mock.call.info(
                     f'Create monitoring thread for peer: {self.peer.peer_name}'),
                 mock.call.info(
                     f'New peer group [{self.peer.id}] '
                     f'found for peer [{self.peer.peer_name}]')]
        Calls = [
            mock.call.debug("PeerMonitorManager initialization..."),
            mock.call.info("Caught peer monitor notify..."),
            mock.call.info(f"Create monitoring thread for peer: {self.peer.peer_name}"),
            mock.call.info(
                f"New peer group [{self.peer.id}] "
                f"found for peer [{self.peer.peer_name}]"
            ),
        ]
        self.mock_log.assert_has_calls(Calls)

    @mock.patch.object(threading, 'Thread')
    @mock.patch.object(threading, "Thread")
    def test_peer_monitor_notify_delete(self, mock_thread_start):
        obj = peer_group_audit_manager.\
            PeerGroupAuditManager(mock.MagicMock(), self.peer_group1.id)
        obj = peer_group_audit_manager.PeerGroupAuditManager(
            mock.MagicMock(), self.peer_group1.id
        )
        self.peer_monitor.peer_group_audit_obj_map = {self.peer_group1.id: obj}
        self.peer_monitor.thread = mock.MagicMock()
        self.peer_monitor_manager.peer_monitor_thread_map = {'2': self.peer_monitor}
        self.peer_monitor_manager.peer_monitor_thread_map = {"2": self.peer_monitor}

        # Test to delete peer monitor task
        self.peer_monitor_manager.peer_monitor_notify(self.ctx)
        self.assertNotIn(2, self.peer_monitor_manager.peer_monitor_thread_map)

    @mock.patch.object(threading.Event, 'wait')
    @mock.patch.object(threading.Event, "wait")
    def test_do_monitor_peer_clear_alarm(self, mock_event):
        self.peer_monitor.fm_api = mock.MagicMock()
        obj = peer_group_audit_manager.\
            PeerGroupAuditManager(self, self.peer_group1.id)
        obj = peer_group_audit_manager.PeerGroupAuditManager(self, self.peer_group1.id)
        self.peer_monitor.peer_group_audit_obj_map = {self.peer_group1.id: obj}
        mock_event.side_effect = [False, False, True]
        self.peer_monitor._do_monitor_peer()
        self.peer_monitor.fm_api.clear_fault.assert_called_once()

    @mock.patch.object(threading.Event, 'wait')
    @mock.patch.object(threading.Event, "wait")
    def test_do_monitor_peer_problem_clearing_fault(self, mock_event):
        self.peer_monitor.fm_api.get_fault = \
            mock.MagicMock(side_effect=Exception('boom'))
        self.peer_monitor.fm_api.get_fault = mock.MagicMock(
            side_effect=Exception("boom")
        )
        mock_event.side_effect = [False, False, True]
        self.peer_monitor._do_monitor_peer()
        self.mock_log.exception.assert_called_with(
            f'Problem clearing fault for peer {self.peer.peer_uuid}, '
            f'alarm_id={fm_const.FM_ALARM_ID_DC_SYSTEM_PEER_HEARTBEAT_FAILED}'
            ' error: boom'
            f"Problem clearing fault for peer {self.peer.peer_uuid}, alarm_id="
            f"{fm_const.FM_ALARM_ID_DC_SYSTEM_PEER_HEARTBEAT_FAILED} error: boom"
        )

    @mock.patch.object(threading.Event, 'wait')
    @mock.patch.object(threading.Event, "wait")
    def test_do_monitor_peer_raising_alarm(self, mock_event):
        self.peer_monitor.fm_api = mock.MagicMock()
        self.mock_get_peer_dc_client().get_subcloud_peer_group_list.\
            side_effect = Exception("Mocked exception")
        self.mock_get_peer_dc_client().get_subcloud_peer_group_list.side_effect = (
            Exception("Mocked exception")
        )
        mock_event.side_effect = [False, False, False, True]
        self.peer_monitor_manager.peer_monitor_thread_map = {'1': self.peer_monitor}
        self.peer_monitor_manager.peer_monitor_thread_map = {"1": self.peer_monitor}

        # heartbeat_failure_threshold reached.
        self.peer_monitor._do_monitor_peer()
        ret_system_peer = db_api.system_peer_get_by_uuid(
            self.ctx, self.peer.peer_uuid)
        self.assertEqual(consts.SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE,
                         ret_system_peer.availability_state)
        ret_system_peer = db_api.system_peer_get_by_uuid(self.ctx, self.peer.peer_uuid)
        self.assertEqual(
            consts.SYSTEM_PEER_AVAILABILITY_STATE_UNAVAILABLE,
            ret_system_peer.availability_state,
        )
        self.peer_monitor.fm_api.set_fault.assert_called_once()

    @mock.patch.object(peer_monitor_manager.PeerMonitor,
                       '_heartbeat_check_via_get_peer_group_list')
    @mock.patch.object(threading.Event, 'wait')
    @mock.patch.object(
        peer_monitor_manager.PeerMonitor, "_heartbeat_check_via_get_peer_group_list"
    )
    @mock.patch.object(threading.Event, "wait")
    def test_do_monitor_peer_exception(
        self, mock_event, mock_heartbeat_check_via_get_peer_group_list
    ):
        mock_heartbeat_check_via_get_peer_group_list.side_effect = Exception('boom')
        mock_heartbeat_check_via_get_peer_group_list.side_effect = Exception("boom")
        mock_event.side_effect = [False, False, True]
        self.peer_monitor._do_monitor_peer()
        self.mock_log.exception.assert_called_with(
            'Got exception monitoring peer PeerSite1 error: boom'
            "Got exception monitoring peer PeerSite1 error: boom"
        )

    def test_heartbeat_check_via_get_peer_group_list_pg_not_found(self):
@ -287,8 +327,9 @@ class TestPeerMonitor(base.DCManagerTestCase):
        ret = self.peer_monitor._heartbeat_check_via_get_peer_group_list()
        self.mock_get_peer_dc_client.assert_called()
        self.mock_log.warning.assert_called_once_with(
            'Resource subcloud peer group of dc:'
            'http://128.128.128.128:5000/v3 not found')
            "Resource subcloud peer group of dc:"
            "http://128.128.128.128:5000/v3 not found"
        )
        self.assertEqual((False, []), ret)

    def test_audit_specific_local_peer_group(self):
@ -26,7 +26,7 @@ from dcmanager.common import consts
from dcmanager.manager import service
from dcmanager.tests.base import DCManagerTestCase

sys.modules['fm_core'] = mock.Mock()
sys.modules["fm_core"] = mock.Mock()
CONF = cfg.CONF


@ -35,8 +35,7 @@ class BaseTestDCManagerService(DCManagerTestCase):

    def setUp(self):
        super().setUp()
        self.service_obj = service.DCManagerService('dcmanager',
                                                    'dcmanager')
        self.service_obj = service.DCManagerService("dcmanager", "dcmanager")
        self.payload = {}
        self._mock_audit_rpc_client()
        self._mock_subcloud_manager(service)
@ -44,14 +43,14 @@ class BaseTestDCManagerService(DCManagerTestCase):


class TestDCManagerServiceInit(BaseTestDCManagerService):
    """Test class for testing init managers in DCManagerService """
    """Test class for testing init managers in DCManagerService"""

    def setUp(self):
        super().setUp()

    def test_init(self):
        self.assertEqual(self.service_obj.host, 'localhost')
        self.assertEqual(self.service_obj.topic, 'dcmanager')
        self.assertEqual(self.service_obj.host, "localhost")
        self.assertEqual(self.service_obj.topic, "dcmanager")

    def test_init_managers(self):
        self.service_obj.init_managers()
@ -60,7 +59,7 @@ class TestDCManagerServiceInit(BaseTestDCManagerService):
        self.assertIsNotNone(self.service_obj.peer_monitor_manager)


@mock.patch.object(service, 'rpc_messaging')
@mock.patch.object(service, "rpc_messaging")
class TestDCManagerService(BaseTestDCManagerService):
    """Test class for testing DCManagerService"""

@ -71,7 +70,8 @@ class TestDCManagerService(BaseTestDCManagerService):
        os.path.isdir = mock.Mock(return_value=True)
        self.service_obj.start()
        mock_rpc.get_rpc_server.assert_called_once_with(
            self.service_obj.target, self.service_obj)
            self.service_obj.target, self.service_obj
        )
        mock_rpc.get_rpc_server().start.assert_called_once()


@ -82,188 +82,237 @@ class TestSubcloudManager(BaseTestDCManagerService):
        super().setUp()

    def test_add_subcloud(self):
        payload = {'name': 'testname',
                   'region_name': uuidutils.generate_uuid().replace("-", "")}
        payload = {
            "name": "testname",
            "region_name": uuidutils.generate_uuid().replace("-", ""),
        }
        self.service_obj.init_managers()
        self.service_obj.add_subcloud(
            self.ctx, subcloud_id=1, payload=payload)
        self.mock_subcloud_manager().add_subcloud.\
            assert_called_once_with(self.ctx, 1, payload)
        self.service_obj.add_subcloud(self.ctx, subcloud_id=1, payload=payload)
        self.mock_subcloud_manager().add_subcloud.assert_called_once_with(
            self.ctx, 1, payload
        )

    def test_add_secondary_subcloud(self):
        payload = {'name': 'testname',
                   'region_name': uuidutils.generate_uuid().replace("-", "")}
        payload = {
            "name": "testname",
            "region_name": uuidutils.generate_uuid().replace("-", ""),
        }
        self.service_obj.init_managers()
        self.service_obj.add_secondary_subcloud(
            self.ctx, subcloud_id=2, payload=payload)
        self.mock_subcloud_manager().add_subcloud.\
            assert_called_once_with(self.ctx, 2, payload)
            self.ctx, subcloud_id=2, payload=payload
        )
        self.mock_subcloud_manager().add_subcloud.assert_called_once_with(
            self.ctx, 2, payload
        )

    def test_delete_subcloud(self):
        self.service_obj.init_managers()
        self.service_obj.delete_subcloud(
            self.ctx, subcloud_id=1)
        self.mock_subcloud_manager().delete_subcloud.\
            assert_called_once_with(self.ctx, 1)
        self.service_obj.delete_subcloud(self.ctx, subcloud_id=1)
        self.mock_subcloud_manager().delete_subcloud.assert_called_once_with(
            self.ctx, 1
        )

    def test_rename_subcloud(self):
        self.service_obj.init_managers()
        self.service_obj.rename_subcloud(
            self.ctx, subcloud_id=1, curr_subcloud_name='fake_subcloud',
            new_subcloud_name='subcloud1')
        self.mock_subcloud_manager().rename_subcloud.\
            assert_called_once_with(self.ctx, 1, 'fake_subcloud', 'subcloud1')
            self.ctx,
            subcloud_id=1,
            curr_subcloud_name="fake_subcloud",
            new_subcloud_name="subcloud1",
        )
        self.mock_subcloud_manager().rename_subcloud.assert_called_once_with(
            self.ctx, 1, "fake_subcloud", "subcloud1"
        )

    def test_get_subcloud_name_by_region_name(self):
        self.service_obj.init_managers()
        self.service_obj.get_subcloud_name_by_region_name(
            self.ctx, subcloud_region='test_region')
        self.mock_subcloud_manager().get_subcloud_name_by_region_name.\
            assert_called_once_with(self.ctx, 'test_region')
            self.ctx, subcloud_region="test_region"
        )
        get_subcloud_name = (
            self.mock_subcloud_manager().get_subcloud_name_by_region_name
        )
        get_subcloud_name.assert_called_once_with(self.ctx, "test_region")

    def test_update_subcloud(self):
        self.service_obj.init_managers()
        self.service_obj.update_subcloud(
            self.ctx, subcloud_id=1,
            management_state='testmgmtstatus')
            self.ctx, subcloud_id=1, management_state="testmgmtstatus"
        )
        self.mock_subcloud_manager().update_subcloud.assert_called_once_with(
            self.ctx, 1, 'testmgmtstatus', None, None, None, None, None, None,
            None, None, None
            self.ctx,
            1,
            "testmgmtstatus",
            None,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
        )

    def test_update_subcloud_with_network_reconfig(self):
        payload = {'name': 'testname',
                   'bootstrap-address': "10.10.10.12"}
        payload = {"name": "testname", "bootstrap-address": "10.10.10.12"}
        self.service_obj.init_managers()
        self.service_obj.update_subcloud_with_network_reconfig(
            self.ctx, subcloud_id=1, payload=payload)
        self.mock_subcloud_manager().update_subcloud_with_network_reconfig.\
            assert_called_once_with(self.ctx, 1, payload)
            self.ctx, subcloud_id=1, payload=payload
        )
        update_subcloud_with_network_reconfig = (
            self.mock_subcloud_manager().update_subcloud_with_network_reconfig
        )
        update_subcloud_with_network_reconfig.assert_called_once_with(
            self.ctx, 1, payload
        )

    def test_redeploy_subcloud(self):
        payload = {'DEPLOY_PHASE_CONFIG': 'configure'}
        payload = {"DEPLOY_PHASE_CONFIG": "configure"}
        self.service_obj.init_managers()
        self.service_obj.redeploy_subcloud(
            self.ctx, subcloud_id=1, payload=payload)
        self.service_obj.redeploy_subcloud(self.ctx, subcloud_id=1, payload=payload)
        self.mock_subcloud_manager().redeploy_subcloud.assert_called_once_with(
            self.ctx, 1, payload)
            self.ctx, 1, payload
        )

    def test_backup_subclouds(self):
        payload = {'subcloud': 'subcloud1'}
        payload = {"subcloud": "subcloud1"}
        self.service_obj.init_managers()
        self.service_obj.backup_subclouds(
            self.ctx, payload=payload)
        self.service_obj.backup_subclouds(self.ctx, payload=payload)
        self.mock_subcloud_manager().create_subcloud_backups.assert_called_once_with(
            self.ctx, payload)
            self.ctx, payload
        )

    def test_delete_subcloud_backups(self):
        payload = {'subcloud': 'subcloud2'}
        payload = {"subcloud": "subcloud2"}
        self.service_obj.init_managers()
        self.service_obj.delete_subcloud_backups(
            self.ctx, release_version=23.09, payload=payload)
            self.ctx, release_version=23.09, payload=payload
        )
        self.mock_subcloud_manager().delete_subcloud_backups.assert_called_once_with(
            self.ctx, 23.09, payload)
            self.ctx, 23.09, payload
        )

    def test_restore_subcloud_backups(self):
        payload = {'subcloud': 'subcloud2'}
        payload = {"subcloud": "subcloud2"}
        self.service_obj.init_managers()
        self.service_obj.restore_subcloud_backups(
            self.ctx, payload=payload)
        self.mock_subcloud_manager().restore_subcloud_backups.\
            assert_called_once_with(self.ctx, payload)
        self.service_obj.restore_subcloud_backups(self.ctx, payload=payload)
        self.mock_subcloud_manager().restore_subcloud_backups.assert_called_once_with(
            self.ctx, payload
        )

    def test_update_subcloud_sync_endpoint_type(self):
        self.service_obj.init_managers()
        self.service_obj.update_subcloud_sync_endpoint_type(
            self.ctx, subcloud_name='testname',
            self.ctx,
            subcloud_name="testname",
            endpoint_type_list=dccommon_consts.ENDPOINT_TYPES_LIST_OS,
            openstack_installed=True)
        self.mock_subcloud_manager().update_subcloud_sync_endpoint_type.\
            assert_called_once_with(self.ctx,
                                    'testname',
                                    dccommon_consts.ENDPOINT_TYPES_LIST_OS,
                                    True)
            openstack_installed=True,
        )
        update_subcloud_sync_endpoint_type = (
            self.mock_subcloud_manager().update_subcloud_sync_endpoint_type
        )
        update_subcloud_sync_endpoint_type.assert_called_once_with(
            self.ctx, "testname", dccommon_consts.ENDPOINT_TYPES_LIST_OS, True
        )

    def test_prestage_subcloud(self):
        payload = {'subcloud_name': 'subcloud1'}
        payload = {"subcloud_name": "subcloud1"}
        self.service_obj.init_managers()
        self.service_obj.prestage_subcloud(
            self.ctx, payload=payload)
        self.service_obj.prestage_subcloud(self.ctx, payload=payload)
        self.mock_subcloud_manager().prestage_subcloud.assert_called_once_with(
            self.ctx, payload)
            self.ctx, payload
        )

    def test_subcloud_deploy_create(self):
        payload = {'name': 'subcloud1'}
        payload = {"name": "subcloud1"}
        self.service_obj.init_managers()
        self.service_obj.subcloud_deploy_create(
            self.ctx, subcloud_id=1, payload=payload)
            self.ctx, subcloud_id=1, payload=payload
        )
        self.mock_subcloud_manager().subcloud_deploy_create.assert_called_once_with(
            self.ctx, 1, payload)
            self.ctx, 1, payload
        )

    def test_subcloud_deploy_bootstrap(self):
        payload = {'name': 'subcloud1'}
        payload = {"name": "subcloud1"}
        self.service_obj.init_managers()
        self.service_obj.subcloud_deploy_bootstrap(
            self.ctx, subcloud_id=1, payload=payload, initial_deployment=True)
        self.mock_subcloud_manager().subcloud_deploy_bootstrap.\
            assert_called_once_with(self.ctx, 1, payload, True)
            self.ctx, subcloud_id=1, payload=payload, initial_deployment=True
        )
        self.mock_subcloud_manager().subcloud_deploy_bootstrap.assert_called_once_with(
            self.ctx, 1, payload, True
        )

    def test_subcloud_deploy_config(self):
        payload = {'name': 'testname'}
        payload = {"name": "testname"}
        self.service_obj.init_managers()
        self.service_obj.subcloud_deploy_config(
            self.ctx, subcloud_id=1, payload=payload, initial_deployment=True)
            self.ctx, subcloud_id=1, payload=payload, initial_deployment=True
        )
        self.mock_subcloud_manager().subcloud_deploy_config.assert_called_once_with(
            self.ctx, 1, payload, True)
            self.ctx, 1, payload, True
        )

    def test_subcloud_deploy_install(self):
        payload = {'name': 'testname'}
        payload = {"name": "testname"}
        self.service_obj.init_managers()
        self.service_obj.subcloud_deploy_install(
            self.ctx, subcloud_id=1, payload=payload, initial_deployment=True)
            self.ctx, subcloud_id=1, payload=payload, initial_deployment=True
        )
        self.mock_subcloud_manager().subcloud_deploy_install.assert_called_once_with(
            self.ctx, 1, payload, True)
            self.ctx, 1, payload, True
        )

    def test_subcloud_deploy_complete(self):
        self.service_obj.init_managers()
        self.service_obj.subcloud_deploy_complete(
            self.ctx, subcloud_id=1)
        self.mock_subcloud_manager().subcloud_deploy_complete.\
            assert_called_once_with(self.ctx, 1)
        self.service_obj.subcloud_deploy_complete(self.ctx, subcloud_id=1)
        self.mock_subcloud_manager().subcloud_deploy_complete.assert_called_once_with(
            self.ctx, 1
        )

    def test_subcloud_deploy_abort(self):
        self.service_obj.init_managers()
        self.service_obj.subcloud_deploy_abort(
            self.ctx, subcloud_id=1,
            deploy_status=consts.DEPLOY_STATE_ABORTING_CONFIG)
            self.ctx, subcloud_id=1, deploy_status=consts.DEPLOY_STATE_ABORTING_CONFIG
        )
        self.mock_subcloud_manager().subcloud_deploy_abort.assert_called_once_with(
            self.ctx, 1, consts.DEPLOY_STATE_ABORTING_CONFIG)
            self.ctx, 1, consts.DEPLOY_STATE_ABORTING_CONFIG
        )

    def test_subcloud_deploy_resume(self):

        deploy_states_to_run = [consts.DEPLOY_PHASE_INSTALL,
                                consts.DEPLOY_PHASE_BOOTSTRAP,
                                consts.DEPLOY_PHASE_CONFIG]
        deploy_states_to_run = [
            consts.DEPLOY_PHASE_INSTALL,
            consts.DEPLOY_PHASE_BOOTSTRAP,
            consts.DEPLOY_PHASE_CONFIG,
        ]

        fake_payload = {'fake_payload_install': 'fake_install_values',
                        'fake_payload_bootstrap': 'fake_bootstrap_values',
                        'fake_payload_config': 'fake_config'}
        fake_payload = {
            "fake_payload_install": "fake_install_values",
            "fake_payload_bootstrap": "fake_bootstrap_values",
            "fake_payload_config": "fake_config",
        }

        self.service_obj.init_managers()
        self.service_obj.subcloud_deploy_resume(
            self.ctx, subcloud_id=1, subcloud_name='testname',
            self.ctx,
            subcloud_id=1,
            subcloud_name="testname",
            payload=fake_payload,
            deploy_states_to_run=deploy_states_to_run)
            deploy_states_to_run=deploy_states_to_run,
        )
        self.mock_subcloud_manager().subcloud_deploy_resume.assert_called_once_with(
            self.ctx, 1, 'testname', fake_payload, deploy_states_to_run)
            self.ctx, 1, "testname", fake_payload, deploy_states_to_run
        )

    def test_batch_migrate_subcloud(self):
        payload = {'peer_group': 'fake_peer_group'}
        payload = {"peer_group": "fake_peer_group"}
        self.service_obj.init_managers()
        self.service_obj.batch_migrate_subcloud(
            self.ctx, payload=payload)
        self.service_obj.batch_migrate_subcloud(self.ctx, payload=payload)
        self.mock_subcloud_manager().batch_migrate_subcloud.assert_called_once_with(
            self.ctx, payload)
            self.ctx, payload
        )


class TestPeerMonitorManager(BaseTestDCManagerService):
@ -276,19 +325,24 @@ class TestPeerMonitorManager(BaseTestDCManagerService):
        self.service_obj.init_managers()
        self.service_obj.peer_monitor_notify(self.ctx)
        self.mock_peer_monitor_manager().peer_monitor_notify.assert_called_once_with(
            self.ctx)
            self.ctx
        )

    def test_peer_group_audit_notify(self):
        payload = {'peer_uuid': 2}
        payload = {"peer_uuid": 2}
        self.service_obj.init_managers()
        self.service_obj.peer_group_audit_notify(
            self.ctx, peer_group_name='fake_peer_group', payload=payload)
        self.mock_peer_monitor_manager().peer_group_audit_notify.\
            assert_called_once_with(self.ctx,
                                    'fake_peer_group', payload)
            self.ctx, peer_group_name="fake_peer_group", payload=payload
        )
        peer_group_audit_notify = (
            self.mock_peer_monitor_manager().peer_group_audit_notify
        )
        peer_group_audit_notify.assert_called_once_with(
            self.ctx, "fake_peer_group", payload
        )


@mock.patch.object(service, 'SystemPeerManager')
@mock.patch.object(service, "SystemPeerManager")
class TestSystemPeerManager(BaseTestDCManagerService):
    """Test class for testing SystemPeerManager"""

@ -298,13 +352,16 @@ class TestSystemPeerManager(BaseTestDCManagerService):
    def test_sync_subcloud_peer_group(self, mock_system_peer_manager):
        self.service_obj.init_managers()
        self.service_obj.sync_subcloud_peer_group(
            self.ctx, association_id=2, sync_subclouds=True)
        mock_system_peer_manager().sync_subcloud_peer_group.\
            assert_called_once_with(self.ctx, 2, True)
            self.ctx, association_id=2, sync_subclouds=True
        )
        mock_system_peer_manager().sync_subcloud_peer_group.assert_called_once_with(
            self.ctx, 2, True
        )

    def test_delete_peer_group_association(self, mock_system_peer_manager):
        self.service_obj.init_managers()
        self.service_obj.delete_peer_group_association(
            self.ctx, association_id=2)
        mock_system_peer_manager().delete_peer_group_association.\
            assert_called_once_with(self.ctx, 2)
        self.service_obj.delete_peer_group_association(self.ctx, association_id=2)
        delete_peer_group_association = (
            mock_system_peer_manager().delete_peer_group_association
        )
        delete_peer_group_association.assert_called_once_with(self.ctx, 2)
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,4 +1,4 @@
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -27,38 +27,46 @@ class TestUtils(base.DCManagerTestCase):

    def test_has_network_reconfig_same_values(self):
        subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
        payload = {"management_subnet": "192.168.101.0/24",
                   "management_gateway_address": "192.168.101.1",
                   "management_start_address": "192.168.101.2",
                   "management_end_address": "192.168.101.50",
                   "systemcontroller_gateway_address": "192.168.204.101"}
        payload = {
            "management_subnet": "192.168.101.0/24",
            "management_gateway_address": "192.168.101.1",
            "management_start_address": "192.168.101.2",
            "management_end_address": "192.168.101.50",
            "systemcontroller_gateway_address": "192.168.204.101",
        }
        result = utils.has_network_reconfig(payload, subcloud)
        self.assertFalse(result)

    def test_has_network_reconfig_different_subnet(self):
        subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
        payload = {"management_subnet": "192.168.102.0/24",
                   "management_gateway_address": "192.168.102.1",
                   "management_start_address": "192.168.102.2",
                   "management_end_address": "192.168.102.50"}
        payload = {
            "management_subnet": "192.168.102.0/24",
            "management_gateway_address": "192.168.102.1",
            "management_start_address": "192.168.102.2",
            "management_end_address": "192.168.102.50",
        }
        result = utils.has_network_reconfig(payload, subcloud)
        self.assertTrue(result)

    def test_has_network_reconfig_different_start_address(self):
        subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
        payload = {"management_subnet": "192.168.101.0/24",
                   "management_gateway_address": "192.168.101.5",
                   "management_start_address": "192.168.101.7",
                   "management_end_address": "192.168.101.50"}
        payload = {
            "management_subnet": "192.168.101.0/24",
            "management_gateway_address": "192.168.101.5",
            "management_start_address": "192.168.101.7",
            "management_end_address": "192.168.101.50",
        }
        result = utils.has_network_reconfig(payload, subcloud)
        self.assertTrue(result)

    def test_has_network_reconfig_different_sc_gateway(self):
        subcloud = fake_subcloud.create_fake_subcloud(self.ctx)
        payload = {"management_subnet": "192.168.101.0/24",
                   "management_gateway_address": "192.168.101.1",
                   "management_start_address": "192.168.101.2",
                   "management_end_address": "192.168.101.50",
                   "systemcontroller_gateway_address": "192.168.204.102"}
        payload = {
            "management_subnet": "192.168.101.0/24",
            "management_gateway_address": "192.168.101.1",
            "management_start_address": "192.168.101.2",
            "management_end_address": "192.168.101.50",
            "systemcontroller_gateway_address": "192.168.204.102",
        }
        result = utils.has_network_reconfig(payload, subcloud)
        self.assertTrue(result)
@ -25,15 +25,16 @@ from dcmanager.tests import base
class TestBaseObject(base.DCManagerTestCase):
    def test_base_class(self):
        obj = obj_base.DCManagerObject()
        self.assertEqual(obj_base.DCManagerObject.OBJ_PROJECT_NAMESPACE,
                         obj.OBJ_PROJECT_NAMESPACE)
        self.assertEqual(obj_base.DCManagerObject.VERSION,
                         obj.VERSION)
        self.assertEqual(
            obj_base.DCManagerObject.OBJ_PROJECT_NAMESPACE, obj.OBJ_PROJECT_NAMESPACE
        )
        self.assertEqual(obj_base.DCManagerObject.VERSION, obj.VERSION)

    @mock.patch.object(obj_base.DCManagerObject, "obj_reset_changes")
    def test_from_db_object(self, mock_obj_reset_ch):
        class TestDCManagerObject(obj_base.DCManagerObject,
                                  obj_base.VersionedObjectDictCompat):
        class TestDCManagerObject(
            obj_base.DCManagerObject, obj_base.VersionedObjectDictCompat
        ):
            fields = {
                "key1": obj_fields.StringField(),
                "key2": obj_fields.StringField(),
@ -11,29 +11,31 @@ import mock
from dccommon import consts as dccommon_consts
from dcmanager.common import consts

PREVIOUS_PREVIOUS_VERSION = '01.23'
PREVIOUS_VERSION = '12.34'
UPGRADED_VERSION = '56.78'
PREVIOUS_PREVIOUS_VERSION = "01.23"
PREVIOUS_VERSION = "12.34"
UPGRADED_VERSION = "56.78"

PREVIOUS_KUBE_VERSION = 'v1.2.3'
UPGRADED_KUBE_VERSION = 'v1.2.4'
PREVIOUS_KUBE_VERSION = "v1.2.3"
UPGRADED_KUBE_VERSION = "v1.2.4"

FAKE_VENDOR = '8086'
FAKE_DEVICE = '0b30'
FAKE_VENDOR = "8086"
FAKE_DEVICE = "0b30"


class FakeController(object):
    def __init__(self,
                 host_id=1,
                 hostname='controller-0',
                 administrative=consts.ADMIN_UNLOCKED,
                 operational=consts.OPERATIONAL_ENABLED,
                 availability=dccommon_consts.AVAILABILITY_ONLINE,
                 ihost_action=None,
                 target_load=UPGRADED_VERSION,
                 software_load=PREVIOUS_VERSION,
                 task=None,
                 capabilities={"Personality": "Controller-Active"}):
    def __init__(
        self,
        host_id=1,
        hostname="controller-0",
        administrative=consts.ADMIN_UNLOCKED,
        operational=consts.OPERATIONAL_ENABLED,
        availability=dccommon_consts.AVAILABILITY_ONLINE,
        ihost_action=None,
        target_load=UPGRADED_VERSION,
        software_load=PREVIOUS_VERSION,
        task=None,
        capabilities={"Personality": "Controller-Active"},
    ):
        self.uuid = str(uuid.uuid4())
        self.id = host_id
        self.hostname = hostname
@ -48,11 +50,9 @@ class FakeController(object):


class FakeDevice(object):
    def __init__(self,
                 obj_id,
                 pvendor_id=FAKE_VENDOR,
                 pdevice_id=FAKE_DEVICE,
                 enabled=True):
    def __init__(
        self, obj_id, pvendor_id=FAKE_VENDOR, pdevice_id=FAKE_DEVICE, enabled=True
    ):
        self.uuid = obj_id
        self.pvendor_id = pvendor_id
        self.pdevice_id = pdevice_id
@ -60,13 +60,15 @@ class FakeDevice(object):


class FakeDeviceImage(object):
    def __init__(self,
                 obj_id,
                 pci_vendor=FAKE_VENDOR,
                 pci_device=FAKE_DEVICE,
                 bitstream_type='functional',
                 applied=False,
                 applied_labels=None):
    def __init__(
        self,
        obj_id,
        pci_vendor=FAKE_VENDOR,
        pci_device=FAKE_DEVICE,
        bitstream_type="functional",
        applied=False,
        applied_labels=None,
    ):
        self.uuid = obj_id
        self.pci_vendor = pci_vendor
        self.pci_device = pci_device
@ -76,10 +78,7 @@ class FakeDeviceImage(object):


class FakeDeviceLabel(object):
    def __init__(self,
                 label_key=None,
                 label_value=None,
                 pcidevice_uuid=None):
    def __init__(self, label_key=None, label_value=None, pcidevice_uuid=None):
        self.uuid = str(uuid.uuid4())
        self.label_key = label_key
        self.label_value = label_value
@ -87,10 +86,7 @@ class FakeDeviceLabel(object):


class FakeHostFilesystem(object):
    def __init__(self,
                 name='scratch',
                 logical_volume='scratch-lv',
                 size=16):
    def __init__(self, name="scratch", logical_volume="scratch-lv", size=16):
        self.name = name
        self.logical_volume = logical_volume
        self.size = size
@ -103,20 +99,20 @@ class FakeKeystoneClient(object):


class FakeKubeRootCaUpdate(object):
    def __init__(self,
                 obj_id=1,
                 state='update-started'):
    def __init__(self, obj_id=1, state="update-started"):
        self.id = obj_id
        self.uuid = str(uuid.uuid4())
        self.state = state


class FakeKubeUpgrade(object):
    def __init__(self,
                 obj_id=1,
                 from_version=PREVIOUS_KUBE_VERSION,
                 to_version=UPGRADED_KUBE_VERSION,
                 state='upgrade-complete'):
    def __init__(
        self,
        obj_id=1,
        from_version=PREVIOUS_KUBE_VERSION,
        to_version=UPGRADED_KUBE_VERSION,
        state="upgrade-complete",
    ):
        self.id = obj_id
        self.uuid = str(uuid.uuid4())
        self.from_version = state
@ -125,11 +121,9 @@ class FakeKubeUpgrade(object):


class FakeKubeVersion(object):
    def __init__(self,
                 obj_id=1,
                 version=UPGRADED_KUBE_VERSION,
                 target=True,
                 state='active'):
    def __init__(
        self, obj_id=1, version=UPGRADED_KUBE_VERSION, target=True, state="active"
    ):
        self.id = obj_id
        self.uuid = str(uuid.uuid4())
        self.version = version
@ -144,14 +138,16 @@ class FakeKubeVersion(object):


class FakeLoad(object):
    def __init__(self,
                 obj_id,
                 compatible_version='N/A',
                 required_patches='N/A',
                 software_version=PREVIOUS_VERSION,
                 state='active',
                 created_at=None,
                 updated_at=None):
    def __init__(
        self,
        obj_id,
        compatible_version="N/A",
        required_patches="N/A",
        software_version=PREVIOUS_VERSION,
        state="active",
        created_at=None,
        updated_at=None,
    ):
        self.id = obj_id
        self.uuid = str(uuid.uuid4())
        self.compatible_version = compatible_version
@ -190,20 +186,20 @@ class FakeFmClient(object):


class FakeSystem(object):
    def __init__(self,
                 obj_id=1,
                 software_version=UPGRADED_VERSION):
    def __init__(self, obj_id=1, software_version=UPGRADED_VERSION):
        self.id = obj_id
        self.uuid = str(uuid.uuid4())
        self.software_version = software_version


class FakeUpgrade(object):
    def __init__(self,
                 obj_id=1,
                 state='completed',
                 from_release=PREVIOUS_VERSION,
                 to_release=UPGRADED_VERSION):
    def __init__(
        self,
        obj_id=1,
        state="completed",
        from_release=PREVIOUS_VERSION,
        to_release=UPGRADED_VERSION,
    ):
        self.id = obj_id
        self.uuid = str(uuid.uuid4())
        self.state = state
@ -213,8 +209,6 @@ class FakeUpgrade(object):


class FakeAlarm(object):
    def __init__(self,
                 alarm_id='12.34',
                 mgmt_affecting='False'):
    def __init__(self, alarm_id="12.34", mgmt_affecting="False"):
        self.alarm_id = alarm_id
        self.mgmt_affecting = mgmt_affecting
@ -11,31 +11,39 @@ from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.orchestrator.states.base import BaseState
from dcmanager.orchestrator.states.firmware import applying_vim_strategy
from dcmanager.tests.unit.orchestrator.states.firmware.test_base \
    import TestFwUpdateState
from dcmanager.tests.unit.orchestrator.states.firmware.test_base import (
    TestFwUpdateState,
)


@mock.patch("dcmanager.orchestrator.states.firmware.applying_vim_strategy."
            "DEFAULT_MAX_FAILED_QUERIES", 3)
@mock.patch("dcmanager.orchestrator.states.firmware.applying_vim_strategy."
            "DEFAULT_MAX_WAIT_ATTEMPTS", 5)
@mock.patch("dcmanager.orchestrator.states.firmware.applying_vim_strategy."
            "WAIT_INTERVAL", 1)
@mock.patch(
    "dcmanager.orchestrator.states.firmware.applying_vim_strategy."
    "DEFAULT_MAX_FAILED_QUERIES",
    3,
)
@mock.patch(
    "dcmanager.orchestrator.states.firmware.applying_vim_strategy."
    "DEFAULT_MAX_WAIT_ATTEMPTS",
    5,
)
@mock.patch(
    "dcmanager.orchestrator.states.firmware.applying_vim_strategy.WAIT_INTERVAL", 1
)
class TestFwUpdateApplyingVIMStrategyStage(TestFwUpdateState):

    def setUp(self):
        super().setUp()

        # set the next state in the chain (when this state is successful)
        self.on_success_state = \
            consts.STRATEGY_STATE_FINISHING_FW_UPDATE
        self.on_success_state = consts.STRATEGY_STATE_FINISHING_FW_UPDATE

        # Add the subcloud being processed by this unit test
        self.subcloud = self.setup_subcloud()

        # Add the strategy_step state being processed by this unit test
        self.strategy_step = self.setup_strategy_step(
            self.subcloud.id, consts.STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY)
            self.subcloud.id, consts.STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY
        )

        # Add mock API endpoints for client calls invoked by this state
|
||||
self.vim_client.get_strategy = mock.MagicMock()
|
||||
@ -53,51 +61,57 @@ class TestFwUpdateApplyingVIMStrategyStage(TestFwUpdateState):
|
||||
]
|
||||
|
||||
# API calls acts as expected
|
||||
self.vim_client.apply_strategy.return_value = \
|
||||
self._create_fake_strategy(vim.STATE_APPLYING)
|
||||
self.vim_client.apply_strategy.return_value = self._create_fake_strategy(
|
||||
vim.STATE_APPLYING
|
||||
)
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
|
||||
# Successful promotion to next state
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id,
|
||||
self.on_success_state)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_applying_vim_strategy_raises_exception(self):
|
||||
"""Test applying a VIM strategy that raises an exception"""
|
||||
|
||||
# first api query is before the apply
|
||||
self.vim_client.get_strategy.return_value = \
|
||||
self._create_fake_strategy(vim.STATE_READY_TO_APPLY)
|
||||
self.vim_client.get_strategy.return_value = self._create_fake_strategy(
|
||||
vim.STATE_READY_TO_APPLY
|
||||
)
|
||||
|
||||
# raise an exception during apply_strategy
|
||||
self.vim_client.apply_strategy.side_effect =\
|
||||
Exception("HTTPBadRequest: this is a fake exception")
|
||||
self.vim_client.apply_strategy.side_effect = Exception(
|
||||
"HTTPBadRequest: this is a fake exception"
|
||||
)
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
|
||||
# Failure case
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id,
|
||||
consts.STRATEGY_STATE_FAILED)
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
|
||||
)
|
||||
|
||||
def test_applying_vim_strategy_fails_apply_immediately(self):
|
||||
"""Test applying a VIM strategy that returns a failed result"""
|
||||
|
||||
# first api query is before the apply
|
||||
self.vim_client.get_strategy.return_value = \
|
||||
self._create_fake_strategy(vim.STATE_READY_TO_APPLY)
|
||||
self.vim_client.get_strategy.return_value = self._create_fake_strategy(
|
||||
vim.STATE_READY_TO_APPLY
|
||||
)
|
||||
|
||||
# return a failed strategy
|
||||
self.vim_client.apply_strategy.return_value = \
|
||||
self._create_fake_strategy(vim.STATE_APPLY_FAILED)
|
||||
self.vim_client.apply_strategy.return_value = self._create_fake_strategy(
|
||||
vim.STATE_APPLY_FAILED
|
||||
)
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
|
||||
# Failure case
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id,
|
||||
consts.STRATEGY_STATE_FAILED)
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
|
||||
)
|
||||
|
||||
def test_applying_vim_strategy_fails_apply_later(self):
|
||||
"""Test applying a VIM strategy that starts to apply but then fails"""
|
||||
@ -110,15 +124,17 @@ class TestFwUpdateApplyingVIMStrategyStage(TestFwUpdateState):
|
||||
]
|
||||
|
||||
# API calls acts as expected
|
||||
self.vim_client.apply_strategy.return_value = \
|
||||
self._create_fake_strategy(vim.STATE_APPLYING)
|
||||
self.vim_client.apply_strategy.return_value = self._create_fake_strategy(
|
||||
vim.STATE_APPLYING
|
||||
)
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
|
||||
# Failure case
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id,
|
||||
consts.STRATEGY_STATE_FAILED)
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
|
||||
)
|
||||
|
||||
def test_applying_vim_strategy_timeout(self):
|
||||
"""Test applying a VIM strategy that times out"""
|
||||
@ -126,23 +142,30 @@ class TestFwUpdateApplyingVIMStrategyStage(TestFwUpdateState):
|
||||
# first api query is before the apply
|
||||
# test where it never progresses past 'applying'
|
||||
self.vim_client.get_strategy.side_effect = itertools.chain(
|
||||
[self._create_fake_strategy(vim.STATE_READY_TO_APPLY), ],
|
||||
itertools.repeat(self._create_fake_strategy(vim.STATE_APPLYING)))
|
||||
[
|
||||
self._create_fake_strategy(vim.STATE_READY_TO_APPLY),
|
||||
],
|
||||
itertools.repeat(self._create_fake_strategy(vim.STATE_APPLYING)),
|
||||
)
|
||||
|
||||
# API calls acts as expected
|
||||
self.vim_client.apply_strategy.return_value = \
self._create_fake_strategy(vim.STATE_APPLYING)
self.vim_client.apply_strategy.return_value = self._create_fake_strategy(
vim.STATE_APPLYING
)

# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

# verify the max number of queries was attempted (plus 1 before loop)
self.assertEqual(applying_vim_strategy.DEFAULT_MAX_WAIT_ATTEMPTS + 1,
self.vim_client.get_strategy.call_count)
self.assertEqual(
applying_vim_strategy.DEFAULT_MAX_WAIT_ATTEMPTS + 1,
self.vim_client.get_strategy.call_count,
)

# Failure case
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)

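The timeout test above works because a mock's side_effect accepts any iterable: each call consumes the next item. Chaining a one-item list with itertools.repeat yields the ready state once and then "applying" forever, so the polling loop runs until DEFAULT_MAX_WAIT_ATTEMPTS is exhausted. A minimal standalone sketch of the pattern (string states stand in for the dcmanager strategy objects):

    import itertools
    from unittest import mock

    get_strategy = mock.MagicMock()
    get_strategy.side_effect = itertools.chain(
        ["ready-to-apply"],            # first poll, before the apply
        itertools.repeat("applying"),  # every later poll never completes
    )

    assert get_strategy() == "ready-to-apply"
    assert all(get_strategy() == "applying" for _ in range(5))
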
def test_applying_vim_strategy_already_applying_and_completes(self):
"""Test applying a VIM strategy while one already is applying"""
@ -161,8 +184,7 @@ class TestFwUpdateApplyingVIMStrategyStage(TestFwUpdateState):
self.vim_client.apply_strategy.assert_not_called()

# SUCCESS case
self.assert_step_updated(self.strategy_step.subcloud_id,
self.on_success_state)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

def test_applying_vim_strategy_already_exists_and_is_broken(self):
"""Test applying a VIM strategy while a broken strategy exists"""
@ -180,8 +202,9 @@ class TestFwUpdateApplyingVIMStrategyStage(TestFwUpdateState):
self.vim_client.apply_strategy.assert_not_called()

# Failure case
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)

def test_applying_vim_strategy_skips_without_subcloud_strategy(self):
"""Test applying a VIM strategy skips when there isn't a strategy to apply"""
@ -193,11 +216,10 @@ class TestFwUpdateApplyingVIMStrategyStage(TestFwUpdateState):
self.vim_client.apply_strategy.assert_not_called()

self.assert_step_updated(
self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FINISHING_FW_UPDATE
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FINISHING_FW_UPDATE
)

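The next test patches BaseState.stopped so the orchestration loop believes a stop was requested. mock.patch.object swaps an attribute on a class for the duration of the test and passes the replacement mock as an extra argument. A minimal sketch under those assumptions (Worker here is an illustrative stand-in, not a dcmanager class):

    from unittest import mock

    class Worker:
        def stopped(self):
            return False

    @mock.patch.object(Worker, "stopped", return_value=True)
    def run_check(mock_stopped):
        # Inside the decorated function the attribute is replaced.
        assert Worker().stopped() is True
        assert mock_stopped.called

    run_check()
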
@mock.patch.object(BaseState, 'stopped', return_value=True)
@mock.patch.object(BaseState, "stopped", return_value=True)
def test_applying_vim_strategy_fails_when_strategy_stops(self, _):
"""Test applying a VIM strategy fails when strategy stops"""

@ -205,8 +227,9 @@ class TestFwUpdateApplyingVIMStrategyStage(TestFwUpdateState):
self._create_fake_strategy(vim.STATE_READY_TO_APPLY)
]

self.vim_client.apply_strategy.return_value = \
self._create_fake_strategy(vim.STATE_APPLYING)
self.vim_client.apply_strategy.return_value = self._create_fake_strategy(
vim.STATE_APPLYING
)

self.worker.perform_state_action(self.strategy_step)

@ -226,11 +249,12 @@ class TestFwUpdateApplyingVIMStrategyStage(TestFwUpdateState):
self._create_fake_strategy(vim.STATE_APPLYING),
Exception(),
Exception(),
Exception()
Exception(),
]

self.vim_client.apply_strategy.return_value = \
self._create_fake_strategy(vim.STATE_APPLYING)
self.vim_client.apply_strategy.return_value = self._create_fake_strategy(
vim.STATE_APPLYING
)

self.worker.perform_state_action(self.strategy_step)

@ -243,11 +267,12 @@ class TestFwUpdateApplyingVIMStrategyStage(TestFwUpdateState):

self.vim_client.get_strategy.side_effect = [
self._create_fake_strategy(vim.STATE_READY_TO_APPLY),
None
None,
]

self.vim_client.apply_strategy.return_value = \
self._create_fake_strategy(vim.STATE_APPLYING)
self.vim_client.apply_strategy.return_value = self._create_fake_strategy(
vim.STATE_APPLYING
)

self.worker.perform_state_action(self.strategy_step)

@ -263,8 +288,9 @@ class TestFwUpdateApplyingVIMStrategyStage(TestFwUpdateState):
self._create_fake_strategy(vim.STATE_ABORTED),
]

self.vim_client.apply_strategy.return_value = \
self._create_fake_strategy(vim.STATE_APPLYING)
self.vim_client.apply_strategy.return_value = self._create_fake_strategy(
vim.STATE_APPLYING
)

self.worker.perform_state_action(self.strategy_step)

@ -32,14 +32,12 @@ class TestFwUpdateState(TestSwUpdate):
str(uuid.uuid4()),
pvendor_id=pvendor_id,
pdevice_id=pdevice_id,
enabled=enabled
enabled=enabled,
)

def _create_fake_device_label(self, label_key, label_value, pcidevice_uuid):
return FakeDeviceLabel(
label_key=label_key,
label_value=label_value,
pcidevice_uuid=pcidevice_uuid
label_key=label_key, label_value=label_value, pcidevice_uuid=pcidevice_uuid
)

def _create_fake_device_image(
@ -50,12 +48,8 @@ class TestFwUpdateState(TestSwUpdate):
pci_vendor=pci_vendor,
pci_device=pci_device,
applied=applied,
applied_labels=applied_labels
applied_labels=applied_labels,
)

def _create_fake_device_image_state(self, pcidevice_uuid, image_uuid, status):
return DeviceImageState(
pcidevice_uuid,
image_uuid,
status
)
return DeviceImageState(pcidevice_uuid, image_uuid, status)

@ -11,29 +11,36 @@ from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.orchestrator.states.base import BaseState
from dcmanager.orchestrator.states.firmware import creating_vim_strategy
from dcmanager.tests.unit.orchestrator.states.firmware.test_base \
import TestFwUpdateState
from dcmanager.tests.unit.orchestrator.states.firmware.test_base import (
TestFwUpdateState,
)

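The class below is decorated with mock.patch targeting module-level constants, which trims the retry loops (3 queries, 1-second sleeps) so the timeout tests finish quickly; Black splits each long decorator call across lines with a trailing comma. A minimal sketch of the same idea against a hypothetical polling module (mypkg.polling is an assumed name, not a dcmanager module):

    import unittest
    from unittest import mock

    @mock.patch("mypkg.polling.DEFAULT_MAX_QUERIES", 3)
    @mock.patch("mypkg.polling.DEFAULT_SLEEP_DURATION", 1)
    class TestPollingFast(unittest.TestCase):
        # Every test in the class sees the patched, smaller constants,
        # so loops that poll DEFAULT_MAX_QUERIES times stay cheap.
        def test_loop_is_short(self):
            from mypkg import polling
            self.assertEqual(polling.DEFAULT_MAX_QUERIES, 3)
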
@mock.patch("dcmanager.orchestrator.states.firmware.creating_vim_strategy."
|
||||
"DEFAULT_MAX_QUERIES", 3)
|
||||
@mock.patch("dcmanager.orchestrator.states.firmware.creating_vim_strategy."
|
||||
"DEFAULT_SLEEP_DURATION", 1)
|
||||
@mock.patch(
|
||||
"dcmanager.orchestrator.states.firmware.creating_vim_strategy."
|
||||
"DEFAULT_MAX_QUERIES",
|
||||
3,
|
||||
)
|
||||
@mock.patch(
|
||||
"dcmanager.orchestrator.states.firmware.creating_vim_strategy."
|
||||
"DEFAULT_SLEEP_DURATION",
|
||||
1,
|
||||
)
|
||||
class TestFwUpdateCreatingVIMStrategyStage(TestFwUpdateState):
|
||||
|
||||
def setUp(self):
|
||||
super(TestFwUpdateCreatingVIMStrategyStage, self).setUp()
|
||||
|
||||
# set the next state in the chain (when this state is successful)
|
||||
self.on_success_state =\
|
||||
consts.STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY
|
||||
self.on_success_state = consts.STRATEGY_STATE_APPLYING_FW_UPDATE_STRATEGY
|
||||
|
||||
# Add the subcloud being processed by this unit test
|
||||
self.subcloud = self.setup_subcloud()
|
||||
|
||||
# Add the strategy_step state being processed by this unit test
|
||||
self.strategy_step = self.setup_strategy_step(
|
||||
self.subcloud.id, consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY)
|
||||
self.subcloud.id, consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY
|
||||
)
|
||||

# Add mock API endpoints for sysinv client calls invoked by this state
self.vim_client.create_strategy = mock.MagicMock()
@ -52,15 +59,15 @@ class TestFwUpdateCreatingVIMStrategyStage(TestFwUpdateState):
]

# API calls act as expected
self.vim_client.create_strategy.return_value = \
self._create_fake_strategy(vim.STATE_BUILDING)
self.vim_client.create_strategy.return_value = self._create_fake_strategy(
vim.STATE_BUILDING
)

# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

# Successful promotion to next state
self.assert_step_updated(self.strategy_step.subcloud_id,
self.on_success_state)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

def test_creating_vim_strategy_raises_exception(self):
"""Test creating a VIM strategy that raises an exception"""
@ -69,15 +76,17 @@ class TestFwUpdateCreatingVIMStrategyStage(TestFwUpdateState):
self.vim_client.get_strategy.return_value = None

# raise an exception during create_strategy
self.vim_client.create_strategy.side_effect =\
Exception("HTTPBadRequest: this is a fake exception")
self.vim_client.create_strategy.side_effect = Exception(
"HTTPBadRequest: this is a fake exception"
)

# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

# Failure case
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)

def test_creating_vim_strategy_fails_create_immediately(self):
"""Test creating a VIM strategy that returns a failed create"""
@ -86,15 +95,17 @@ class TestFwUpdateCreatingVIMStrategyStage(TestFwUpdateState):
self.vim_client.get_strategy.return_value = None

# return a failed strategy
self.vim_client.create_strategy.return_value = \
self._create_fake_strategy(vim.STATE_BUILD_FAILED)
self.vim_client.create_strategy.return_value = self._create_fake_strategy(
vim.STATE_BUILD_FAILED
)

# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

# Failure case
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)

def test_creating_vim_strategy_fails_create_later(self):
"""Test creating a VIM strategy that starts to build but then fails"""
@ -107,39 +118,47 @@ class TestFwUpdateCreatingVIMStrategyStage(TestFwUpdateState):
]

# API calls act as expected
self.vim_client.create_strategy.return_value = \
self._create_fake_strategy(vim.STATE_BUILDING)
self.vim_client.create_strategy.return_value = self._create_fake_strategy(
vim.STATE_BUILDING
)

# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

# Failure case
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)

def test_creating_vim_strategy_timeout(self):
"""Test creating a VIM strategy that times out"""

# first api query is before the create
self.vim_client.get_strategy.side_effect = itertools.chain(
[None, ],
itertools.repeat(self._create_fake_strategy(vim.STATE_BUILDING))
[
None,
],
itertools.repeat(self._create_fake_strategy(vim.STATE_BUILDING)),
)

# API calls act as expected
self.vim_client.create_strategy.return_value = \
self._create_fake_strategy(vim.STATE_BUILDING)
self.vim_client.create_strategy.return_value = self._create_fake_strategy(
vim.STATE_BUILDING
)

# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

# verify the max number of queries was attempted (plus 1)
self.assertEqual(creating_vim_strategy.DEFAULT_MAX_QUERIES + 1,
self.vim_client.get_strategy.call_count)
self.assertEqual(
creating_vim_strategy.DEFAULT_MAX_QUERIES + 1,
self.vim_client.get_strategy.call_count,
)

# Failure case
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)

def test_creating_vim_strategy_already_exists_and_completes(self):
"""Test creating a VIM strategy while one already exists"""
@ -157,8 +176,9 @@ class TestFwUpdateCreatingVIMStrategyStage(TestFwUpdateState):
self._create_fake_strategy(vim.STATE_READY_TO_APPLY),
]
# The strategy should be deleted and then created
self.vim_client.create_strategy.return_value = \
self._create_fake_strategy(vim.STATE_BUILDING)
self.vim_client.create_strategy.return_value = self._create_fake_strategy(
vim.STATE_BUILDING
)

# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)
@ -168,8 +188,7 @@ class TestFwUpdateCreatingVIMStrategyStage(TestFwUpdateState):
# create API call should be invoked
self.assertEqual(1, self.vim_client.create_strategy.call_count)
# SUCCESS case
self.assert_step_updated(self.strategy_step.subcloud_id,
self.on_success_state)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

def test_creating_vim_strategy_already_exists_and_is_broken(self):
"""Test creating a VIM strategy while a broken strategy exists"""
@ -188,17 +207,19 @@ class TestFwUpdateCreatingVIMStrategyStage(TestFwUpdateState):
self.vim_client.create_strategy.assert_not_called()

# Failure case
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)

@mock.patch.object(BaseState, 'stopped', return_value=True)
@mock.patch.object(BaseState, "stopped", return_value=True)
def test_creating_vim_strategy_fails_with_strategy_stop(self, _):
"""Test creating a VIM strategy fails when strategy stops"""

self.vim_client.get_strategy.side_effect = [None]

self.vim_client.create_strategy.return_value = \
self._create_fake_strategy(vim.STATE_BUILDING)
self.vim_client.create_strategy.return_value = self._create_fake_strategy(
vim.STATE_BUILDING
)

self.worker.perform_state_action(self.strategy_step)

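When side_effect is a plain list, the mock returns one element per call, which is how the build-timeout and abort sequences below are scripted: no strategy on the first poll, a building strategy next, then the terminal state. A standalone sketch (string states stand in for FakeVimStrategy objects):

    from unittest import mock

    poll = mock.MagicMock()
    poll.side_effect = [None, "building", "build-timeout"]

    assert poll() is None             # no strategy exists yet
    assert poll() == "building"       # create has started
    assert poll() == "build-timeout"  # terminal state ends the loop
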
@ -212,11 +233,12 @@ class TestFwUpdateCreatingVIMStrategyStage(TestFwUpdateState):
self.vim_client.get_strategy.side_effect = [
None,
self._create_fake_strategy(vim.STATE_BUILDING),
self._create_fake_strategy(vim.STATE_BUILD_TIMEOUT)
self._create_fake_strategy(vim.STATE_BUILD_TIMEOUT),
]

self.vim_client.create_strategy.return_value = \
self._create_fake_strategy(vim.STATE_BUILDING)
self.vim_client.create_strategy.return_value = self._create_fake_strategy(
vim.STATE_BUILDING
)

self.worker.perform_state_action(self.strategy_step)

@ -230,11 +252,12 @@ class TestFwUpdateCreatingVIMStrategyStage(TestFwUpdateState):
self.vim_client.get_strategy.side_effect = [
None,
self._create_fake_strategy(vim.STATE_BUILDING),
self._create_fake_strategy(vim.STATE_ABORTED)
self._create_fake_strategy(vim.STATE_ABORTED),
]

self.vim_client.create_strategy.return_value = \
self._create_fake_strategy(vim.STATE_BUILDING)
self.vim_client.create_strategy.return_value = self._create_fake_strategy(
vim.STATE_BUILDING
)

self.worker.perform_state_action(self.strategy_step)

@ -4,7 +4,6 @@
# SPDX-License-Identifier: Apache-2.0
#


import mock

from dccommon.drivers.openstack import vim
@ -12,17 +11,24 @@ from dcmanager.common import consts
from dcmanager.orchestrator.states.base import BaseState
from dcmanager.orchestrator.states.firmware import finishing_fw_update
from dcmanager.tests.unit.orchestrator.states.fakes import FakeController
from dcmanager.tests.unit.orchestrator.states.firmware.test_base \
import TestFwUpdateState
from dcmanager.tests.unit.orchestrator.states.firmware.test_base import (
TestFwUpdateState,
)

VENDOR_ID = '1'
DEVICE_ID = '2'
VENDOR_ID = "1"
DEVICE_ID = "2"


@mock.patch("dcmanager.orchestrator.states.firmware."
"finishing_fw_update.DEFAULT_MAX_FAILED_QUERIES", 3)
@mock.patch("dcmanager.orchestrator.states.firmware."
"finishing_fw_update.DEFAULT_FAILED_SLEEP", 1)
@mock.patch(
"dcmanager.orchestrator.states.firmware."
"finishing_fw_update.DEFAULT_MAX_FAILED_QUERIES",
3,
)
@mock.patch(
"dcmanager.orchestrator.states.firmware."
"finishing_fw_update.DEFAULT_FAILED_SLEEP",
1,
)
class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState):

def setUp(self):
@ -59,9 +65,7 @@ class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState):
)

self.fake_device_image_state = self._create_fake_device_image_state(
self.fake_device.uuid,
self.fake_device_image.uuid,
'completed'
self.fake_device.uuid, self.fake_device_image.uuid, "completed"
)

def test_finishing_vim_strategy_success(self):
@ -73,16 +77,15 @@ class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState):
# this tests successful steps of:
# - vim strategy exists on subcloud and can be deleted
# - no device image states on the subcloud are 'failed'
self.vim_client.get_strategy.return_value = \
self._create_fake_strategy(vim.STATE_APPLIED)
self.vim_client.get_strategy.return_value = self._create_fake_strategy(
vim.STATE_APPLIED
)

# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

# Successful promotion to next state
self.assert_step_updated(
self.strategy_step.subcloud_id, self.on_success_state
)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

def test_finishing_vim_strategy_success_no_strategy(self):
"""Test finishing the firmware update.
@ -102,16 +105,13 @@ class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState):
self.vim_client.delete_strategy.assert_not_called()

# Successful promotion to next state
self.assert_step_updated(
self.strategy_step.subcloud_id, self.on_success_state
)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

def test_finishing_vim_strategy_failure_get_hosts(self):
"""Test finishing firmware update with communication error to subcloud"""

# mock the get_host query fails and raises an exception
self.sysinv_client.get_hosts.side_effect = \
Exception("HTTP CommunicationError")
self.sysinv_client.get_hosts.side_effect = Exception("HTTP CommunicationError")

# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)
@ -120,17 +120,20 @@ class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState):
self.sysinv_client.get_hosts.assert_called()

# verify the query was tried max retries + 1 times
self.assertEqual(finishing_fw_update.DEFAULT_MAX_FAILED_QUERIES + 1,
self.sysinv_client.get_hosts.call_count)
self.assertEqual(
finishing_fw_update.DEFAULT_MAX_FAILED_QUERIES + 1,
self.sysinv_client.get_hosts.call_count,
)

# verify the subsequent sysinv command was never attempted
self.sysinv_client.get_host_device_list.assert_not_called()

# verify that the state moves to the next state
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)

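Assigning an exception instance to side_effect, as test_finishing_vim_strategy_failure_get_hosts does above, makes the mock raise on every call, so the state machine retries until DEFAULT_MAX_FAILED_QUERIES is exceeded and then fails the step. A minimal sketch:

    from unittest import mock

    get_hosts = mock.MagicMock()
    get_hosts.side_effect = Exception("HTTP CommunicationError")

    for _ in range(3):
        try:
            get_hosts()
        except Exception:
            pass  # every call raises, mimicking an unreachable subcloud

    assert get_hosts.call_count == 3
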
@mock.patch.object(BaseState, 'stopped', return_value=True)
@mock.patch.object(BaseState, "stopped", return_value=True)
def test_finishing_fw_update_fails_when_strategy_stops(self, _):
"""Test finishing fw update fails when strategy stops before acquiring

@ -160,9 +163,7 @@ class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState):
self.sysinv_client.get_device_images.assert_called_once()
self.sysinv_client.get_device_image_states.assert_called_once()

self.assert_step_updated(
self.strategy_step.subcloud_id, self.on_success_state
)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

def test_finishing_fw_update_succeeds_with_host_device_disabled(self):
"""Test finishing fw update succeeds with a device disabled"""
@ -178,11 +179,9 @@ class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState):
self.sysinv_client.get_device_images.assert_not_called()
self.sysinv_client.get_device_image_states.assert_not_called()

self.assert_step_updated(
self.strategy_step.subcloud_id, self.on_success_state
)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

@mock.patch.object(BaseState, 'stopped')
@mock.patch.object(BaseState, "stopped")
def test_finishing_fw_update_fails_when_strategy_stops_with_enabled_host_device(
self, mock_base_state
):
@ -224,13 +223,13 @@ class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState):
self.sysinv_client.get_host_device_list.assert_called_once()
self.assertEqual(
self.sysinv_client.get_device_images.call_count,
finishing_fw_update.DEFAULT_MAX_FAILED_QUERIES + 1
finishing_fw_update.DEFAULT_MAX_FAILED_QUERIES + 1,
)
# TODO(rlima): update the code to fix the error where the call_count is
# always greater than the DEFAULT_MAX_FAILED_QUERIES
self.assertEqual(
self.sysinv_client.get_device_image_states.call_count,
finishing_fw_update.DEFAULT_MAX_FAILED_QUERIES + 1
finishing_fw_update.DEFAULT_MAX_FAILED_QUERIES + 1,
)

self.assert_step_updated(
@ -247,16 +246,14 @@ class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState):
- The third has the same status but its device is None
"""
|
||||
|
||||
self.fake_device_image_state.status = 'pending'
|
||||
self.fake_device_image_state.status = "pending"
|
||||
|
||||
fake_device_image_state_with_image_none = \
|
||||
self._create_fake_device_image_state(
|
||||
self.fake_device.uuid, None, 'pending'
|
||||
)
|
||||
fake_device_image_state_with_device_none = \
|
||||
self._create_fake_device_image_state(
|
||||
None, self.fake_device_image.uuid, 'pending'
|
||||
)
|
||||
fake_device_image_state_with_image_none = self._create_fake_device_image_state(
|
||||
self.fake_device.uuid, None, "pending"
|
||||
)
|
||||
fake_device_image_state_with_device_none = self._create_fake_device_image_state(
|
||||
None, self.fake_device_image.uuid, "pending"
|
||||
)
|
||||
|
||||
self.sysinv_client.get_hosts.return_value = [self.fake_host]
|
||||
self.sysinv_client.get_host_device_list.return_value = [self.fake_device]
|
||||
@ -264,7 +261,7 @@ class TestFwUpdateFinishingFwUpdateStage(TestFwUpdateState):
|
||||
self.sysinv_client.get_device_image_states.return_value = [
|
||||
self.fake_device_image_state,
|
||||
fake_device_image_state_with_image_none,
|
||||
fake_device_image_state_with_device_none
|
||||
fake_device_image_state_with_device_none,
|
||||
]
|
||||
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
|
@ -8,16 +8,17 @@ import mock
|
||||
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.tests.unit.orchestrator.states.fakes import FakeController
|
||||
from dcmanager.tests.unit.orchestrator.states.firmware.test_base \
|
||||
import TestFwUpdateState
|
||||
from dcmanager.tests.unit.orchestrator.states.firmware.test_base import (
|
||||
TestFwUpdateState,
|
||||
)
|
||||
|
||||
VENDOR_1 = "1001"
|
||||
VENDOR_2 = "2002"
|
||||
VENDOR_3 = "3003"
|
||||
|
||||
VENDOR_DEVICE_1 = '9009'
|
||||
VENDOR_DEVICE_2 = '9009'
|
||||
VENDOR_DEVICE_3 = '9009'
|
||||
VENDOR_DEVICE_1 = "9009"
|
||||
VENDOR_DEVICE_2 = "9009"
|
||||
VENDOR_DEVICE_3 = "9009"
|
||||
|
||||
FAKE_SUBCLOUD_CONTROLLER = FakeController()
|
||||
FAKE_ALL_LABEL = [{}]
|
||||
@ -31,7 +32,7 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
# Sets up the necessary variables for mocking
|
||||
self.fake_device = self._create_fake_device(VENDOR_1, VENDOR_DEVICE_1)
|
||||
fake_device_label = self._create_fake_device_label(
|
||||
'fake key', 'fake label', self.fake_device.uuid
|
||||
"fake key", "fake label", self.fake_device.uuid
|
||||
)
|
||||
fake_device_image_from_vendor_1 = self._create_fake_device_image(
|
||||
VENDOR_1, VENDOR_DEVICE_1, True, FAKE_ALL_LABEL
|
||||
@ -45,14 +46,12 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
self.fake_device_image_list = [
|
||||
fake_device_image_from_vendor_1,
|
||||
fake_device_image_from_vendor_2,
|
||||
fake_device_image_from_vendor_3
|
||||
fake_device_image_from_vendor_3,
|
||||
]
|
||||
self.empty_fake_device_image_list = []
|
||||
|
||||
self.fake_device_image = self._create_fake_device_image_state(
|
||||
self.fake_device.uuid,
|
||||
fake_device_image_from_vendor_1.uuid,
|
||||
'completed'
|
||||
self.fake_device.uuid, fake_device_image_from_vendor_1.uuid, "completed"
|
||||
)
|
||||
|
||||
# set the next state in the chain (when this state is successful)
|
||||
@ -91,7 +90,8 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
# first query is system controller
|
||||
# second query is subcloud
|
||||
self.sysinv_client.get_device_images.side_effect = [
|
||||
self.empty_fake_device_image_list, self.fake_device_image_list
|
||||
self.empty_fake_device_image_list,
|
||||
self.fake_device_image_list,
|
||||
]
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
@ -107,9 +107,7 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
self.sysinv_client.apply_device_image.assert_not_called()
|
||||
|
||||
# Successful promotion to next state
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
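Several of the importing-firmware tests below patch os.path.isfile so the state code believes the device image file is (or is not) present on disk, without any real filesystem access. A standalone sketch:

    import os.path
    from unittest import mock

    @mock.patch("os.path.isfile", return_value=True)
    def check(mock_isfile):
        # The patched function ignores its argument and returns True.
        assert os.path.isfile("/nonexistent/firmware.bit") is True
        mock_isfile.assert_called_once_with("/nonexistent/firmware.bit")

    check()
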
@mock.patch("os.path.isfile", return_value=True)
|
||||
def test_importing_firmware_empty_subcloud(self, _):
|
||||
@ -118,7 +116,8 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
# first query is system controller
|
||||
# second query is subcloud
|
||||
self.sysinv_client.get_device_images.side_effect = [
|
||||
self.fake_device_image_list, self.empty_fake_device_image_list
|
||||
self.fake_device_image_list,
|
||||
self.empty_fake_device_image_list,
|
||||
]
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
@ -136,9 +135,7 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
self.assertEqual(1, self.sysinv_client.apply_device_image.call_count)
|
||||
|
||||
# Successful promotion to next state
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_importing_firmware_skips(self):
|
||||
"""Test importing firmware skips when subcloud matches controller."""
|
||||
@ -147,7 +144,8 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
# second query is subcloud
|
||||
# Both are the same
|
||||
self.sysinv_client.get_device_images.side_effect = [
|
||||
self.fake_device_image_list, self.fake_device_image_list
|
||||
self.fake_device_image_list,
|
||||
self.fake_device_image_list,
|
||||
]
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
@ -158,9 +156,7 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
self.sysinv_client.upload_device_image.assert_not_called()
|
||||
|
||||
# On success, should have moved to the next state
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_importing_firmware_succeeds_without_enabled_host_device_list(self):
|
||||
"""Test importing firmware succeeds without enabled host device list"""
|
||||
@ -170,7 +166,8 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
]
|
||||
|
||||
self.sysinv_client.get_device_images.side_effect = [
|
||||
self.empty_fake_device_image_list, self.fake_device_image_list
|
||||
self.empty_fake_device_image_list,
|
||||
self.fake_device_image_list,
|
||||
]
|
||||
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
@ -179,11 +176,9 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
self.sysinv_client.upload_device_image.assert_not_called()
|
||||
self.sysinv_client.apply_device_image.assert_not_called()
|
||||
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
@mock.patch('os.path.isfile', return_value=False)
|
||||
@mock.patch("os.path.isfile", return_value=False)
|
||||
def test_importing_firmware_fails_when_image_file_is_missing(self, _):
|
||||
"""Test importing firmware fails when image file is missing
|
||||
|
||||
@ -191,7 +186,8 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
"""
|
||||
|
||||
self.sysinv_client.get_device_images.side_effect = [
|
||||
self.fake_device_image_list, self.empty_fake_device_image_list
|
||||
self.fake_device_image_list,
|
||||
self.empty_fake_device_image_list,
|
||||
]
|
||||
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
@ -204,12 +200,13 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
|
||||
)
|
||||
|
||||
@mock.patch('os.path.isfile', return_value=True)
|
||||
@mock.patch("os.path.isfile", return_value=True)
|
||||
def test_importing_firmware_succeeds_with_device_image_state_completed(self, _):
|
||||
"""Test importing firmware success with a device image state completed"""
|
||||
|
||||
self.sysinv_client.get_device_images.side_effect = [
|
||||
self.fake_device_image_list, self.empty_fake_device_image_list
|
||||
self.fake_device_image_list,
|
||||
self.empty_fake_device_image_list,
|
||||
]
|
||||
|
||||
self.sysinv_client.get_device_image_states.return_value = [
|
||||
@ -222,19 +219,18 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
self.sysinv_client.apply_device_image.assert_not_called()
|
||||
self.sysinv_client.upload_device_image.assert_called_once()
|
||||
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
@mock.patch('os.path.isfile', return_value=True)
|
||||
@mock.patch("os.path.isfile", return_value=True)
|
||||
def test_importing_firmware_succeeds_with_device_image_state_pending(self, _):
|
||||
"""Test importing firmware success with a device image state pending"""
|
||||
|
||||
self.sysinv_client.get_device_images.side_effect = [
|
||||
self.fake_device_image_list, self.empty_fake_device_image_list
|
||||
self.fake_device_image_list,
|
||||
self.empty_fake_device_image_list,
|
||||
]
|
||||
|
||||
self.fake_device_image.status = 'pending'
|
||||
self.fake_device_image.status = "pending"
|
||||
|
||||
self.sysinv_client.get_device_image_states.return_value = [
|
||||
self.fake_device_image
|
||||
@ -246,16 +242,14 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
|
||||
self.sysinv_client.apply_device_image.assert_not_called()
|
||||
self.sysinv_client.upload_device_image.assert_called_once()
|
||||
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
@mock.patch('os.path.isfile', return_value=True)
|
||||
@mock.patch("os.path.isfile", return_value=True)
|
||||
def test_importing_firmware_succeeds_with_applied_subcloud_images(self, _):
|
||||
"""Test importing firmware success with applied subcloudimages"""
|
||||

fake_device_image_with_label = self._create_fake_device_image(
VENDOR_1, VENDOR_DEVICE_1, True, [{'fake label': 'fake value'}]
VENDOR_1, VENDOR_DEVICE_1, True, [{"fake label": "fake value"}]
)

self.fake_device_image_list.append(fake_device_image_with_label)
@ -269,19 +263,17 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):

self.assertEqual(
self.sysinv_client.remove_device_image.call_count,
len(self.fake_device_image_list,)
len(
self.fake_device_image_list,
),
)
self.sysinv_client.apply_device_image.assert_not_called()
self.sysinv_client.upload_device_image.assert_not_called()

self.assert_step_updated(
self.strategy_step.subcloud_id, self.on_success_state
)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

@mock.patch('os.path.isfile', return_value=True)
def test_importing_firmware_succeeds_without_subcloud_device_image_states(
self, _
):
@mock.patch("os.path.isfile", return_value=True)
def test_importing_firmware_succeeds_without_subcloud_device_image_states(self, _):
"""Test importing firmware success without subcloud device image states

In this scenario, a device image with applied_labels should have them
@ -289,12 +281,12 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
"""

fake_device_image_with_label = self._create_fake_device_image(
VENDOR_1, VENDOR_DEVICE_1, True, [{'fake key': 'fake label'}]
VENDOR_1, VENDOR_DEVICE_1, True, [{"fake key": "fake label"}]
)

self.sysinv_client.get_device_images.side_effect = [
[fake_device_image_with_label],
self.empty_fake_device_image_list
self.empty_fake_device_image_list,
]

self.worker.perform_state_action(self.strategy_step)
@ -303,9 +295,7 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
self.sysinv_client.apply_device_image.assert_called_once()
self.sysinv_client.upload_device_image.assert_called_once()

self.assert_step_updated(
self.strategy_step.subcloud_id, self.on_success_state
)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

def test_importing_firmware_succeeds_with_device_image_without_label(self):
"""Test importing firmware succeeds with device image without label
@ -324,7 +314,7 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):

self.sysinv_client.get_device_images.side_effect = [
[fake_device_image_with_label],
self.empty_fake_device_image_list
self.empty_fake_device_image_list,
]

self.worker.perform_state_action(self.strategy_step)
@ -333,9 +323,7 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
self.sysinv_client.upload_device_image.assert_not_called()
self.sysinv_client.apply_device_image.assert_not_called()

self.assert_step_updated(
self.strategy_step.subcloud_id, self.on_success_state
)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

def test_importing_firmware_succeeds_with_device_inelegible(self):
"""Test importing firmware succeeds with device image inalegible
|
||||
@ -345,12 +333,12 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
"""

fake_device_image_with_label = self._create_fake_device_image(
VENDOR_1, VENDOR_DEVICE_1, True, [{'fake label': 'fake value'}]
VENDOR_1, VENDOR_DEVICE_1, True, [{"fake label": "fake value"}]
)

self.sysinv_client.get_device_images.side_effect = [
[fake_device_image_with_label],
self.empty_fake_device_image_list
self.empty_fake_device_image_list,
]

self.worker.perform_state_action(self.strategy_step)
@ -359,9 +347,7 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
self.sysinv_client.upload_device_image.assert_not_called()
self.sysinv_client.apply_device_image.assert_not_called()

self.assert_step_updated(
self.strategy_step.subcloud_id, self.on_success_state
)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

def test_importing_firmware_succeeds_with_device_not_applied(self):
"""Test importing firmware succeeds with device not applied"""
@ -371,7 +357,7 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
)
self.sysinv_client.get_device_images.side_effect = [
[fake_device_image_with_label],
self.empty_fake_device_image_list
self.empty_fake_device_image_list,
]

self.worker.perform_state_action(self.strategy_step)
@ -380,6 +366,4 @@ class TestFwUpdateImportingFirmwareStage(TestFwUpdateState):
self.sysinv_client.upload_device_image.assert_not_called()
self.sysinv_client.apply_device_image.assert_not_called()

self.assert_step_updated(
self.strategy_step.subcloud_id, self.on_success_state
)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

@ -1,21 +1,23 @@
#
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.common import consts
from dcmanager.tests.unit.orchestrator.states.kube.test_base \
import TestKubeUpgradeState
from dcmanager.tests.unit.orchestrator.states.test_applying_vim_strategy \
import ApplyingVIMStrategyMixin
from dcmanager.tests.unit.orchestrator.states.kube.test_base import TestKubeUpgradeState
from dcmanager.tests.unit.orchestrator.states.test_applying_vim_strategy import (
ApplyingVIMStrategyMixin,
)


class TestApplyingVIMKubeUpgradeStrategyStage(ApplyingVIMStrategyMixin,
TestKubeUpgradeState):
class TestApplyingVIMKubeUpgradeStrategyStage(
ApplyingVIMStrategyMixin, TestKubeUpgradeState
):
"""This test applies the 'kube' vim strategy during kube upgrade"""

def setUp(self):
super(TestApplyingVIMKubeUpgradeStrategyStage, self).setUp()
self.set_state(
consts.STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY,
consts.STRATEGY_STATE_COMPLETE)
consts.STRATEGY_STATE_COMPLETE,
)

@ -13,63 +13,38 @@ from dcmanager.tests.unit.common import fake_strategy
from dcmanager.tests.unit.fakes import FakeVimStrategy
from dcmanager.tests.unit.orchestrator.states.fakes import FakeKubeUpgrade
from dcmanager.tests.unit.orchestrator.states.fakes import FakeKubeVersion
from dcmanager.tests.unit.orchestrator.states.fakes \
import PREVIOUS_KUBE_VERSION
from dcmanager.tests.unit.orchestrator.states.fakes \
import UPGRADED_KUBE_VERSION
from dcmanager.tests.unit.orchestrator.states.kube.test_base \
import TestKubeUpgradeState
from dcmanager.tests.unit.orchestrator.states.test_creating_vim_strategy \
import CreatingVIMStrategyStageMixin
from dcmanager.tests.unit.orchestrator.states.fakes import PREVIOUS_KUBE_VERSION
from dcmanager.tests.unit.orchestrator.states.fakes import UPGRADED_KUBE_VERSION
from dcmanager.tests.unit.orchestrator.states.kube.test_base import TestKubeUpgradeState
from dcmanager.tests.unit.orchestrator.states.test_creating_vim_strategy import (
CreatingVIMStrategyStageMixin,
)

STRATEGY_BUILDING = FakeVimStrategy(state=vim.STATE_BUILDING)
STRATEGY_DONE_BUILDING = FakeVimStrategy(state=vim.STATE_READY_TO_APPLY)

KUBE_VERSION_LIST = [
FakeKubeVersion(obj_id=1,
version='v1.2.3',
target=True,
state='active'),
FakeKubeVersion(obj_id=2,
version='v1.2.4',
target=False,
state='available'),
FakeKubeVersion(obj_id=3,
version='v1.2.5',
target=False,
state='available'),
FakeKubeVersion(obj_id=1, version="v1.2.3", target=True, state="active"),
FakeKubeVersion(obj_id=2, version="v1.2.4", target=False, state="available"),
FakeKubeVersion(obj_id=3, version="v1.2.5", target=False, state="available"),
]

KUBE_VERSION_LIST_SC = [
FakeKubeVersion(obj_id=1,
version='v1.2.5',
target=True,
state='active')
FakeKubeVersion(obj_id=1, version="v1.2.5", target=True, state="active")
]

KUBE_VERSION_LIST_SC_2 = [
FakeKubeVersion(obj_id=1,
version='v1.2.4',
target=True,
state='active')
FakeKubeVersion(obj_id=1, version="v1.2.4", target=True, state="active")
]

KUBE_UPGRADE_LIST = [
FakeKubeUpgrade(
obj_id=1,
to_version='v1.2.5',
from_version='v1.2.4',
state='active'
obj_id=1, to_version="v1.2.5", from_version="v1.2.4", state="active"
)
]

KUBE_VERSION_LIST_WITHOUT_ACTIVE = [
FakeKubeVersion(
obj_id=1,
version='v1.2.3',
target=True,
state='available'
)
FakeKubeVersion(obj_id=1, version="v1.2.3", target=True, state="available")
]


@ -83,7 +58,7 @@ class TestCreatingVIMKubeUpgradeStrategyStage(

self.set_state(
consts.STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY,
consts.STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY
consts.STRATEGY_STATE_KUBE_APPLYING_VIM_KUBE_UPGRADE_STRATEGY,
)

# creating the vim strategy checks if an existing upgrade exists
@ -93,27 +68,25 @@ class TestCreatingVIMKubeUpgradeStrategyStage(
# when no vim strategy exists, the available version is used
self.sysinv_client.get_kube_versions = mock.MagicMock()
self.sysinv_client.get_kube_versions.return_value = [
FakeKubeVersion(obj_id=1,
version=PREVIOUS_KUBE_VERSION,
target=True,
state='active'),
FakeKubeVersion(obj_id=2,
version=UPGRADED_KUBE_VERSION,
target=False,
state='available'),
FakeKubeVersion(
obj_id=1, version=PREVIOUS_KUBE_VERSION, target=True, state="active"
),
FakeKubeVersion(
obj_id=2, version=UPGRADED_KUBE_VERSION, target=False, state="available"
),
]

self._mock_read_from_cache(BaseState)
self.mock_read_from_cache.return_value = [
FakeKubeVersion(obj_id=1,
version=PREVIOUS_KUBE_VERSION,
target=True,
state='active'),
FakeKubeVersion(obj_id=2,
version=UPGRADED_KUBE_VERSION,
target=False,
state='available'),
FakeKubeVersion(
obj_id=1, version=PREVIOUS_KUBE_VERSION, target=True, state="active"
),
FakeKubeVersion(
obj_id=2, version=UPGRADED_KUBE_VERSION, target=False, state="available"
),
]
self.vim_client.get_strategy = mock.MagicMock()
self.vim_client.create_strategy = mock.MagicMock()

def mock_and_assert_step_update(
self, is_upgrade=False, kube_version=None, kube_version_list=None
@ -137,9 +110,8 @@ class TestCreatingVIMKubeUpgradeStrategyStage(
if kube_version:
extra_args = {"to-version": kube_version}
self.strategy = fake_strategy.create_fake_strategy(
self.ctx,
self.DEFAULT_STRATEGY_TYPE,
extra_args=extra_args)
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
)
else:
kube_version = kube_version_list[0].version
# Subcloud query
@ -154,24 +126,27 @@ class TestCreatingVIMKubeUpgradeStrategyStage(
self.worker.perform_state_action(self.strategy_step)

self.vim_client.create_strategy.assert_called_with(
'kube-upgrade', 'parallel', 'parallel', 10,
'migrate', 'relaxed', to_version=kube_version
"kube-upgrade",
"parallel",
"parallel",
10,
"migrate",
"relaxed",
to_version=kube_version,
)

# Successful promotion to next state
self.assert_step_updated(
self.strategy_step.subcloud_id, self.on_success_state
)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)

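assert_called_with, used just above, verifies that the most recent call to the mock matched the exact positional and keyword arguments, so the test pins down every parameter the VIM client receives; Black then formats the long expectation one argument per line. A minimal sketch:

    from unittest import mock

    create_strategy = mock.MagicMock()
    create_strategy("kube-upgrade", "parallel", 10, to_version="v1.2.5")

    # Passes only if the last call used exactly these arguments.
    create_strategy.assert_called_with(
        "kube-upgrade",
        "parallel",
        10,
        to_version="v1.2.5",
    )
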
def test_strategy_succeeds_with_highest_kube_version(self):
"""Test strategy succeeds when selecting the highest kube version"""

self.mock_and_assert_step_update(kube_version='v1.2.5')
self.mock_and_assert_step_update(kube_version="v1.2.5")

def test_strategy_succeeds_with_lowest_kube_version(self):
"""Test strategy succeeds when selecting the lowest kube version"""

self.mock_and_assert_step_update(kube_version='v1.2.4')
self.mock_and_assert_step_update(kube_version="v1.2.4")

def test_strategy_succeeds_without_kube_version_selected(self):
"""Test strategy succeeds without a selected kube_version"""
@ -193,14 +168,12 @@ class TestCreatingVIMKubeUpgradeStrategyStage(
def test_strategy_fails_without_active_version_to_upgrade(self):
"""Test upgrade fails without an active version to upgrade"""

fake_strategy.create_fake_strategy(
self.ctx, self.DEFAULT_STRATEGY_TYPE
)
fake_strategy.create_fake_strategy(self.ctx, self.DEFAULT_STRATEGY_TYPE)

self.sysinv_client.get_kube_versions.return_value = \
KUBE_VERSION_LIST_WITHOUT_ACTIVE
self.mock_read_from_cache.return_value = \
self.sysinv_client.get_kube_versions.return_value = (
KUBE_VERSION_LIST_WITHOUT_ACTIVE
)
self.mock_read_from_cache.return_value = KUBE_VERSION_LIST_WITHOUT_ACTIVE

self.worker.perform_state_action(self.strategy_step)

@ -9,8 +9,9 @@ import mock
from dcmanager.common.consts import DEPLOY_STATE_DONE
from dcmanager.common.consts import STRATEGY_STATE_COMPLETE
from dcmanager.common.consts import STRATEGY_STATE_FAILED
from dcmanager.common.consts \
import STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
from dcmanager.common.consts import (
STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY,
)
from dcmanager.common.consts import STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.orchestrator.states.base import BaseState
@ -18,73 +19,74 @@ from dcmanager.tests.unit.common import fake_strategy
from dcmanager.tests.unit.orchestrator.states.fakes import FakeAlarm
from dcmanager.tests.unit.orchestrator.states.fakes import FakeKubeUpgrade
from dcmanager.tests.unit.orchestrator.states.fakes import FakeKubeVersion
from dcmanager.tests.unit.orchestrator.states.fakes \
import PREVIOUS_KUBE_VERSION
from dcmanager.tests.unit.orchestrator.states.fakes \
import UPGRADED_KUBE_VERSION
from dcmanager.tests.unit.orchestrator.states.kube.test_base \
import TestKubeUpgradeState
from dcmanager.tests.unit.orchestrator.states.fakes import PREVIOUS_KUBE_VERSION
from dcmanager.tests.unit.orchestrator.states.fakes import UPGRADED_KUBE_VERSION
from dcmanager.tests.unit.orchestrator.states.kube.test_base import TestKubeUpgradeState

KUBERNETES_UPGRADE_HEALTH_RESPONSE_SUCCESS = \
"System Health:\n" \
"All hosts are provisioned: [OK]\n" \
"All hosts are unlocked/enabled: [OK]\n" \
"All hosts have current configurations: [OK]\n" \
"All hosts are patch current: [OK]\n" \
"No alarms: [OK]\n" \
"All kubernetes nodes are ready: [OK]\n" \
"All kubernetes control plane pods are ready: [OK]\n" \
KUBERNETES_UPGRADE_HEALTH_RESPONSE_SUCCESS = (
"System Health:\n"
"All hosts are provisioned: [OK]\n"
"All hosts are unlocked/enabled: [OK]\n"
"All hosts have current configurations: [OK]\n"
"All hosts are patch current: [OK]\n"
"No alarms: [OK]\n"
"All kubernetes nodes are ready: [OK]\n"
"All kubernetes control plane pods are ready: [OK]\n"
"All kubernetes applications are in a valid state: [OK]"
)

KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_ALARM = \
"System Health:\n" \
"All hosts are provisioned: [OK]\n" \
"All hosts are unlocked/enabled: [OK]\n" \
"All hosts have current configurations: [OK]\n" \
"All hosts are patch current: [OK]\n" \
"No alarms: [Fail]\n" \
"[2] alarms found, [2] of which are management affecting\n" \
"All kubernetes nodes are ready: [OK]\n" \
"All kubernetes control plane pods are ready: [OK]\n" \
KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_ALARM = (
"System Health:\n"
"All hosts are provisioned: [OK]\n"
"All hosts are unlocked/enabled: [OK]\n"
"All hosts have current configurations: [OK]\n"
"All hosts are patch current: [OK]\n"
"No alarms: [Fail]\n"
"[2] alarms found, [2] of which are management affecting\n"
"All kubernetes nodes are ready: [OK]\n"
"All kubernetes control plane pods are ready: [OK]\n"
"All kubernetes applications are in a valid state: [OK]"
)

KUBERNETES_UPGRADE_HEALTH_RESPONSE_NON_MGMT_AFFECTING_ALARM = \
"System Health:\n" \
"All hosts are provisioned: [OK]\n" \
"All hosts are unlocked/enabled: [OK]\n" \
"All hosts have current configurations: [OK]\n" \
"All hosts are patch current: [OK]\n" \
"No alarms: [Fail]\n" \
"[1] alarms found, [0] of which are management affecting\n" \
"All kubernetes nodes are ready: [OK]\n" \
"All kubernetes control plane pods are ready: [OK]\n" \
KUBERNETES_UPGRADE_HEALTH_RESPONSE_NON_MGMT_AFFECTING_ALARM = (
"System Health:\n"
"All hosts are provisioned: [OK]\n"
"All hosts are unlocked/enabled: [OK]\n"
"All hosts have current configurations: [OK]\n"
"All hosts are patch current: [OK]\n"
"No alarms: [Fail]\n"
"[1] alarms found, [0] of which are management affecting\n"
"All kubernetes nodes are ready: [OK]\n"
"All kubernetes control plane pods are ready: [OK]\n"
"All kubernetes applications are in a valid state: [OK]"
)

KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_AND_KUBERNETES_ALARM = \
"System Health:\n" \
"All hosts are provisioned: [OK]\n" \
"All hosts are unlocked/enabled: [OK]\n" \
"All hosts have current configurations: [OK]\n" \
"All hosts are patch current: [OK]\n" \
"No alarms: [Fail]\n" \
"[2] alarms found, [2] of which are management affecting\n" \
"All kubernetes nodes are ready: [Fail]\n" \
"All kubernetes control plane pods are ready: [OK]\n" \
KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_AND_KUBERNETES_ALARM = (
"System Health:\n"
"All hosts are provisioned: [OK]\n"
"All hosts are unlocked/enabled: [OK]\n"
"All hosts have current configurations: [OK]\n"
"All hosts are patch current: [OK]\n"
"No alarms: [Fail]\n"
"[2] alarms found, [2] of which are management affecting\n"
"All kubernetes nodes are ready: [Fail]\n"
"All kubernetes control plane pods are ready: [OK]\n"
"All kubernetes applications are in a valid state: [OK]"
)

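The four health-report constants above were converted from backslash continuations to parenthesized implicit string concatenation, which Black prefers; adjacent string literals inside parentheses are merged into one string at compile time, so the value is unchanged. A small sketch:

    REPORT = (
        "System Health:\n"
        "All hosts are provisioned: [OK]\n"
        "No alarms: [OK]"
    )

    # Identical to the single-line literal:
    assert REPORT == "System Health:\nAll hosts are provisioned: [OK]\nNo alarms: [OK]"

The parenthesized form needs no trailing backslashes, so it survives reformatting and editing more safely.
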
MEMORY_THRESHOLD_ALARM = FakeAlarm('100.101', 'True')
KUBERNETES_UPGRADE_ALARM = FakeAlarm('900.007', 'True')
CONFIG_OUT_OF_DATE_ALARM = FakeAlarm('250.001', 'False')
MEMORY_THRESHOLD_ALARM = FakeAlarm("100.101", "True")
KUBERNETES_UPGRADE_ALARM = FakeAlarm("900.007", "True")
CONFIG_OUT_OF_DATE_ALARM = FakeAlarm("250.001", "False")

KUBE_VERSION_LIST = [
FakeKubeVersion(obj_id=1, version='v1.2.3', target=True, state='active'),
FakeKubeVersion(obj_id=2, version='v1.2.5', target=False, state='available'),
FakeKubeVersion(obj_id=1, version="v1.2.3", target=True, state="active"),
FakeKubeVersion(obj_id=2, version="v1.2.5", target=False, state="available"),
]

KUBE_VERSION_LIST_2 = [
FakeKubeVersion(obj_id=1, version='v1.2.3', target=True, state='active'),
FakeKubeVersion(obj_id=2, version='v1.2.5', target=False, state='available'),
FakeKubeVersion(obj_id=3, version='v1.2.6', target=False, state='available'),
FakeKubeVersion(obj_id=1, version="v1.2.3", target=True, state="active"),
FakeKubeVersion(obj_id=2, version="v1.2.5", target=False, state="available"),
FakeKubeVersion(obj_id=3, version="v1.2.6", target=False, state="available"),
]


@ -99,7 +101,8 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):

# Add the strategy_step state being processed by this unit test
self.strategy_step = self.setup_strategy_step(
self.subcloud.id, STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK)
self.subcloud.id, STRATEGY_STATE_KUBE_UPGRADE_PRE_CHECK
)

# mock there not being a kube upgrade in progress
self.sysinv_client.get_kube_upgrades = mock.MagicMock()
@ -108,7 +111,8 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
self.fm_client.get_alarms = mock.MagicMock()
self.sysinv_client.get_kube_upgrade_health = mock.MagicMock()
self.sysinv_client.get_kube_upgrade_health.return_value = (
KUBERNETES_UPGRADE_HEALTH_RESPONSE_SUCCESS)
KUBERNETES_UPGRADE_HEALTH_RESPONSE_SUCCESS
)

# mock the get_kube_versions calls
self.sysinv_client.get_kube_versions = mock.MagicMock()
@ -116,14 +120,12 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
# mock the cached get_kube_versions calls
self._mock_read_from_cache(BaseState)
self.mock_read_from_cache.return_value = [
FakeKubeVersion(obj_id=1,
version=PREVIOUS_KUBE_VERSION,
target=True,
state='active'),
FakeKubeVersion(obj_id=2,
version=UPGRADED_KUBE_VERSION,
target=False,
state='available'),
FakeKubeVersion(
obj_id=1, version=PREVIOUS_KUBE_VERSION, target=True, state="active"
),
FakeKubeVersion(
obj_id=2, version=UPGRADED_KUBE_VERSION, target=False, state="available"
),
]

def test_pre_check_subcloud_existing_upgrade(self):
@ -135,16 +137,15 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):

next_state = STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
# Update the subcloud to have deploy state as "complete"
db_api.subcloud_update(self.ctx,
self.subcloud.id,
deploy_status=DEPLOY_STATE_DONE)
db_api.subcloud_update(
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
)
self.sysinv_client.get_kube_upgrades.return_value = [FakeKubeUpgrade()]
# get kube versions invoked only for the system controller
self.mock_read_from_cache.return_value = [
FakeKubeVersion(obj_id=1,
version=UPGRADED_KUBE_VERSION,
target=True,
state='active'),
FakeKubeVersion(
obj_id=1, version=UPGRADED_KUBE_VERSION, target=True, state="active"
),
]

# invoke the strategy state operation on the orch thread
@ -171,7 +172,7 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):

self.mock_read_from_cache.return_value = [
FakeKubeVersion(
obj_id=1, version=UPGRADED_KUBE_VERSION, target=True, state='active'
obj_id=1, version=UPGRADED_KUBE_VERSION, target=True, state="active"
)
]

@ -184,28 +185,29 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
def test_pre_check_subcloud_failed_health_check_with_management_alarms(self):
"""Test pre check step where subcloud has management affecting alarms"""

db_api.subcloud_update(self.ctx,
self.subcloud.id,
deploy_status=DEPLOY_STATE_DONE)
db_api.subcloud_update(
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
)

self.fm_client.get_alarms.return_value = [MEMORY_THRESHOLD_ALARM,
KUBERNETES_UPGRADE_ALARM]
self.fm_client.get_alarms.return_value = [
MEMORY_THRESHOLD_ALARM,
KUBERNETES_UPGRADE_ALARM,
]
self.sysinv_client.get_kube_upgrade_health.return_value = (
KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_ALARM)
KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_ALARM
)
self.sysinv_client.get_kube_upgrades.return_value = [FakeKubeUpgrade()]
self.mock_read_from_cache.return_value = [
FakeKubeVersion(obj_id=1,
version=UPGRADED_KUBE_VERSION,
target=True,
state='active'),
FakeKubeVersion(
obj_id=1, version=UPGRADED_KUBE_VERSION, target=True, state="active"
),
]
self.worker.perform_state_action(self.strategy_step)
self.sysinv_client.get_kube_upgrade_health.assert_called_once()
self.assert_step_updated(self.strategy_step.subcloud_id,
STRATEGY_STATE_FAILED)
self.assert_step_updated(self.strategy_step.subcloud_id, STRATEGY_STATE_FAILED)

def test_pre_check_subcloud_failed_health_check_with_mgmt_and_kubernetes_alarm(
self
self,
):
"""Test pre check step where subcloud has management and kubernetes

@ -216,64 +218,67 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
)

self.sysinv_client.get_kube_upgrade_health.return_value = \
self.sysinv_client.get_kube_upgrade_health.return_value = (
KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_AND_KUBERNETES_ALARM
)

self.worker.perform_state_action(self.strategy_step)

self.sysinv_client.get_kube_upgrade_health.assert_called_once()
self.assert_step_updated(
self.strategy_step.subcloud_id, STRATEGY_STATE_FAILED
)
self.assert_step_updated(self.strategy_step.subcloud_id, STRATEGY_STATE_FAILED)

def test_pre_check_subcloud_failed_health_check_with_allowed_management_alarms(
self
self,
):
"""Test pre check step where subcloud has management affecting alarms"""

db_api.subcloud_update(self.ctx,
self.subcloud.id,
deploy_status=DEPLOY_STATE_DONE)
db_api.subcloud_update(
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
)

self.fm_client.get_alarms.return_value = [CONFIG_OUT_OF_DATE_ALARM,
|
||||
KUBERNETES_UPGRADE_ALARM]
|
||||
self.fm_client.get_alarms.return_value = [
|
||||
CONFIG_OUT_OF_DATE_ALARM,
|
||||
KUBERNETES_UPGRADE_ALARM,
|
||||
]
|
||||
self.sysinv_client.get_kube_upgrade_health.return_value = (
|
||||
KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_ALARM)
|
||||
KUBERNETES_UPGRADE_HEALTH_RESPONSE_MGMT_AFFECTING_ALARM
|
||||
)
|
||||
self.sysinv_client.get_kube_upgrades.return_value = [FakeKubeUpgrade()]
|
||||
self.mock_read_from_cache.return_value = [
|
||||
FakeKubeVersion(obj_id=1,
|
||||
version=UPGRADED_KUBE_VERSION,
|
||||
target=True,
|
||||
state='active'),
|
||||
FakeKubeVersion(
|
||||
obj_id=1, version=UPGRADED_KUBE_VERSION, target=True, state="active"
|
||||
),
|
||||
]
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
self.sysinv_client.get_kube_upgrade_health.assert_called_once()
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id,
|
||||
STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY)
|
||||
STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY,
|
||||
)
|
||||
|
||||
def test_pre_check_subcloud_failed_health_check_with_non_management_alarms(self):
|
||||
"""Test pre check step where subcloud has non-management affecting alarms"""
|
||||
|
||||
db_api.subcloud_update(self.ctx,
|
||||
self.subcloud.id,
|
||||
deploy_status=DEPLOY_STATE_DONE)
|
||||
db_api.subcloud_update(
|
||||
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
|
||||
)
|
||||
|
||||
self.sysinv_client.get_kube_upgrade_health.return_value = (
|
||||
KUBERNETES_UPGRADE_HEALTH_RESPONSE_NON_MGMT_AFFECTING_ALARM)
|
||||
KUBERNETES_UPGRADE_HEALTH_RESPONSE_NON_MGMT_AFFECTING_ALARM
|
||||
)
|
||||
self.sysinv_client.get_kube_upgrades.return_value = [FakeKubeUpgrade()]
|
||||
self.mock_read_from_cache.return_value = [
|
||||
FakeKubeVersion(obj_id=1,
|
||||
version=UPGRADED_KUBE_VERSION,
|
||||
target=True,
|
||||
state='active'),
|
||||
FakeKubeVersion(
|
||||
obj_id=1, version=UPGRADED_KUBE_VERSION, target=True, state="active"
|
||||
),
|
||||
]
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
self.sysinv_client.get_kube_upgrade_health.assert_called_once()
|
||||
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id,
|
||||
STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY)
|
||||
STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY,
|
||||
)
|
||||
|
||||
def test_pre_check_no_sys_controller_active_version(self):
|
||||
"""Test pre check step where system controller has no active version
|
||||
@ -286,23 +291,24 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
|
||||
next_state = STRATEGY_STATE_FAILED
|
||||
# Update the subcloud to have deploy state as "complete"
|
||||
db_api.subcloud_update(self.ctx,
|
||||
self.subcloud.id,
|
||||
deploy_status=DEPLOY_STATE_DONE)
|
||||
db_api.subcloud_update(
|
||||
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
|
||||
)
|
||||
|
||||
# No extra args / to-version in the database
|
||||
# Query system controller kube versions
|
||||
# override the first get, so that there is no active release
|
||||
# 'partial' indicates the system controller is still upgrading
|
||||
self.mock_read_from_cache.return_value = [
|
||||
FakeKubeVersion(obj_id=1,
|
||||
version=PREVIOUS_KUBE_VERSION,
|
||||
target=True,
|
||||
state='partial'),
|
||||
FakeKubeVersion(obj_id=2,
|
||||
version=UPGRADED_KUBE_VERSION,
|
||||
target=False,
|
||||
state='unavailable'),
|
||||
FakeKubeVersion(
|
||||
obj_id=1, version=PREVIOUS_KUBE_VERSION, target=True, state="partial"
|
||||
),
|
||||
FakeKubeVersion(
|
||||
obj_id=2,
|
||||
version=UPGRADED_KUBE_VERSION,
|
||||
target=False,
|
||||
state="unavailable",
|
||||
),
|
||||
]
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
@ -326,32 +332,34 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
"""
|
||||
|
||||
# Update the subcloud to have deploy state as "complete"
|
||||
db_api.subcloud_update(self.ctx,
|
||||
self.subcloud.id,
|
||||
deploy_status=DEPLOY_STATE_DONE)
|
||||
db_api.subcloud_update(
|
||||
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
|
||||
)
|
||||
|
||||
# No extra args / to-version in the database
|
||||
# Query system controller kube versions
|
||||
self.mock_read_from_cache.side_effect = [
|
||||
[ # first list: (system controller) has an active release
|
||||
FakeKubeVersion(obj_id=1,
|
||||
version=PREVIOUS_KUBE_VERSION,
|
||||
target=False,
|
||||
state='unavailable'),
|
||||
FakeKubeVersion(obj_id=2,
|
||||
version=UPGRADED_KUBE_VERSION,
|
||||
target=True,
|
||||
state='active'),
|
||||
[ # first list: (system controller) has an active release
|
||||
FakeKubeVersion(
|
||||
obj_id=1,
|
||||
version=PREVIOUS_KUBE_VERSION,
|
||||
target=False,
|
||||
state="unavailable",
|
||||
),
|
||||
FakeKubeVersion(
|
||||
obj_id=2, version=UPGRADED_KUBE_VERSION, target=True, state="active"
|
||||
),
|
||||
],
|
||||
[ # second list: (subcloud) fully upgraded (no available release)
|
||||
FakeKubeVersion(obj_id=1,
|
||||
version=PREVIOUS_KUBE_VERSION,
|
||||
target=False,
|
||||
state='unavailable'),
|
||||
FakeKubeVersion(obj_id=2,
|
||||
version=UPGRADED_KUBE_VERSION,
|
||||
target=True,
|
||||
state='active'),
|
||||
[ # second list: (subcloud) fully upgraded (no available release)
|
||||
FakeKubeVersion(
|
||||
obj_id=1,
|
||||
version=PREVIOUS_KUBE_VERSION,
|
||||
target=False,
|
||||
state="unavailable",
|
||||
),
|
||||
FakeKubeVersion(
|
||||
obj_id=2, version=UPGRADED_KUBE_VERSION, target=True, state="active"
|
||||
),
|
||||
],
|
||||
]
|
||||
# fully upgraded subcloud. Next state will be complete.
|
||||
@ -375,9 +383,9 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
|
||||
next_state = STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
|
||||
# Update the subcloud to have deploy state as "complete"
|
||||
db_api.subcloud_update(self.ctx,
|
||||
self.subcloud.id,
|
||||
deploy_status=DEPLOY_STATE_DONE)
|
||||
db_api.subcloud_update(
|
||||
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
|
||||
)
|
||||
|
||||
low_version = "v1.2.3"
|
||||
high_partial_version = "v1.3"
|
||||
@ -391,9 +399,8 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
# continue
|
||||
extra_args = {"to-version": high_partial_version}
|
||||
self.strategy = fake_strategy.create_fake_strategy(
|
||||
self.ctx,
|
||||
self.DEFAULT_STRATEGY_TYPE,
|
||||
extra_args=extra_args)
|
||||
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
|
||||
)
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
@ -406,9 +413,9 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
# Verify the transition to the expected next state
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, next_state)
|
||||
|
||||
def _test_pre_check_subcloud_existing_upgrade_skip(self,
|
||||
target_version,
|
||||
subcloud_version):
|
||||
def _test_pre_check_subcloud_existing_upgrade_skip(
|
||||
self, target_version, subcloud_version
|
||||
):
|
||||
"""Test pre check step where the subcloud existing upgrade too high.
|
||||
|
||||
When a kube upgrade exists in the subcloud, it is skipped if to-version
|
||||
@ -419,9 +426,9 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
|
||||
next_state = STRATEGY_STATE_COMPLETE
|
||||
# Update the subcloud to have deploy state as "complete"
|
||||
db_api.subcloud_update(self.ctx,
|
||||
self.subcloud.id,
|
||||
deploy_status=DEPLOY_STATE_DONE)
|
||||
db_api.subcloud_update(
|
||||
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
|
||||
)
|
||||
|
||||
self.sysinv_client.get_kube_upgrades.return_value = [
|
||||
FakeKubeUpgrade(to_version=subcloud_version)
|
||||
@ -429,9 +436,8 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
|
||||
extra_args = {"to-version": target_version}
|
||||
self.strategy = fake_strategy.create_fake_strategy(
|
||||
self.ctx,
|
||||
self.DEFAULT_STRATEGY_TYPE,
|
||||
extra_args=extra_args)
|
||||
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
|
||||
)
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
@ -447,24 +453,27 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
def test_pre_check_subcloud_existing_upgrade_too_high(self):
|
||||
target_version = "v1.2.1"
|
||||
subcloud_version = "v1.3.3"
|
||||
self._test_pre_check_subcloud_existing_upgrade_skip(target_version,
|
||||
subcloud_version)
|
||||
self._test_pre_check_subcloud_existing_upgrade_skip(
|
||||
target_version, subcloud_version
|
||||
)
|
||||
|
||||
def test_pre_check_subcloud_existing_upgrade_too_high_target_partial(self):
|
||||
target_version = "v1.2"
|
||||
subcloud_version = "v1.3.3"
|
||||
self._test_pre_check_subcloud_existing_upgrade_skip(target_version,
|
||||
subcloud_version)
|
||||
self._test_pre_check_subcloud_existing_upgrade_skip(
|
||||
target_version, subcloud_version
|
||||
)
|
||||
|
||||
def test_pre_check_subcloud_existing_upgrade_too_high_subcl_partial(self):
|
||||
target_version = "v1.2.1"
|
||||
subcloud_version = "v1.3"
|
||||
self._test_pre_check_subcloud_existing_upgrade_skip(target_version,
|
||||
subcloud_version)
|
||||
self._test_pre_check_subcloud_existing_upgrade_skip(
|
||||
target_version, subcloud_version
|
||||
)
|
||||
|
||||
def _test_pre_check_subcloud_existing_upgrade_resume(self,
|
||||
target_version,
|
||||
subcloud_version):
|
||||
def _test_pre_check_subcloud_existing_upgrade_resume(
|
||||
self, target_version, subcloud_version
|
||||
):
|
||||
"""Test pre check step where target version >= existing upgrade
|
||||
|
||||
When a kube upgrade exists in the subcloud, it is resumed if to-version
|
||||
@ -474,9 +483,9 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
|
||||
next_state = STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
|
||||
# Update the subcloud to have deploy state as "complete"
|
||||
db_api.subcloud_update(self.ctx,
|
||||
self.subcloud.id,
|
||||
deploy_status=DEPLOY_STATE_DONE)
|
||||
db_api.subcloud_update(
|
||||
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
|
||||
)
|
||||
|
||||
# Setup a fake kube upgrade in progress
|
||||
self.sysinv_client.get_kube_upgrades.return_value = [
|
||||
@ -486,9 +495,8 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
# Setup a fake kube upgrade strategy with the to-version specified
|
||||
extra_args = {"to-version": target_version}
|
||||
self.strategy = fake_strategy.create_fake_strategy(
|
||||
self.ctx,
|
||||
self.DEFAULT_STRATEGY_TYPE,
|
||||
extra_args=extra_args)
|
||||
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
|
||||
)
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
@ -504,31 +512,34 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
def test_pre_check_subcloud_existing_upgrade_match(self):
|
||||
target_version = "v1.2.3"
|
||||
subcloud_version = "v1.2.3"
|
||||
self._test_pre_check_subcloud_existing_upgrade_resume(target_version,
|
||||
subcloud_version)
|
||||
self._test_pre_check_subcloud_existing_upgrade_resume(
|
||||
target_version, subcloud_version
|
||||
)
|
||||
|
||||
def test_pre_check_subcloud_existing_upgrade_match_target_partial(self):
|
||||
# v1.2 is considered the same as v1.2.3 (micro version gets ignored)
|
||||
target_version = "v1.2"
|
||||
subcloud_version = "v1.2.3"
|
||||
self._test_pre_check_subcloud_existing_upgrade_resume(target_version,
|
||||
subcloud_version)
|
||||
self._test_pre_check_subcloud_existing_upgrade_resume(
|
||||
target_version, subcloud_version
|
||||
)
|
||||
|
||||
def test_pre_check_subcloud_existing_upgrade_match_subcloud_partial(self):
|
||||
# v1.2 is considered the same as v1.2.3 (micro version gets ignored)
|
||||
target_version = "v1.2.3"
|
||||
subcloud_version = "v1.2"
|
||||
self._test_pre_check_subcloud_existing_upgrade_resume(target_version,
|
||||
subcloud_version)
|
||||
self._test_pre_check_subcloud_existing_upgrade_resume(
|
||||
target_version, subcloud_version
|
||||
)
|
||||
|
||||
def test_pre_check_skip_when_target_version_is_greater_than_to_version(self):
|
||||
"""Test creating pre check when target version is greater than to_version."""
|
||||
|
||||
next_state = STRATEGY_STATE_COMPLETE
|
||||
# Update the subcloud to have deploy state as "complete"
|
||||
db_api.subcloud_update(self.ctx,
|
||||
self.subcloud.id,
|
||||
deploy_status=DEPLOY_STATE_DONE)
|
||||
db_api.subcloud_update(
|
||||
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
|
||||
)
|
||||
|
||||
# Setup a fake kube upgrade in progress
|
||||
self.mock_read_from_cache.return_value = KUBE_VERSION_LIST
|
||||
@ -536,9 +547,8 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
# Setup a fake kube upgrade strategy with the to-version specified
|
||||
extra_args = {"to-version": "v1.2.4"}
|
||||
self.strategy = fake_strategy.create_fake_strategy(
|
||||
self.ctx,
|
||||
self.DEFAULT_STRATEGY_TYPE,
|
||||
extra_args=extra_args)
|
||||
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
|
||||
)
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
@ -551,9 +561,9 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
|
||||
next_state = STRATEGY_STATE_COMPLETE
|
||||
# Update the subcloud to have deploy state as "complete"
|
||||
db_api.subcloud_update(self.ctx,
|
||||
self.subcloud.id,
|
||||
deploy_status=DEPLOY_STATE_DONE)
|
||||
db_api.subcloud_update(
|
||||
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
|
||||
)
|
||||
|
||||
# Setup a fake kube upgrade in progress
|
||||
self.mock_read_from_cache.return_value = []
|
||||
@ -561,9 +571,8 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
# Setup a fake kube upgrade strategy with the to-version specified
|
||||
extra_args = {"to-version": "v1.2.4"}
|
||||
self.strategy = fake_strategy.create_fake_strategy(
|
||||
self.ctx,
|
||||
self.DEFAULT_STRATEGY_TYPE,
|
||||
extra_args=extra_args)
|
||||
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
|
||||
)
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
@ -576,9 +585,9 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
|
||||
next_state = STRATEGY_STATE_KUBE_CREATING_VIM_KUBE_UPGRADE_STRATEGY
|
||||
# Update the subcloud to have deploy state as "complete"
|
||||
db_api.subcloud_update(self.ctx,
|
||||
self.subcloud.id,
|
||||
deploy_status=DEPLOY_STATE_DONE)
|
||||
db_api.subcloud_update(
|
||||
self.ctx, self.subcloud.id, deploy_status=DEPLOY_STATE_DONE
|
||||
)
|
||||
|
||||
# Setup a fake kube upgrade in progress
|
||||
self.sysinv_client.get_kube_versions.return_value = KUBE_VERSION_LIST_2
|
||||
@ -587,9 +596,8 @@ class TestKubeUpgradePreCheckStage(TestKubeUpgradeState):
|
||||
# Setup a fake kube upgrade strategy with the to-version specified
|
||||
extra_args = {"to-version": "v1.2.6"}
|
||||
self.strategy = fake_strategy.create_fake_strategy(
|
||||
self.ctx,
|
||||
self.DEFAULT_STRATEGY_TYPE,
|
||||
extra_args=extra_args)
|
||||
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
|
||||
)
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
|
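Most of the churn in the hunks above is mechanical. A minimal sketch of reproducing one such rewrite, assuming the black package is installed locally (black.format_str and black.Mode are Black's documented programmatic entry points; the snippet names are illustrative, not part of the commit):

import black

# One of the hand-wrapped calls from the old revision, wrapped in a function
# so the fragment is valid top-level Python for format_str.
SOURCE = (
    "def step():\n"
    "    db_api.subcloud_update(self.ctx,\n"
    "                           self.subcloud.id,\n"
    "                           deploy_status=DEPLOY_STATE_DONE)\n"
)

# format_str applies the same rules enforced by the stx-distcloud-tox-black
# target: double-quoted strings, an 88-column limit, re-wrapped call arguments.
print(black.format_str(SOURCE, mode=black.Mode()))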
@ -1,27 +1,37 @@
#
# Copyright (c) 2021 Wind River Systems, Inc.
# Copyright (c) 2021, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import mock

from dcmanager.common import consts
from dcmanager.tests.unit.orchestrator.states.kube_rootca.test_base \
import TestKubeRootCaUpgradeState
from dcmanager.tests.unit.orchestrator.states.test_applying_vim_strategy \
import ApplyingVIMStrategyMixin
from dcmanager.tests.unit.orchestrator.states.kube_rootca.test_base import (
TestKubeRootCaUpgradeState,
)
from dcmanager.tests.unit.orchestrator.states.test_applying_vim_strategy import (
ApplyingVIMStrategyMixin,
)


@mock.patch("dcmanager.orchestrator.states.kube_rootca.applying_vim_strategy."
"KUBE_ROOTCA_UPDATE_MAX_WAIT_ATTEMPTS", 3)
@mock.patch("dcmanager.orchestrator.states.kube_rootca.applying_vim_strategy."
"KUBE_ROOTCA_UPDATE_WAIT_INTERVAL", 1)
class TestApplyingVIMKubeRootCAUpgradeStrategyStage(ApplyingVIMStrategyMixin,
TestKubeRootCaUpgradeState):
@mock.patch(
"dcmanager.orchestrator.states.kube_rootca.applying_vim_strategy."
"KUBE_ROOTCA_UPDATE_MAX_WAIT_ATTEMPTS",
3,
)
@mock.patch(
"dcmanager.orchestrator.states.kube_rootca.applying_vim_strategy."
"KUBE_ROOTCA_UPDATE_WAIT_INTERVAL",
1,
)
class TestApplyingVIMKubeRootCAUpgradeStrategyStage(
ApplyingVIMStrategyMixin, TestKubeRootCaUpgradeState
):
"""Tests apply 'kube_rootca' vim strategy during kube rootca upgrade"""

def setUp(self):
super(TestApplyingVIMKubeRootCAUpgradeStrategyStage, self).setUp()
self.set_state(
consts.STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
consts.STRATEGY_STATE_COMPLETE)
consts.STRATEGY_STATE_COMPLETE,
)
@ -6,10 +6,12 @@

from dcmanager.common import consts
from dcmanager.tests.unit.common import fake_strategy
from dcmanager.tests.unit.orchestrator.states.kube_rootca.test_base \
import TestKubeRootCaUpgradeState
from dcmanager.tests.unit.orchestrator.states.test_creating_vim_strategy \
import CreatingVIMStrategyStageMixin
from dcmanager.tests.unit.orchestrator.states.kube_rootca.test_base import (
TestKubeRootCaUpgradeState,
)
from dcmanager.tests.unit.orchestrator.states.test_creating_vim_strategy import (
CreatingVIMStrategyStageMixin,
)


class TestCreatingVIMKubeRootCAUpgradeStrategyStage(
@ -22,7 +24,7 @@ class TestCreatingVIMKubeRootCAUpgradeStrategyStage(

self.set_state(
consts.STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
consts.STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
consts.STRATEGY_STATE_APPLYING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)

def test_create_strategy_succeeds_with_extra_args(self):
@ -31,7 +33,7 @@ class TestCreatingVIMKubeRootCAUpgradeStrategyStage(
# Create a strategy with extra_args
extra_args = {
"expiry-date": "2020:01:31",
"subject": "C=CA ST=ON L=OTT O=WR OU=STX CN=AL_RULES"
"subject": "C=CA ST=ON L=OTT O=WR OU=STX CN=AL_RULES",
}
self.strategy = fake_strategy.create_fake_strategy(
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
@ -1,16 +1,18 @@
#
# Copyright (c) 2021-2022 Wind River Systems, Inc.
# Copyright (c) 2021-2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.common.consts \
import STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
from dcmanager.common.consts import (
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)
from dcmanager.common.consts import STRATEGY_STATE_KUBE_ROOTCA_UPDATE_PRE_CHECK
from dcmanager.common.consts import STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START

from dcmanager.tests.unit.common import fake_strategy
from dcmanager.tests.unit.orchestrator.states.kube_rootca.test_base \
import TestKubeRootCaUpgradeState
from dcmanager.tests.unit.orchestrator.states.kube_rootca.test_base import (
TestKubeRootCaUpgradeState,
)


class TestPreCheckStage(TestKubeRootCaUpgradeState):
@ -23,7 +25,8 @@ class TestPreCheckStage(TestKubeRootCaUpgradeState):

# Add the strategy_step state being processed by this unit test
self.strategy_step = self.setup_strategy_step(
self.subcloud.id, STRATEGY_STATE_KUBE_ROOTCA_UPDATE_PRE_CHECK)
self.subcloud.id, STRATEGY_STATE_KUBE_ROOTCA_UPDATE_PRE_CHECK
)

def test_pre_check_no_extra_args(self):
"""Test pre check step where there are no extra args

@ -33,9 +36,8 @@ class TestPreCheckStage(TestKubeRootCaUpgradeState):
# Create a strategy with no extra_args
extra_args = None
self.strategy = fake_strategy.create_fake_strategy(
self.ctx,
self.DEFAULT_STRATEGY_TYPE,
extra_args=extra_args)
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
)

# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)
@ -43,7 +45,8 @@ class TestPreCheckStage(TestKubeRootCaUpgradeState):
# Verify the expected next state happened
self.assert_step_updated(
self.strategy_step.subcloud_id,
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY)
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)

def test_pre_check_extra_args_no_cert_file(self):
"""Test pre check step where extra args exist, but no cert-file entry
@ -52,19 +55,19 @@ class TestPreCheckStage(TestKubeRootCaUpgradeState):
"""
extra_args = {
"expiry-date": "2020:01:31",
"subject": "C=CA ST=ON L=OTT O=WR OU=STX CN=AL_RULES"
"subject": "C=CA ST=ON L=OTT O=WR OU=STX CN=AL_RULES",
}
self.strategy = fake_strategy.create_fake_strategy(
self.ctx,
self.DEFAULT_STRATEGY_TYPE,
extra_args=extra_args)
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
)
# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

# Verify the expected next state happened
self.assert_step_updated(
self.strategy_step.subcloud_id,
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY)
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)

def test_pre_check_cert_file_extra_args_detected(self):
"""Test pre check step where extra args cert-file exists
@ -74,17 +77,14 @@ class TestPreCheckStage(TestKubeRootCaUpgradeState):
The unit test should transition to the start-update state.
"""

extra_args = {
"cert-file": "some_fake_cert_file"
}
extra_args = {"cert-file": "some_fake_cert_file"}
self.strategy = fake_strategy.create_fake_strategy(
self.ctx,
self.DEFAULT_STRATEGY_TYPE,
extra_args=extra_args)
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
)
# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

# Verify the expected next state happened
self.assert_step_updated(
self.strategy_step.subcloud_id,
STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START)
self.strategy_step.subcloud_id, STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START
)
@ -1,24 +1,26 @@
#
# Copyright (c) 2021-2022 Wind River Systems, Inc.
# Copyright (c) 2021-2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

import mock

from dcmanager.common.consts \
import STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
from dcmanager.common.consts import (
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)
from dcmanager.common.consts import STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START
from dcmanager.common.consts \
import STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT
from dcmanager.common.consts import STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT
from dcmanager.tests.unit.orchestrator.states.fakes import FakeKubeRootCaUpdate
from dcmanager.tests.unit.orchestrator.states.kube_rootca.test_base import (
TestKubeRootCaUpgradeState,
)

from dcmanager.tests.unit.orchestrator.states.kube_rootca.test_base \
import TestKubeRootCaUpgradeState

KUBE_ROOTCA_UPDATE_STARTED = FakeKubeRootCaUpdate(state='update-started')
KUBE_ROOTCA_UPDATE_ABORTED = FakeKubeRootCaUpdate(state='update-aborted')
KUBE_ROOTCA_UPDATE_CERT_UPLOADED = \
FakeKubeRootCaUpdate(state='update-new-rootca-cert-uploaded')
KUBE_ROOTCA_UPDATE_STARTED = FakeKubeRootCaUpdate(state="update-started")
KUBE_ROOTCA_UPDATE_ABORTED = FakeKubeRootCaUpdate(state="update-aborted")
KUBE_ROOTCA_UPDATE_CERT_UPLOADED = FakeKubeRootCaUpdate(
state="update-new-rootca-cert-uploaded"
)


class TestStartUpdateStage(TestKubeRootCaUpgradeState):
@ -31,7 +33,8 @@ class TestStartUpdateStage(TestKubeRootCaUpgradeState):

# Add the strategy_step state being processed by this unit test
self.strategy_step = self.setup_strategy_step(
self.subcloud.id, STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START)
self.subcloud.id, STRATEGY_STATE_KUBE_ROOTCA_UPDATE_START
)

self.sysinv_client.kube_rootca_update_start = mock.MagicMock()
self.sysinv_client.get_kube_rootca_updates = mock.MagicMock()
@ -42,14 +45,17 @@ class TestStartUpdateStage(TestKubeRootCaUpgradeState):
The start update operation should be invoked, and move to upload cert
"""
self.sysinv_client.get_kube_rootca_updates.return_value = []
self.sysinv_client.kube_rootca_update_start.return_value = \
self.sysinv_client.kube_rootca_update_start.return_value = (
KUBE_ROOTCA_UPDATE_STARTED
)
# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

# Verify the expected next state happened (upload cert)
self.assert_step_updated(self.strategy_step.subcloud_id,
STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT)
self.assert_step_updated(
self.strategy_step.subcloud_id,
STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT,
)

def test_existing_update_started(self):
"""Test start update when there is an update in started state
@ -63,8 +69,10 @@ class TestStartUpdateStage(TestKubeRootCaUpgradeState):
self.worker.perform_state_action(self.strategy_step)

# Verify the expected next state happened (upload cert)
self.assert_step_updated(self.strategy_step.subcloud_id,
STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT)
self.assert_step_updated(
self.strategy_step.subcloud_id,
STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT,
)

def test_existing_update_aborted(self):
"""Test start update when there is an update in aborted state
@ -74,14 +82,17 @@ class TestStartUpdateStage(TestKubeRootCaUpgradeState):
self.sysinv_client.get_kube_rootca_updates.return_value = [
KUBE_ROOTCA_UPDATE_ABORTED
]
self.sysinv_client.kube_rootca_update_start.return_value = \
self.sysinv_client.kube_rootca_update_start.return_value = (
KUBE_ROOTCA_UPDATE_STARTED
)
# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

# Verify the expected next state happened (upload cert)
self.assert_step_updated(self.strategy_step.subcloud_id,
STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT)
self.assert_step_updated(
self.strategy_step.subcloud_id,
STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT,
)

def test_existing_update_other_state(self):
"""Test start update if there is an update in an other state.
@ -98,4 +109,5 @@ class TestStartUpdateStage(TestKubeRootCaUpgradeState):
# Verify the expected next state happened (upload cert)
self.assert_step_updated(
self.strategy_step.subcloud_id,
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY)
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)
@ -6,15 +6,16 @@

import mock

from dcmanager.common.consts import \
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
from dcmanager.common.consts import (
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)
from dcmanager.common.consts import STRATEGY_STATE_FAILED
from dcmanager.common.consts import \
STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT
from dcmanager.common.consts import STRATEGY_STATE_KUBE_ROOTCA_UPDATE_UPLOAD_CERT
from dcmanager.db import api as db_api
from dcmanager.tests.unit.common import fake_strategy
from dcmanager.tests.unit.orchestrator.states.kube_rootca.test_base \
import TestKubeRootCaUpgradeState
from dcmanager.tests.unit.orchestrator.states.kube_rootca.test_base import (
TestKubeRootCaUpgradeState,
)

# Only the 'error' field is checked on upload_cert
ERROR_UPLOADING_CERT = {"error": "File not found"}
@ -38,9 +39,7 @@ class TestUploadCertStage(TestKubeRootCaUpgradeState):
self.sysinv_client.kube_rootca_update_upload_cert = mock.MagicMock()

# Mock the strategy with a reference to a cert-file in extra_args
extra_args = {
"cert-file": FAKE_CERT_FILE
}
extra_args = {"cert-file": FAKE_CERT_FILE}
self.strategy = fake_strategy.create_fake_strategy(
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
)
@ -51,11 +50,12 @@ class TestUploadCertStage(TestKubeRootCaUpgradeState):
The state should fail
"""

self.sysinv_client.kube_rootca_update_upload_cert.return_value = \
self.sysinv_client.kube_rootca_update_upload_cert.return_value = (
ERROR_UPLOADING_CERT
)

mock_open = mock.mock_open(read_data='test')
with mock.patch('builtins.open', mock_open):
mock_open = mock.mock_open(read_data="test")
with mock.patch("builtins.open", mock_open):
# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

@ -63,9 +63,7 @@ class TestUploadCertStage(TestKubeRootCaUpgradeState):
self.sysinv_client.kube_rootca_update_upload_cert.assert_called()

# Verify the strategy failed
self.assert_step_updated(
self.strategy_step.subcloud_id, STRATEGY_STATE_FAILED
)
self.assert_step_updated(self.strategy_step.subcloud_id, STRATEGY_STATE_FAILED)

def test_upload_cert_pass(self):
"""Test upload cert passes the sysinv operation
@ -73,11 +71,12 @@ class TestUploadCertStage(TestKubeRootCaUpgradeState):
The state should transition to the vim creation state
"""

self.sysinv_client.kube_rootca_update_upload_cert.return_value = \
self.sysinv_client.kube_rootca_update_upload_cert.return_value = (
SUCCESS_UPLOADING_CERT
)

mock_open = mock.mock_open(read_data='test')
with mock.patch('builtins.open', mock_open):
mock_open = mock.mock_open(read_data="test")
with mock.patch("builtins.open", mock_open):
# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

@ -87,7 +86,7 @@ class TestUploadCertStage(TestKubeRootCaUpgradeState):
# Verify the expected next state happened
self.assert_step_updated(
self.strategy_step.subcloud_id,
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)

def test_upload_cert_pass_without_extra_args(self):
@ -101,8 +100,9 @@ class TestUploadCertStage(TestKubeRootCaUpgradeState):
self.ctx, self.DEFAULT_STRATEGY_TYPE
)

self.sysinv_client.kube_rootca_update_upload_cert.return_value = \
self.sysinv_client.kube_rootca_update_upload_cert.return_value = (
SUCCESS_UPLOADING_CERT
)

self.worker.perform_state_action(self.strategy_step)

@ -110,5 +110,5 @@ class TestUploadCertStage(TestKubeRootCaUpgradeState):

self.assert_step_updated(
self.strategy_step.subcloud_id,
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY
STRATEGY_STATE_CREATING_VIM_KUBE_ROOTCA_UPDATE_STRATEGY,
)
@ -1,18 +1,19 @@
#
# Copyright (c) 2023 Wind River Systems, Inc.
# Copyright (c) 2023-2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from dcmanager.common import consts
from dcmanager.tests.unit.orchestrator.states.patch.test_base import \
TestPatchState
from dcmanager.tests.unit.orchestrator.states.test_applying_vim_strategy import \
ApplyingVIMStrategyMixin
from dcmanager.tests.unit.orchestrator.states.patch.test_base import TestPatchState
from dcmanager.tests.unit.orchestrator.states.test_applying_vim_strategy import (
ApplyingVIMStrategyMixin,
)


class TestApplyingVIMPatchStrategyStage(ApplyingVIMStrategyMixin,
TestPatchState):
class TestApplyingVIMPatchStrategyStage(ApplyingVIMStrategyMixin, TestPatchState):
def setUp(self):
super(TestApplyingVIMPatchStrategyStage, self).setUp()
self.set_state(consts.STRATEGY_STATE_APPLYING_VIM_PATCH_STRATEGY,
consts.STRATEGY_STATE_COMPLETE)
self.set_state(
consts.STRATEGY_STATE_APPLYING_VIM_PATCH_STRATEGY,
consts.STRATEGY_STATE_COMPLETE,
)
@ -11,10 +11,10 @@ import mock
from dccommon.drivers.openstack import vim
from dcmanager.common import consts
from dcmanager.tests.unit.fakes import FakeVimStrategy
from dcmanager.tests.unit.orchestrator.states.patch.test_base import \
TestPatchState
from dcmanager.tests.unit.orchestrator.states.test_creating_vim_strategy import \
CreatingVIMStrategyStageMixin
from dcmanager.tests.unit.orchestrator.states.patch.test_base import TestPatchState
from dcmanager.tests.unit.orchestrator.states.test_creating_vim_strategy import (
CreatingVIMStrategyStageMixin,
)


BuildPhase = namedtuple("BuildPhase", "reason")
@ -22,20 +22,28 @@ BuildPhase = namedtuple("BuildPhase", "reason")

REASON = "no software patches need to be applied"
STRATEGY_BUILDING = FakeVimStrategy(state=vim.STATE_BUILDING)
STRATEGY_FAILED_BUILDING = FakeVimStrategy(state=vim.STATE_BUILD_FAILED,
build_phase=BuildPhase(REASON))
STRATEGY_FAILED_BUILDING = FakeVimStrategy(
state=vim.STATE_BUILD_FAILED, build_phase=BuildPhase(REASON)
)


@mock.patch("dcmanager.orchestrator.states.patch.creating_vim_patch_strategy."
"DEFAULT_MAX_QUERIES", 3)
@mock.patch("dcmanager.orchestrator.states.patch.creating_vim_patch_strategy."
"DEFAULT_SLEEP_DURATION", 1)
class TestCreatingVIMPatchStrategyStage(CreatingVIMStrategyStageMixin,
TestPatchState):
@mock.patch(
"dcmanager.orchestrator.states.patch.creating_vim_patch_strategy."
"DEFAULT_MAX_QUERIES",
3,
)
@mock.patch(
"dcmanager.orchestrator.states.patch.creating_vim_patch_strategy."
"DEFAULT_SLEEP_DURATION",
1,
)
class TestCreatingVIMPatchStrategyStage(CreatingVIMStrategyStageMixin, TestPatchState):
def setUp(self):
super(TestCreatingVIMPatchStrategyStage, self).setUp()
self.set_state(consts.STRATEGY_STATE_CREATING_VIM_PATCH_STRATEGY,
consts.STRATEGY_STATE_APPLYING_VIM_PATCH_STRATEGY)
self.set_state(
consts.STRATEGY_STATE_CREATING_VIM_PATCH_STRATEGY,
consts.STRATEGY_STATE_APPLYING_VIM_PATCH_STRATEGY,
)
self.skip_state = consts.STRATEGY_STATE_FINISHING_PATCH_STRATEGY

def test_skip_if_not_needed(self):
@ -47,9 +55,11 @@ class TestCreatingVIMPatchStrategyStage(CreatingVIMStrategyStageMixin,
"""

# first api query is before the create
self.vim_client.get_strategy.side_effect = [None,
STRATEGY_BUILDING,
STRATEGY_FAILED_BUILDING]
self.vim_client.get_strategy.side_effect = [
None,
STRATEGY_BUILDING,
STRATEGY_FAILED_BUILDING,
]

# API calls acts as expected
self.vim_client.create_strategy.return_value = STRATEGY_BUILDING
@ -57,5 +67,4 @@ class TestCreatingVIMPatchStrategyStage(CreatingVIMStrategyStageMixin,
# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)

self.assert_step_updated(self.strategy_step.subcloud_id,
self.skip_state)
self.assert_step_updated(self.strategy_step.subcloud_id, self.skip_state)
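The decorator rewrites above follow the same 88-column rule: when a @mock.patch call no longer fits, Black explodes it, keeping the implicit string concatenation and adding the magic trailing comma. A small sketch of the same transformation, again assuming the black package is available:

import black

# The old two-line decorator form, plus a stub function so the fragment parses.
SOURCE = (
    '@mock.patch("dcmanager.orchestrator.states.patch.creating_vim_patch_strategy."\n'
    '            "DEFAULT_MAX_QUERIES", 3)\n'
    "def test():\n"
    "    pass\n"
)

# With current Black releases this prints the exploded form seen in the new
# revision: one string fragment per line, the 3 on its own line, trailing comma.
print(black.format_str(SOURCE, mode=black.Mode()))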
@ -9,8 +9,7 @@ import mock
from dcmanager.common import consts
from dcmanager.orchestrator.states.patch.pre_check import IGNORED_ALARMS_IDS
from dcmanager.tests.unit.orchestrator.states.fakes import FakeAlarm
from dcmanager.tests.unit.orchestrator.states.patch.test_base import \
TestPatchState
from dcmanager.tests.unit.orchestrator.states.patch.test_base import TestPatchState


class TestPatchPreCheckStage(TestPatchState):
@ -24,7 +23,8 @@ class TestPatchPreCheckStage(TestPatchState):

# Add the strategy_step state being processed by this unit test
self.strategy_step = self.setup_strategy_step(
self.subcloud.id, consts.STRATEGY_STATE_PRE_CHECK)
self.subcloud.id, consts.STRATEGY_STATE_PRE_CHECK
)

self.fm_client.get_alarms = mock.MagicMock()

@ -43,8 +43,7 @@ class TestPatchPreCheckStage(TestPatchState):
self.fm_client.get_alarms.assert_called()

# verify the expected next state happened
self.assert_step_updated(self.strategy_step.subcloud_id,
self.success_state)
self.assert_step_updated(self.strategy_step.subcloud_id, self.success_state)

def test_no_management_affecting_alarm(self):
"""Test pre check step where there are no management affecting alarms
@ -61,8 +60,7 @@ class TestPatchPreCheckStage(TestPatchState):
self.fm_client.get_alarms.assert_called()

# verify the expected next state happened
self.assert_step_updated(self.strategy_step.subcloud_id,
self.success_state)
self.assert_step_updated(self.strategy_step.subcloud_id, self.success_state)

def test_management_affected_alarm(self):
"""Test pre check step where there is a management affecting alarm
@ -70,8 +68,7 @@ class TestPatchPreCheckStage(TestPatchState):
The pre-check should transition to the failed state
"""

alarm_list = [FakeAlarm("100.001", "True"),
FakeAlarm("100.002", "True")]
alarm_list = [FakeAlarm("100.001", "True"), FakeAlarm("100.002", "True")]

# also add ignored alarms
for alarm_str in IGNORED_ALARMS_IDS:
@ -86,8 +83,9 @@ class TestPatchPreCheckStage(TestPatchState):
self.fm_client.get_alarms.assert_called()

# verify the expected next state happened
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)

def test_ignored_alarm(self):
"""Test pre check step where there is only a ignored alarm
@ -108,8 +106,7 @@ class TestPatchPreCheckStage(TestPatchState):
self.fm_client.get_alarms.assert_called()

# verify the expected next state happened
self.assert_step_updated(self.strategy_step.subcloud_id,
self.success_state)
self.assert_step_updated(self.strategy_step.subcloud_id, self.success_state)

def test_get_alarms_unexpected_failure(self):
"""Test pre check step where fm-client get_alarms() fails
@ -118,7 +115,7 @@ class TestPatchPreCheckStage(TestPatchState):
field should contain the correct message detailing the error
"""

self.fm_client.get_alarms.side_effect = Exception('Test error message')
self.fm_client.get_alarms.side_effect = Exception("Test error message")

# invoke the strategy state operation on the orch thread
self.worker.perform_state_action(self.strategy_step)
@ -127,10 +124,13 @@ class TestPatchPreCheckStage(TestPatchState):
self.fm_client.get_alarms.assert_called()

# verify the expected next state happened
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)

details = ("pre check: Failed to obtain subcloud alarm report due to:"
" (Test error message). Please see /var/log/dcmanager/orche"
"strator.log for details")
details = (
"pre check: Failed to obtain subcloud alarm report due to: "
"(Test error message). Please see /var/log/dcmanager/orchestrator.log "
"for details"
)
self.assert_step_details(self.strategy_step.subcloud_id, details)
@ -40,10 +40,12 @@ SUBCLOUD_USM_PATCHES = {
}


@mock.patch("dcmanager.orchestrator.states.patch.updating_patches."
"DEFAULT_MAX_QUERIES", 3)
@mock.patch("dcmanager.orchestrator.states.patch.updating_patches"
".DEFAULT_SLEEP_DURATION", 1)
@mock.patch(
"dcmanager.orchestrator.states.patch.updating_patches.DEFAULT_MAX_QUERIES", 3
)
@mock.patch(
"dcmanager.orchestrator.states.patch.updating_patches.DEFAULT_SLEEP_DURATION", 1
)
class TestUpdatingPatchesStage(TestPatchState):
def setUp(self):
super(TestUpdatingPatchesStage, self).setUp()
@ -55,7 +57,8 @@ class TestUpdatingPatchesStage(TestPatchState):

# Add the strategy_step state being processed by this unit test
self.strategy_step = self.setup_strategy_step(
self.subcloud.id, consts.STRATEGY_STATE_UPDATING_PATCHES)
self.subcloud.id, consts.STRATEGY_STATE_UPDATING_PATCHES
)

# Add mock API endpoints for patching and sysinv client calls
# invoked by this state
@ -67,11 +70,11 @@ class TestUpdatingPatchesStage(TestPatchState):
def _create_fake_strategy(self, upload_only=False, patch_file=None):
extra_args = {
consts.EXTRA_ARGS_UPLOAD_ONLY: upload_only,
consts.EXTRA_ARGS_PATCH: patch_file
consts.EXTRA_ARGS_PATCH: patch_file,
}
return fake_strategy.create_fake_strategy(self.ctx,
self.DEFAULT_STRATEGY_TYPE,
extra_args=extra_args)
return fake_strategy.create_fake_strategy(
self.ctx, self.DEFAULT_STRATEGY_TYPE, extra_args=extra_args
)

@mock.patch.object(os_path, "isfile")
def test_update_subcloud_patches_patch_file_success(self, mock_os_path_isfile):
@ -118,7 +121,7 @@ class TestUpdatingPatchesStage(TestPatchState):

@mock.patch.object(os_path, "isfile")
def test_update_subcloud_patches_patch_file_upload_only_success(
self, mock_os_path_isfile
self, mock_os_path_isfile
):
"""Test update_patches where the API call succeeds with patch/upload only."""
@ -22,12 +22,9 @@ from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.tests.unit.common import fake_strategy
from dcmanager.tests.unit.orchestrator.test_base import TestSwUpdate

FAKE_PASSWORD = (base64.b64encode('testpass'.encode('utf-8'))).decode('ascii')
OAM_FLOATING_IP = '10.10.10.12'
REQUIRED_EXTRA_ARGS = {
'sysadmin_password': FAKE_PASSWORD,
'force': False
}
FAKE_PASSWORD = (base64.b64encode("testpass".encode("utf-8"))).decode("ascii")
OAM_FLOATING_IP = "10.10.10.12"
REQUIRED_EXTRA_ARGS = {"sysadmin_password": FAKE_PASSWORD, "force": False}


class TestPrestage(TestSwUpdate):
@ -50,9 +47,7 @@ class TestPrestage(TestSwUpdate):
}

def _setup_strategy_step(self, strategy_step):
self.strategy_step = self.setup_strategy_step(
self.subcloud.id, strategy_step
)
self.strategy_step = self.setup_strategy_step(self.subcloud.id, strategy_step)

def _setup_and_assert(self, next_state, extra_args=None):
self.strategy = fake_strategy.create_fake_strategy(
@ -83,7 +78,7 @@ class TestPrestagePreCheckState(TestPrestage):
individual tests.
"""

mock_class = mock.patch('dcmanager.common.prestage.validate_prestage')
mock_class = mock.patch("dcmanager.common.prestage.validate_prestage")
self.mock_prestage_subcloud = mock_class.start()
self.mock_prestage_subcloud.return_value = OAM_FLOATING_IP
self.addCleanup(mock_class.stop)
@ -91,7 +86,7 @@ class TestPrestagePreCheckState(TestPrestage):
def _mock_threading_start(self):
"""Mock threading's Thread.start"""

mock_thread = mock.patch.object(threading.Thread, 'start')
mock_thread = mock.patch.object(threading.Thread, "start")
self.mock_thread_start = mock_thread.start()
self.addCleanup(mock_thread.stop)

@ -103,35 +98,31 @@ class TestPrestagePreCheckState(TestPrestage):
def test_prestage_pre_check_validate_failed_with_orch_skip_false(self):
"""Test prestage pre check validate failed with orch skip as false"""

self.mock_prestage_subcloud.side_effect = \
self.mock_prestage_subcloud.side_effect = (
exceptions.PrestagePreCheckFailedException(
subcloud=None, orch_skip=False, details='test'
subcloud=None, orch_skip=False, details="test"
)
)

self._setup_and_assert(STRATEGY_STATE_FAILED, extra_args=REQUIRED_EXTRA_ARGS)

new_strategy_step = db_api.strategy_step_get(
self.ctx, self.subcloud.id
)
new_strategy_step = db_api.strategy_step_get(self.ctx, self.subcloud.id)

# The strategy step details field should be updated with the Exception string
self.assertTrue('test' in str(new_strategy_step.details))
self.assertTrue("test" in str(new_strategy_step.details))

def test_prestage_pre_check_validate_failed_with_orch_skip_true(self):
"""Test prestage pre check validate failed with orch skip as true"""

self.mock_prestage_subcloud.side_effect = \
self.mock_prestage_subcloud.side_effect = (
exceptions.PrestagePreCheckFailedException(
subcloud=None, orch_skip=True, details='test'
subcloud=None, orch_skip=True, details="test"
)

self._setup_and_assert(
STRATEGY_STATE_COMPLETE, extra_args=REQUIRED_EXTRA_ARGS
)

new_strategy_step = db_api.strategy_step_get(
self.ctx, self.subcloud.id
)
self._setup_and_assert(STRATEGY_STATE_COMPLETE, extra_args=REQUIRED_EXTRA_ARGS)

new_strategy_step = db_api.strategy_step_get(self.ctx, self.subcloud.id)

# The strategy step details field should be updated with the Exception string
self.assertTrue("test" in str(new_strategy_step.details))
@ -155,18 +146,16 @@ class TestPrestagePreCheckState(TestPrestage):

self._setup_and_assert(
STRATEGY_STATE_PRESTAGE_PACKAGES,
extra_args=self.required_extra_args_with_oam
extra_args=self.required_extra_args_with_oam,
)

def test_prestage_pre_check_succeds_with_prestage_software_version(self):
"""Test prestage pre check succeeds with prestage software version"""

extra_args = copy.copy(REQUIRED_EXTRA_ARGS)
extra_args['prestage-software-version'] = '22.3'
extra_args["prestage-software-version"] = "22.3"

self._setup_and_assert(
STRATEGY_STATE_PRESTAGE_PACKAGES, extra_args=extra_args
)
self._setup_and_assert(STRATEGY_STATE_PRESTAGE_PACKAGES, extra_args=extra_args)


class TestPrestagePackagesState(TestPrestage):
@ -179,7 +168,7 @@ class TestPrestagePackagesState(TestPrestage):
self._mock_ansible_playbook()

def _mock_ansible_playbook(self):
mock_patch_object = mock.patch.object(AnsiblePlaybook, 'run_playbook')
mock_patch_object = mock.patch.object(AnsiblePlaybook, "run_playbook")
self.mock_ansible_playbook = mock_patch_object.start()
self.addCleanup(mock_patch_object.stop)

@ -187,19 +176,16 @@ class TestPrestagePackagesState(TestPrestage):
"""Test prestage package succeeds"""

self._setup_and_assert(
STRATEGY_STATE_PRESTAGE_IMAGES,
extra_args=self.required_extra_args_with_oam
STRATEGY_STATE_PRESTAGE_IMAGES, extra_args=self.required_extra_args_with_oam
)

def test_prestage_package_succeeds_with_prestage_software_version(self):
"""Test prestage package succeeds with prestage software version"""

extra_args = copy.copy(self.required_extra_args_with_oam)
extra_args['prestage-software-version'] = '22.3'
extra_args["prestage-software-version"] = "22.3"

self._setup_and_assert(
STRATEGY_STATE_PRESTAGE_IMAGES, extra_args=extra_args
)
self._setup_and_assert(STRATEGY_STATE_PRESTAGE_IMAGES, extra_args=extra_args)


class TestPrestageImagesState(TestPrestage):
@ -222,6 +208,6 @@ class TestPrestageImagesState(TestPrestage):
"""Test prestage images succeeds with prestage software version"""

extra_args = copy.copy(self.required_extra_args_with_oam)
extra_args['prestage-software-version'] = '22.3'
extra_args["prestage-software-version"] = "22.3"

self._setup_and_assert(STRATEGY_STATE_COMPLETE, extra_args=extra_args)
@ -10,15 +10,19 @@ from keystoneauth1 import exceptions as keystone_exceptions

from dcmanager.common import consts
from dcmanager.common.exceptions import InvalidParameterValue
from dcmanager.orchestrator.states.software.cache.cache_specifications import \
REGION_ONE_LICENSE_CACHE_TYPE
from dcmanager.orchestrator.states.software.cache.cache_specifications import \
REGION_ONE_RELEASE_USM_CACHE_TYPE
from dcmanager.orchestrator.states.software.cache.cache_specifications import \
REGION_ONE_SYSTEM_INFO_CACHE_TYPE
from dcmanager.orchestrator.states.software.cache.cache_specifications import (
REGION_ONE_LICENSE_CACHE_TYPE,
)
from dcmanager.orchestrator.states.software.cache.cache_specifications import (
REGION_ONE_RELEASE_USM_CACHE_TYPE,
)
from dcmanager.orchestrator.states.software.cache.cache_specifications import (
REGION_ONE_SYSTEM_INFO_CACHE_TYPE,
)
from dcmanager.orchestrator.states.software.cache import clients
from dcmanager.orchestrator.states.software.cache.shared_cache_repository import \
SharedCacheRepository
from dcmanager.orchestrator.states.software.cache.shared_cache_repository import (
SharedCacheRepository,
)
from dcmanager.tests import base

SOFTWARE_CLIENT_QUERY_RETURN = [
@ -61,34 +65,32 @@ class TestSharedCacheRepository(base.DCManagerTestCase):
self.software_client().list.return_value = SOFTWARE_CLIENT_QUERY_RETURN

def _mock_software_client(self):
mock_patch = mock.patch.object(clients, 'SoftwareClient')
mock_patch = mock.patch.object(clients, "SoftwareClient")
self.software_client = mock_patch.start()
self.addCleanup(mock_patch.stop)

def test_read_succeeds_with_license_cache_type(self):
"""Test read cache succeeds when using the REGION_ONE_LICENSE_CACHE_TYPE"""

self.mock_sysinv_client().get_license.return_value = 'fake license'
self.mock_sysinv_client().get_license.return_value = "fake license"

response = self.shared_cache_repository.read(REGION_ONE_LICENSE_CACHE_TYPE)

self.assertEqual(response, 'fake license')
self.assertEqual(response, "fake license")

def test_read_succeeds_with_system_info_cache_type(self):
"""Test read cache succeeds when using REGION_ONE_SYSTEM_INFO_CACHE_TYPE"""

self.mock_sysinv_client().get_system.return_value = 'fake system info'
self.mock_sysinv_client().get_system.return_value = "fake system info"

response = self.shared_cache_repository.read(
REGION_ONE_SYSTEM_INFO_CACHE_TYPE)
response = self.shared_cache_repository.read(REGION_ONE_SYSTEM_INFO_CACHE_TYPE)

self.assertEqual(response, 'fake system info')
self.assertEqual(response, "fake system info")

def test_read_succeeds_with_release_usm_cache_type(self):
"""Test read cache succeeds when using REGION_ONE_RELEASE_USM_CACHE_TYPE"""

response = self.shared_cache_repository.read(
REGION_ONE_RELEASE_USM_CACHE_TYPE)
response = self.shared_cache_repository.read(REGION_ONE_RELEASE_USM_CACHE_TYPE)

self.assertEqual(response, SOFTWARE_CLIENT_QUERY_RETURN)

@ -96,9 +98,7 @@ class TestSharedCacheRepository(base.DCManagerTestCase):
"""Test read cache fails when using an invalid cache type"""

self.assertRaises(
InvalidParameterValue,
self.shared_cache_repository.read,
'fake parameter'
InvalidParameterValue, self.shared_cache_repository.read, "fake parameter"
)

def test_read_fails_when_openstack_driver_raises_exception(self):
@ -109,15 +109,14 @@ class TestSharedCacheRepository(base.DCManagerTestCase):
self.assertRaises(
keystone_exceptions.ConnectFailure,
self.shared_cache_repository.read,
REGION_ONE_RELEASE_USM_CACHE_TYPE
REGION_ONE_RELEASE_USM_CACHE_TYPE,
)

def test_read_succeeds_with_filter_params(self):
"""Test read cache succeeds when filter_params is sent"""

response = self.shared_cache_repository.read(
REGION_ONE_RELEASE_USM_CACHE_TYPE,
state='available'
REGION_ONE_RELEASE_USM_CACHE_TYPE, state="available"
)

expected_response = copy.copy(SOFTWARE_CLIENT_QUERY_RETURN)
@ -132,5 +131,5 @@ class TestSharedCacheRepository(base.DCManagerTestCase):
InvalidParameterValue,
self.shared_cache_repository.read,
REGION_ONE_RELEASE_USM_CACHE_TYPE,
invalid='available'
invalid="available",
)
@ -7,15 +7,19 @@ import socket
|
||||
|
||||
import mock
|
||||
|
||||
from dcmanager.orchestrator.states.software.cache.cache_specifications import \
|
||||
CacheSpecification
|
||||
from dcmanager.orchestrator.states.software.cache.cache_specifications import \
|
||||
REGION_ONE_LICENSE_CACHE_SPECIFICATION
|
||||
from dcmanager.orchestrator.states.software.cache.cache_specifications import \
|
||||
REGION_ONE_LICENSE_CACHE_TYPE
|
||||
from dcmanager.orchestrator.states.software.cache.cache_specifications import (
|
||||
CacheSpecification,
|
||||
)
|
||||
from dcmanager.orchestrator.states.software.cache.cache_specifications import (
|
||||
REGION_ONE_LICENSE_CACHE_SPECIFICATION,
|
||||
)
|
||||
from dcmanager.orchestrator.states.software.cache.cache_specifications import (
|
||||
REGION_ONE_LICENSE_CACHE_TYPE,
|
||||
)
|
||||
from dcmanager.orchestrator.states.software.cache import clients
|
||||
from dcmanager.orchestrator.states.software.cache.shared_client_cache import \
|
||||
SharedClientCache
|
||||
from dcmanager.orchestrator.states.software.cache.shared_client_cache import (
|
||||
SharedClientCache,
|
||||
)
|
||||
from dcmanager.tests import base
|
||||
|
||||
SOFTWARE_CLIENT_QUERY_RETURN = {
|
||||
@ -28,7 +32,7 @@ SOFTWARE_CLIENT_QUERY_RETURN = {
|
||||
"sw_version": "23.09.1",
|
||||
"state": "available",
|
||||
"reboot_required": "N",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@ -52,19 +56,19 @@ class TestSharedClientCache(base.DCManagerTestCase):
|
||||
REGION_ONE_LICENSE_CACHE_TYPE, REGION_ONE_LICENSE_CACHE_SPECIFICATION
|
||||
)
|
||||
|
||||
self.mock_sysinv_client().get_license.return_value = 'fake license'
|
||||
self.mock_sysinv_client().get_license.return_value = "fake license"
|
||||
|
||||
self.assertIsNone(shared_client_cache._cache)
|
||||
|
||||
response = shared_client_cache.read()
|
||||
|
||||
self.assertEqual(response, 'fake license')
|
||||
self.assertEqual(response, "fake license")
|
||||
self.mock_sysinv_client().get_license.assert_called_once()
|
||||
self.assertIsNotNone(shared_client_cache._cache)
|
||||
|
||||
response = shared_client_cache.read()
|
||||
|
||||
self.assertEqual(response, 'fake license')
|
||||
self.assertEqual(response, "fake license")
|
||||
self.mock_sysinv_client().get_license.assert_called_once()
|
||||
|
||||
def test_read_fails_when_client_lock_is_writer_and_cache_is_not_stored(self):
|
||||
@ -81,18 +85,17 @@ class TestSharedClientCache(base.DCManagerTestCase):
|
||||
"""Test read cache succeeds without retry on exception"""
|
||||
|
||||
cache_specification = CacheSpecification(
|
||||
lambda: clients.get_sysinv_client().get_license(),
|
||||
retry_on_exception=False
|
||||
lambda: clients.get_sysinv_client().get_license(), retry_on_exception=False
|
||||
)
|
||||
self.shared_client_cache = SharedClientCache(
|
||||
REGION_ONE_LICENSE_CACHE_TYPE, cache_specification
|
||||
)
|
||||
|
||||
self.mock_sysinv_client().get_license.return_value = 'fake license'
|
||||
self.mock_sysinv_client().get_license.return_value = "fake license"
|
||||
|
||||
response = self.shared_client_cache.read()
|
||||
|
||||
self.assertEqual(response, 'fake license')
|
||||
self.assertEqual(response, "fake license")
|
||||
self.mock_sysinv_client().get_license.assert_called_once()
|
||||
|
||||
def test_read_fails_with_retry_on_exception(self):
|
||||
|
@ -26,8 +26,7 @@ class TestSoftwareOrchestrator(TestSwUpdate):
|
||||
return_value=self.software_client,
|
||||
)
|
||||
self.sysinv_cache_client_mock = mock.patch(
|
||||
f"{CACHE_CLIENT_PATH}.get_sysinv_client",
|
||||
return_value=self.sysinv_client
|
||||
f"{CACHE_CLIENT_PATH}.get_sysinv_client", return_value=self.sysinv_client
|
||||
)
|
||||
self.software_cache_client_mock.start()
|
||||
self.sysinv_cache_client_mock.start()
|
||||
|
@ -8,10 +8,10 @@ import mock
|
||||
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.orchestrator.states.base import BaseState
|
||||
from dcmanager.orchestrator.states.software.finish_strategy import \
|
||||
FinishStrategyState
|
||||
from dcmanager.tests.unit.orchestrator.states.software.test_base import \
|
||||
TestSoftwareOrchestrator
|
||||
from dcmanager.orchestrator.states.software.finish_strategy import FinishStrategyState
|
||||
from dcmanager.tests.unit.orchestrator.states.software.test_base import (
|
||||
TestSoftwareOrchestrator,
|
||||
)
|
||||
|
||||
|
||||
REGION_ONE_RELEASES = [
|
||||
@ -79,7 +79,8 @@ class TestFinishStrategyState(TestSoftwareOrchestrator):
|
||||
|
||||
# Add the strategy_step state being processed by this unit test
|
||||
self.strategy_step = self.setup_strategy_step(
|
||||
self.subcloud.id, consts.STRATEGY_STATE_SW_FINISH_STRATEGY)
|
||||
self.subcloud.id, consts.STRATEGY_STATE_SW_FINISH_STRATEGY
|
||||
)
|
||||
|
||||
# Add mock API endpoints for software client calls
|
||||
# invoked by this state
|
||||
@ -106,8 +107,7 @@ class TestFinishStrategyState(TestSoftwareOrchestrator):
|
||||
self.software_client.deploy_delete.assert_called_once()
|
||||
|
||||
# On success, the state should transition to the next state
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id,
|
||||
self.on_success_state)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_finish_strategy_no_operation_required(self):
|
||||
"""Test software finish strategy when no operation is required."""
|
||||
@ -124,8 +124,7 @@ class TestFinishStrategyState(TestSoftwareOrchestrator):
|
||||
self.software_client.commit_patch.assert_not_called()
|
||||
|
||||
# On success, the state should transition to the next state
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id,
|
||||
self.on_success_state)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_finish_strategy_fails_when_query_exception(self):
|
||||
"""Test finish strategy fails when software client query raises exception"""
|
||||
@ -154,7 +153,7 @@ class TestFinishStrategyState(TestSoftwareOrchestrator):
|
||||
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
|
||||
)
|
||||
|
||||
@mock.patch.object(BaseState, 'stopped')
|
||||
@mock.patch.object(BaseState, "stopped")
|
||||
def test_finish_strategy_fails_when_stopped(self, mock_base_stopped):
|
||||
"""Test finish strategy fails when stopped"""
|
||||
self.mock_read_from_cache.return_value = REGION_ONE_RELEASES
|
||||
|
@ -9,8 +9,9 @@ import mock
|
||||
from dccommon import consts as dccommon_consts
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.db import api as db_api
|
||||
from dcmanager.tests.unit.orchestrator.states.software.test_base import \
|
||||
TestSoftwareOrchestrator
|
||||
from dcmanager.tests.unit.orchestrator.states.software.test_base import (
|
||||
TestSoftwareOrchestrator,
|
||||
)
|
||||
|
||||
MISSING_LICENSE_RESPONSE = {
|
||||
"content": "",
|
||||
@ -99,9 +100,7 @@ class TestInstallLicenseState(TestSoftwareOrchestrator):
|
||||
self.sysinv_client.install_license.assert_called()
|
||||
|
||||
# On success, the next state after installing license is importing load
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_install_license_skip_existing(self):
|
||||
"""Test the install license step skipped due to license up to date"""
|
||||
@ -122,9 +121,7 @@ class TestInstallLicenseState(TestSoftwareOrchestrator):
|
||||
self.sysinv_client.install_license.assert_not_called()
|
||||
|
||||
# On success, the next state after installing license is importing load
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_install_license_overrides_mismatched_license(self):
|
||||
"""Test the install license overrides a mismatched license"""
|
||||
@ -147,9 +144,7 @@ class TestInstallLicenseState(TestSoftwareOrchestrator):
|
||||
self.sysinv_client.install_license.assert_called()
|
||||
|
||||
# Verify it successfully moves to the next step
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_install_license_skips_with_sys_controller_without_license(self):
|
||||
"""Test license install skips when sys controller doesn't have a license"""
|
||||
@ -164,9 +159,7 @@ class TestInstallLicenseState(TestSoftwareOrchestrator):
|
||||
self.sysinv_client.install_license.assert_not_called()
|
||||
|
||||
# Verify it successfully moves to the next step
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_install_license_fails_with_generic_error_response(self):
|
||||
"""Test license install fails with generic error response"""
|
||||
@ -180,9 +173,10 @@ class TestInstallLicenseState(TestSoftwareOrchestrator):
|
||||
subcloud = db_api.subcloud_get(self.ctx, self.subcloud.id)
|
||||
|
||||
self.assertEqual(
|
||||
subcloud.error_description, "An unexpected error occurred querying the "
|
||||
f"license {dccommon_consts.SYSTEM_CONTROLLER_NAME}. "
|
||||
f"Detail: {GENERIC_ERROR_RESPONSE['error']}"
|
||||
subcloud.error_description,
|
||||
"An unexpected error occurred querying the license "
|
||||
f"{dccommon_consts.SYSTEM_CONTROLLER_NAME}. "
|
||||
f"Detail: {GENERIC_ERROR_RESPONSE['error']}",
|
||||
)
|
||||
|
||||
# Should skip install_license API call
|
||||
|
@ -9,8 +9,9 @@ import mock
|
||||
from dcmanager.common import consts
|
||||
from dcmanager.orchestrator.states.software import pre_check
|
||||
from dcmanager.tests.unit.common import fake_strategy
|
||||
from dcmanager.tests.unit.orchestrator.states.software.test_base import \
|
||||
TestSoftwareOrchestrator
|
||||
from dcmanager.tests.unit.orchestrator.states.software.test_base import (
|
||||
TestSoftwareOrchestrator,
|
||||
)
|
||||
|
||||
# TODO(nicodemos): Change strategy name after sw-deploy is created
|
||||
FAKE_VALID_CURRENT_STRATEGY = {"sw-upgrade": "build-failed"}
|
||||
|
@ -25,18 +25,14 @@ STRATEGY_APPLY_FAILED = FakeVimStrategy(state=vim.STATE_APPLY_FAILED)
|
||||
# method "ApplyingVIMStrategyState.__init__" don't change. To fix this, we patch
|
||||
# these default values in "ApplyingVIMStrategyState.__init__.__defaults__".
|
||||
@mock.patch(
|
||||
"dcmanager.orchestrator.states.applying_vim_strategy."
|
||||
"DEFAULT_MAX_FAILED_QUERIES",
|
||||
"dcmanager.orchestrator.states.applying_vim_strategy.DEFAULT_MAX_FAILED_QUERIES",
|
||||
3,
|
||||
)
|
||||
@mock.patch(
|
||||
"dcmanager.orchestrator.states.applying_vim_strategy."
|
||||
"DEFAULT_MAX_WAIT_ATTEMPTS",
|
||||
"dcmanager.orchestrator.states.applying_vim_strategy.DEFAULT_MAX_WAIT_ATTEMPTS",
|
||||
3,
|
||||
)
|
||||
@mock.patch(
|
||||
"dcmanager.orchestrator.states.applying_vim_strategy.WAIT_INTERVAL", 1
|
||||
)
|
||||
@mock.patch("dcmanager.orchestrator.states.applying_vim_strategy.WAIT_INTERVAL", 1)
|
||||
@mock.patch(
|
||||
"dcmanager.orchestrator.states.applying_vim_strategy."
|
||||
"ApplyingVIMStrategyState.__init__.__defaults__",
|
||||
@ -75,9 +71,7 @@ class ApplyingVIMStrategyMixin(object):
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
|
||||
# Successful promotion to next state
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_applying_vim_strategy_raises_exception(self):
|
||||
"""Test applying a VIM strategy that raises an exception"""
|
||||
@ -182,9 +176,7 @@ class ApplyingVIMStrategyMixin(object):
|
||||
self.vim_client.apply_strategy.assert_not_called()
|
||||
|
||||
# SUCCESS case
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, self.on_success_state
|
||||
)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_applying_vim_strategy_already_exists_and_is_broken(self):
|
||||
"""Test applying a VIM strategy while a broken strategy exists"""
|
||||
|
@ -17,10 +17,12 @@ STRATEGY_DONE_BUILDING = FakeVimStrategy(state=vim.STATE_READY_TO_APPLY)
|
||||
STRATEGY_FAILED_BUILDING = FakeVimStrategy(state=vim.STATE_BUILD_FAILED)
|
||||
|
||||
|
||||
@mock.patch("dcmanager.orchestrator.states.creating_vim_strategy."
|
||||
"DEFAULT_MAX_QUERIES", 3)
|
||||
@mock.patch("dcmanager.orchestrator.states.creating_vim_strategy."
|
||||
"DEFAULT_SLEEP_DURATION", 1)
|
||||
@mock.patch(
|
||||
"dcmanager.orchestrator.states.creating_vim_strategy.DEFAULT_MAX_QUERIES", 3
|
||||
)
|
||||
@mock.patch(
|
||||
"dcmanager.orchestrator.states.creating_vim_strategy.DEFAULT_SLEEP_DURATION", 1
|
||||
)
|
||||
class CreatingVIMStrategyStageMixin(object):
|
||||
|
||||
def set_state(self, state, success_state):
|
||||
@ -56,8 +58,7 @@ class CreatingVIMStrategyStageMixin(object):
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
|
||||
# Successful promotion to next state
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id,
|
||||
self.on_success_state)
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
||||
|
||||
def test_creating_vim_strategy_raises_exception(self):
|
||||
"""Test creating a VIM strategy that raises an exception"""
|
||||
@ -66,15 +67,17 @@ class CreatingVIMStrategyStageMixin(object):
|
||||
self.vim_client.get_strategy.return_value = None
|
||||
|
||||
# raise an exception during create_strategy
|
||||
self.vim_client.create_strategy.side_effect =\
|
||||
Exception("HTTPBadRequest: this is a fake exception")
|
||||
self.vim_client.create_strategy.side_effect = Exception(
|
||||
"HTTPBadRequest: this is a fake exception"
|
||||
)
|
||||
|
||||
# invoke the strategy state operation on the orch thread
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
|
||||
# Failure case
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id,
|
||||
consts.STRATEGY_STATE_FAILED)
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
|
||||
)
|
||||
|
||||
def test_creating_vim_strategy_fails_create_immediately(self):
|
||||
"""Test creating a VIM strategy that returns a failed create"""
|
||||
@ -89,8 +92,9 @@ class CreatingVIMStrategyStageMixin(object):
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
|
||||
# Failure case
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id,
|
||||
consts.STRATEGY_STATE_FAILED)
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
|
||||
)
|
||||
|
||||
def test_creating_vim_strategy_fails_create_later(self):
|
||||
"""Test creating a VIM strategy that starts to build but then fails"""
|
||||
@ -109,15 +113,20 @@ class CreatingVIMStrategyStageMixin(object):
|
||||
self.worker.perform_state_action(self.strategy_step)
|
||||
|
||||
# Failure case
|
||||
self.assert_step_updated(self.strategy_step.subcloud_id,
|
||||
consts.STRATEGY_STATE_FAILED)
|
||||
self.assert_step_updated(
|
||||
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
|
||||
)
|
||||
|
||||
def test_creating_vim_strategy_timeout(self):
|
||||
"""Test creating a VIM strategy that times out"""
|
||||
|
||||
# first api query is before the create
|
||||
self.vim_client.get_strategy.side_effect = itertools.chain(
|
||||
[None, ], itertools.repeat(STRATEGY_BUILDING))
|
||||
[
|
||||
None,
|
||||
],
|
||||
itertools.repeat(STRATEGY_BUILDING),
|
||||
)
|
||||
|
||||
# API calls act as expected
self.vim_client.create_strategy.return_value = STRATEGY_BUILDING
@ -126,12 +135,15 @@ class CreatingVIMStrategyStageMixin(object):
self.worker.perform_state_action(self.strategy_step)
|
# verify the max number of queries was attempted (plus 1)
self.assertEqual(creating_vim_strategy.DEFAULT_MAX_QUERIES + 1,
self.vim_client.get_strategy.call_count)
self.assertEqual(
creating_vim_strategy.DEFAULT_MAX_QUERIES + 1,
self.vim_client.get_strategy.call_count,
)
|
# Failure case
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)
|
def test_creating_vim_strategy_already_exists_and_completes(self):
"""Test creating a VIM strategy while one already exists"""
@ -157,8 +169,7 @@ class CreatingVIMStrategyStageMixin(object):
self.assertEqual(1, self.vim_client.create_strategy.call_count)
|
# SUCCESS case
self.assert_step_updated(self.strategy_step.subcloud_id,
self.on_success_state)
self.assert_step_updated(self.strategy_step.subcloud_id, self.on_success_state)
|
def test_creating_vim_strategy_already_exists_and_is_broken(self):
"""Test creating a VIM strategy while a broken strategy exists"""
@ -176,5 +187,6 @@ class CreatingVIMStrategyStageMixin(object):
self.vim_client.create_strategy.assert_not_called()
|
# Failure case
self.assert_step_updated(self.strategy_step.subcloud_id,
consts.STRATEGY_STATE_FAILED)
self.assert_step_updated(
self.strategy_step.subcloud_id, consts.STRATEGY_STATE_FAILED
)
|
@ -49,49 +49,49 @@ class TestSwUpdate(base.DCManagerTestCase):
|
# Mock the context
self.ctxt = utils.dummy_context()
p = mock.patch.object(context, 'get_admin_context')
p = mock.patch.object(context, "get_admin_context")
self.mock_get_admin_context = p.start()
self.mock_get_admin_context.return_value = self.ctx
self.addCleanup(p.stop)
|
# Mock the keystone client defined in the base state class
self.keystone_client = FakeKeystoneClient()
p = mock.patch.object(BaseState, 'get_keystone_client')
p = mock.patch.object(BaseState, "get_keystone_client")
self.mock_keystone_client = p.start()
self.mock_keystone_client.return_value = self.keystone_client
self.addCleanup(p.stop)
|
# Mock the sysinv client defined in the base state class
self.sysinv_client = FakeSysinvClient()
p = mock.patch.object(BaseState, 'get_sysinv_client')
p = mock.patch.object(BaseState, "get_sysinv_client")
self.mock_sysinv_client = p.start()
self.mock_sysinv_client.return_value = self.sysinv_client
self.addCleanup(p.stop)
|
# Mock the software client defined in the base state class
self.software_client = FakeSoftwareClient()
p = mock.patch.object(BaseState, 'get_software_client')
p = mock.patch.object(BaseState, "get_software_client")
self.mock_software_client = p.start()
self.mock_software_client.return_value = self.software_client
self.addCleanup(p.stop)
|
# Mock the patching client defined in the base state class
self.patching_client = FakePatchingClient()
p = mock.patch.object(BaseState, 'get_patching_client')
p = mock.patch.object(BaseState, "get_patching_client")
self.mock_patching_client = p.start()
self.mock_patching_client.return_value = self.patching_client
self.addCleanup(p.stop)
|
# Mock the vim client defined in the base state class
self.vim_client = FakeVimClient()
p = mock.patch.object(BaseState, 'get_vim_client')
p = mock.patch.object(BaseState, "get_vim_client")
self.mock_vim_client = p.start()
self.mock_vim_client.return_value = self.vim_client
self.addCleanup(p.stop)
|
# Mock the fm client defined in the base state class
self.fm_client = FakeFmClient()
p = mock.patch.object(BaseState, 'get_fm_client')
p = mock.patch.object(BaseState, "get_fm_client")
self.mock_fm_client = p.start()
self.mock_fm_client.return_value = self.fm_client
self.addCleanup(p.stop)
@ -105,108 +105,108 @@ class TestSwUpdate(base.DCManagerTestCase):
if strategy_type == consts.SW_UPDATE_TYPE_SOFTWARE:
sw_update_manager.SoftwareOrchThread.stopped = lambda x: False
worker = sw_update_manager.SoftwareOrchThread(
mock_strategy_lock, mock_dcmanager_audit_api)
mock_strategy_lock, mock_dcmanager_audit_api
)
else:
# mock the software orch thread
self.fake_software_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager, 'SoftwareOrchThread')
p = mock.patch.object(sw_update_manager, "SoftwareOrchThread")
self.mock_software_orch_thread = p.start()
self.mock_software_orch_thread.return_value = \
self.fake_software_orch_thread
self.mock_software_orch_thread.return_value = self.fake_software_orch_thread
self.addCleanup(p.stop)
|
if strategy_type == consts.SW_UPDATE_TYPE_PATCH:
sw_update_manager.PatchOrchThread.stopped = lambda x: False
worker = \
sw_update_manager.PatchOrchThread(mock_strategy_lock,
mock_dcmanager_audit_api)
worker = sw_update_manager.PatchOrchThread(
mock_strategy_lock, mock_dcmanager_audit_api
)
else:
# mock the patch orch thread
self.fake_sw_patch_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager, 'PatchOrchThread')
p = mock.patch.object(sw_update_manager, "PatchOrchThread")
self.mock_sw_patch_orch_thread = p.start()
self.mock_sw_patch_orch_thread.return_value = \
self.fake_sw_patch_orch_thread
self.mock_sw_patch_orch_thread.return_value = self.fake_sw_patch_orch_thread
self.addCleanup(p.stop)
|
if strategy_type == consts.SW_UPDATE_TYPE_FIRMWARE:
sw_update_manager.FwUpdateOrchThread.stopped = lambda x: False
worker = \
sw_update_manager.FwUpdateOrchThread(mock_strategy_lock,
mock_dcmanager_audit_api)
worker = sw_update_manager.FwUpdateOrchThread(
mock_strategy_lock, mock_dcmanager_audit_api
)
else:
# mock the firmware orch thread
self.fake_fw_update_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager, 'FwUpdateOrchThread')
p = mock.patch.object(sw_update_manager, "FwUpdateOrchThread")
self.mock_fw_update_orch_thread = p.start()
self.mock_fw_update_orch_thread.return_value = \
self.mock_fw_update_orch_thread.return_value = (
self.fake_fw_update_orch_thread
)
self.addCleanup(p.stop)
|
if strategy_type == consts.SW_UPDATE_TYPE_KUBERNETES:
sw_update_manager.KubeUpgradeOrchThread.stopped = lambda x: False
worker = sw_update_manager.KubeUpgradeOrchThread(
mock_strategy_lock,
mock_dcmanager_audit_api)
mock_strategy_lock, mock_dcmanager_audit_api
)
else:
# mock the kube upgrade orch thread
self.fake_kube_upgrade_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager, 'KubeUpgradeOrchThread')
p = mock.patch.object(sw_update_manager, "KubeUpgradeOrchThread")
self.mock_kube_upgrade_orch_thread = p.start()
self.mock_kube_upgrade_orch_thread.return_value = \
self.mock_kube_upgrade_orch_thread.return_value = (
self.fake_kube_upgrade_orch_thread
)
self.addCleanup(p.stop)
|
if strategy_type == consts.SW_UPDATE_TYPE_KUBE_ROOTCA_UPDATE:
sw_update_manager.KubeRootcaUpdateOrchThread.stopped = \
lambda x: False
sw_update_manager.KubeRootcaUpdateOrchThread.stopped = lambda x: False
worker = sw_update_manager.KubeRootcaUpdateOrchThread(
mock_strategy_lock,
mock_dcmanager_audit_api)
mock_strategy_lock, mock_dcmanager_audit_api
)
else:
# mock the kube rootca update orch thread
self.fake_kube_rootca_update_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager,
'KubeRootcaUpdateOrchThread')
p = mock.patch.object(sw_update_manager, "KubeRootcaUpdateOrchThread")
self.mock_kube_rootca_update_orch_thread = p.start()
self.mock_kube_rootca_update_orch_thread.return_value = \
self.mock_kube_rootca_update_orch_thread.return_value = (
self.fake_kube_rootca_update_orch_thread
)
self.addCleanup(p.stop)
|
if strategy_type == consts.SW_UPDATE_TYPE_PRESTAGE:
sw_update_manager.PrestageOrchThread.stopped = lambda x: False
worker = \
sw_update_manager.PrestageOrchThread(mock_strategy_lock,
mock_dcmanager_audit_api)
worker = sw_update_manager.PrestageOrchThread(
mock_strategy_lock, mock_dcmanager_audit_api
)
else:
# mock the prestage orch thread
self.fake_prestage_orch_thread = FakeOrchThread()
p = mock.patch.object(sw_update_manager, 'PrestageOrchThread')
p = mock.patch.object(sw_update_manager, "PrestageOrchThread")
self.mock_prestage_orch_thread = p.start()
self.mock_prestage_orch_thread.return_value = \
self.fake_prestage_orch_thread
self.mock_prestage_orch_thread.return_value = self.fake_prestage_orch_thread
self.addCleanup(p.stop)
|
return worker
|
def setup_subcloud(self, deploy_status=consts.DEPLOY_STATE_INSTALLED):
subcloud_id = fake_subcloud.create_fake_subcloud(
self.ctx, deploy_status=deploy_status,
self.ctx,
deploy_status=deploy_status,
).id
return db_api.subcloud_update(
self.ctx,
subcloud_id,
management_state=dccommon_consts.MANAGEMENT_MANAGED,
availability_status=dccommon_consts.AVAILABILITY_ONLINE)
availability_status=dccommon_consts.AVAILABILITY_ONLINE,
)
|
def delete_subcloud(self, subcloud_id):
return db_api.subcloud_destroy(self.ctx, subcloud_id)
|
def setup_strategy_step(self, subcloud_id, strategy_state):
fake_strategy.create_fake_strategy_step(
self.ctx,
subcloud_id=subcloud_id,
state=strategy_state)
self.ctx, subcloud_id=subcloud_id, state=strategy_state
)
return db_api.strategy_step_get(self.ctx, subcloud_id)
|
def clean_strategy_steps(self):
@ -226,11 +226,7 @@ class TestSwUpdate(base.DCManagerTestCase):
self.assertEqual(expected_val, subcloud[attr_name])
|
def assert_subcloud_software_version(self, subcloud_id, expected_val):
self.assert_subcloud_attribute(subcloud_id,
'software_version',
expected_val)
self.assert_subcloud_attribute(subcloud_id, "software_version", expected_val)
|
def assert_subcloud_deploy_status(self, subcloud_id, expected_val):
self.assert_subcloud_attribute(subcloud_id,
'deploy_status',
expected_val)
self.assert_subcloud_attribute(subcloud_id, "deploy_status", expected_val)
|
@ -1,5 +1,5 @@
#
# Copyright (c) 2020-2022 Wind River Systems, Inc.
# Copyright (c) 2020-2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -32,23 +32,22 @@ class TestFwOrchThread(TestSwUpdate):
"name": name,
"description": "subcloud1 description",
"location": "subcloud1 location",
'software_version': "18.03",
"software_version": "18.03",
"management_subnet": "192.168.101.0/24",
"management_gateway_ip": "192.168.101.1",
"management_start_ip": "192.168.101.3",
"management_end_ip": "192.168.101.4",
"systemcontroller_gateway_ip": "192.168.204.101",
'deploy_status': "not-deployed",
'error_description': 'No errors present',
'region_name': uuidutils.generate_uuid().replace("-", ""),
'openstack_installed': False,
'group_id': group_id,
'data_install': 'data from install',
"deploy_status": "not-deployed",
"error_description": "No errors present",
"region_name": uuidutils.generate_uuid().replace("-", ""),
"openstack_installed": False,
"group_id": group_id,
"data_install": "data from install",
}
subcloud = db_api.subcloud_create(ctxt, **values)
state = dccommon_consts.MANAGEMENT_MANAGED
subcloud = db_api.subcloud_update(ctxt, subcloud.id,
management_state=state)
subcloud = db_api.subcloud_update(ctxt, subcloud.id, management_state=state)
return subcloud
|
# Setting DEFAULT_STRATEGY_TYPE to firmware will setup the firmware
@ -60,7 +59,7 @@ class TestFwOrchThread(TestSwUpdate):
|
# Mock the vim client defined in the base state class
self.vim_client = FakeVimClient()
p = mock.patch.object(OrchThread, 'get_vim_client')
p = mock.patch.object(OrchThread, "get_vim_client")
self.mock_vim_client = p.start()
self.mock_vim_client.return_value = self.vim_client
self.addCleanup(p.stop)
@ -74,12 +73,12 @@ class TestFwOrchThread(TestSwUpdate):
self.ctx,
consts.SW_UPDATE_TYPE_FIRMWARE,
max_parallel_subclouds=2,
state=state)
state=state,
)
|
def test_delete_strategy_no_steps(self):
# The 'strategy' should be 'deleting'
self.strategy = self.setup_strategy(
state=consts.SW_UPDATE_STATE_DELETING)
self.strategy = self.setup_strategy(state=consts.SW_UPDATE_STATE_DELETING)
|
# invoke the strategy (not strategy step) operation on the orch thread
self.worker.delete(self.strategy)
@ -88,28 +87,25 @@ class TestFwOrchThread(TestSwUpdate):
self.vim_client.get_strategy.assert_not_called()
|
# Verify the strategy was deleted
self.assertRaises(exception.NotFound,
db_api.sw_update_strategy_get,
self.ctx,
consts.SW_UPDATE_TYPE_FIRMWARE)
self.assertRaises(
exception.NotFound,
db_api.sw_update_strategy_get,
self.ctx,
consts.SW_UPDATE_TYPE_FIRMWARE,
)
|
@mock.patch.object(scheduler.ThreadGroupManager, 'start')
@mock.patch.object(OrchThread, 'perform_state_action')
def test_apply_strategy(self, mock_perform_state_action,
mock_start):
@mock.patch.object(scheduler.ThreadGroupManager, "start")
@mock.patch.object(OrchThread, "perform_state_action")
def test_apply_strategy(self, mock_perform_state_action, mock_start):
mock_start.side_effect = non_threaded_start
self.strategy = self.setup_strategy(
state=consts.SW_UPDATE_STATE_APPLYING)
subcloud2 = self.create_subcloud(self.ctxt, 'subcloud2', 1)
subcloud3 = self.create_subcloud(self.ctxt, 'subcloud3', 1)
subcloud4 = self.create_subcloud(self.ctxt, 'subcloud4', 1)
self.strategy = self.setup_strategy(state=consts.SW_UPDATE_STATE_APPLYING)
subcloud2 = self.create_subcloud(self.ctxt, "subcloud2", 1)
subcloud3 = self.create_subcloud(self.ctxt, "subcloud3", 1)
subcloud4 = self.create_subcloud(self.ctxt, "subcloud4", 1)
|
self.setup_strategy_step(
subcloud2.id, consts.STRATEGY_STATE_INITIAL)
self.setup_strategy_step(
subcloud3.id, consts.STRATEGY_STATE_INITIAL)
self.setup_strategy_step(
subcloud4.id, consts.STRATEGY_STATE_INITIAL)
self.setup_strategy_step(subcloud2.id, consts.STRATEGY_STATE_INITIAL)
self.setup_strategy_step(subcloud3.id, consts.STRATEGY_STATE_INITIAL)
self.setup_strategy_step(subcloud4.id, consts.STRATEGY_STATE_INITIAL)
|
self.worker.apply(self.strategy)
|
@ -121,9 +117,9 @@ class TestFwOrchThread(TestSwUpdate):
self.assertEqual(steps[2].state, consts.STRATEGY_STATE_INITIAL)
|
# subcloud3 orchestration finished first
db_api.strategy_step_update(self.ctx,
subcloud3.id,
state=consts.STRATEGY_STATE_COMPLETE)
db_api.strategy_step_update(
self.ctx, subcloud3.id, state=consts.STRATEGY_STATE_COMPLETE
)
|
self.worker.apply(self.strategy)
|
@ -134,15 +130,15 @@ class TestFwOrchThread(TestSwUpdate):
self.assertEqual(steps[1].state, consts.STRATEGY_STATE_COMPLETE)
self.assertEqual(steps[2].state, consts.STRATEGY_STATE_IMPORTING_FIRMWARE)
|
@mock.patch.object(scheduler.ThreadGroupManager, 'start')
@mock.patch.object(scheduler.ThreadGroupManager, "start")
def test_delete_strategy_single_step_no_vim_strategy(self, mock_start):
# The 'strategy' needs to be in 'deleting'
self.strategy = self.setup_strategy(
state=consts.SW_UPDATE_STATE_DELETING)
self.strategy = self.setup_strategy(state=consts.SW_UPDATE_STATE_DELETING)
|
self.subcloud = self.setup_subcloud()
self.setup_strategy_step(
self.subcloud.id, consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY)
self.subcloud.id, consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY
)
|
# If the subcloud does not have a vim strategy, it raises an exception
self.vim_client.get_strategy.side_effect = Exception
@ -156,27 +152,29 @@ class TestFwOrchThread(TestSwUpdate):
self.vim_client.get_strategy.assert_called()
|
# Verify the strategy was deleted
self.assertRaises(exception.NotFound,
db_api.sw_update_strategy_get,
self.ctx,
consts.SW_UPDATE_TYPE_FIRMWARE)
self.assertRaises(
exception.NotFound,
db_api.sw_update_strategy_get,
self.ctx,
consts.SW_UPDATE_TYPE_FIRMWARE,
)
|
# Verify the steps were deleted
steps = db_api.strategy_step_get_all(self.ctx)
self.assertEqual(steps, [])
|
@mock.patch.object(scheduler.ThreadGroupManager, 'start')
@mock.patch.object(scheduler.ThreadGroupManager, "start")
def test_delete_strategy_single_step_with_vim_strategy(self, mock_start):
|
mock_start.side_effect = non_threaded_start
|
# The 'strategy' needs to be in 'deleting'
self.strategy = self.setup_strategy(
state=consts.SW_UPDATE_STATE_DELETING)
self.strategy = self.setup_strategy(state=consts.SW_UPDATE_STATE_DELETING)
|
self.subcloud = self.setup_subcloud()
self.setup_strategy_step(
self.subcloud.id, consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY)
self.subcloud.id, consts.STRATEGY_STATE_CREATING_FW_UPDATE_STRATEGY
)
|
# the subcloud returns a vim strategy
vim_strategy = FakeVimStrategy(state=vim.STATE_APPLIED)
@ -190,10 +188,12 @@ class TestFwOrchThread(TestSwUpdate):
self.vim_client.delete_strategy.assert_called()
|
# Verify the strategy was deleted
self.assertRaises(exception.NotFound,
db_api.sw_update_strategy_get,
self.ctx,
consts.SW_UPDATE_TYPE_FIRMWARE)
self.assertRaises(
exception.NotFound,
db_api.sw_update_strategy_get,
self.ctx,
consts.SW_UPDATE_TYPE_FIRMWARE,
)
|
# Verify the steps were deleted
steps = db_api.strategy_step_get_all(self.ctx)
|
@ -9,11 +9,13 @@ Firmware strategy validation tests
|
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.orchestrator.validators.firmware_validator import (
FirmwareStrategyValidator
FirmwareStrategyValidator,
)
from dcmanager.tests.base import DCManagerTestCase
from dcmanager.tests.unit.orchestrator.validators.validators_mixin import (
StrategyRequirementsMixin, BaseMixin, BuildExtraArgsMixin
StrategyRequirementsMixin,
BaseMixin,
BuildExtraArgsMixin,
)
from dcmanager.tests.unit.common.fake_subcloud import create_fake_subcloud
|
|
@ -10,11 +10,13 @@ Kube root-ca strategy validation tests
from dcmanager.common import consts
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.orchestrator.validators.kube_root_ca_validator import (
KubeRootCaStrategyValidator
KubeRootCaStrategyValidator,
)
from dcmanager.tests.base import DCManagerTestCase
from dcmanager.tests.unit.orchestrator.validators.validators_mixin import (
StrategyRequirementsMixin, BaseMixin, BuildExtraArgsMixin
StrategyRequirementsMixin,
BaseMixin,
BuildExtraArgsMixin,
)
from dcmanager.tests.unit.common.fake_subcloud import create_fake_subcloud
|
|
@ -10,11 +10,13 @@ Kubernetes strategy validation tests
from dcmanager.common import consts
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.orchestrator.validators.kubernetes_validator import (
KubernetesStrategyValidator
KubernetesStrategyValidator,
)
from dcmanager.tests.base import DCManagerTestCase
from dcmanager.tests.unit.orchestrator.validators.validators_mixin import (
StrategyRequirementsMixin, BaseMixin, BuildExtraArgsMixin
StrategyRequirementsMixin,
BaseMixin,
BuildExtraArgsMixin,
)
from dcmanager.tests.unit.common.fake_subcloud import create_fake_subcloud
|
|
@ -9,12 +9,12 @@ Patch strategy validation tests
|
from dcmanager.common import consts
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.orchestrator.validators.patch_validator import (
PatchStrategyValidator
)
from dcmanager.orchestrator.validators.patch_validator import PatchStrategyValidator
from dcmanager.tests.base import DCManagerTestCase
from dcmanager.tests.unit.orchestrator.validators.validators_mixin import (
StrategyRequirementsMixin, BaseMixin, BuildExtraArgsMixin
StrategyRequirementsMixin,
BaseMixin,
BuildExtraArgsMixin,
)
from dcmanager.tests.unit.common.fake_subcloud import create_fake_subcloud
|
@ -39,10 +39,7 @@ class TestPatchValidator(
return self.validator
|
def _get_build_extra_args_payload(self):
return {
consts.EXTRA_ARGS_UPLOAD_ONLY: True,
consts.EXTRA_ARGS_PATCH: None
}
return {consts.EXTRA_ARGS_UPLOAD_ONLY: True, consts.EXTRA_ARGS_PATCH: None}
|
def _get_expected_extra_args(self):
|
|
@ -9,13 +9,11 @@ Prestage strategy validation tests
|
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.orchestrator.validators.prestage_validator import (
PrestageStrategyValidator
PrestageStrategyValidator,
)
from dcmanager.tests.base import DCManagerTestCase
from dcmanager.tests.unit.common.fake_subcloud import create_fake_subcloud
from dcmanager.tests.unit.orchestrator.validators.validators_mixin import (
BaseMixin
)
from dcmanager.tests.unit.orchestrator.validators.validators_mixin import BaseMixin
|
|
# TODO(rlima): add the mixins once prestage logic is moved to the validator
|
@ -10,11 +10,13 @@ Software deploy strategy validation tests
from dcmanager.common import consts
from dcmanager.db.sqlalchemy import api as db_api
from dcmanager.orchestrator.validators.sw_deploy_validator import (
SoftwareDeployStrategyValidator
SoftwareDeployStrategyValidator,
)
from dcmanager.tests.base import DCManagerTestCase
from dcmanager.tests.unit.orchestrator.validators.validators_mixin import (
StrategyRequirementsMixin, BaseMixin, BuildExtraArgsMixin
StrategyRequirementsMixin,
BaseMixin,
BuildExtraArgsMixin,
)
from dcmanager.tests.unit.common.fake_subcloud import create_fake_subcloud
|
|
@ -81,8 +81,10 @@ class StrategyRequirementsMixin(object):
"""Test validate_strategy_requirements succeeds with endpoint out of sync"""
|
db_api.subcloud_status_update(
self.ctx, self.subcloud.id, self._get_validator().endpoint_type,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC
self.ctx,
self.subcloud.id,
self._get_validator().endpoint_type,
dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
)
|
self._get_validator().validate_strategy_requirements(
@ -97,14 +99,19 @@ class StrategyRequirementsMixin(object):
"""Test validate_strategy_requirements fails with endpoint in sync"""
|
db_api.subcloud_status_update(
self.ctx, self.subcloud.id, self._get_validator().endpoint_type,
dccommon_consts.SYNC_STATUS_IN_SYNC
self.ctx,
self.subcloud.id,
self._get_validator().endpoint_type,
dccommon_consts.SYNC_STATUS_IN_SYNC,
)
|
self.assertRaises(
exceptions.BadRequest,
self._get_validator().validate_strategy_requirements,
self.ctx, self.subcloud.id, self.subcloud.name, False
self.ctx,
self.subcloud.id,
self.subcloud.name,
False,
)
|
self._get_mock_db_api().assert_called_with(
|
@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -13,7 +12,6 @@
# under the License.
#
|
|
import mock
|
from dcmanager.common import config
@ -35,13 +33,13 @@ class ManagerRpcAPITestCase(base.DCManagerTestCase):
self.rpcapi = rpc_client.ManagerClient()
super(ManagerRpcAPITestCase, self).setUp()
|
@mock.patch.object(messaging, 'get_rpc_client')
@mock.patch.object(messaging, "get_rpc_client")
def test_call(self, mock_client):
client = mock.Mock()
mock_client.return_value = client
|
method = 'fake_method'
kwargs = {'key': 'value'}
method = "fake_method"
kwargs = {"key": "value"}
rpcapi = rpc_client.ManagerClient()
msg = rpcapi.make_msg(method, **kwargs)
|
@ -49,25 +47,25 @@ class ManagerRpcAPITestCase(base.DCManagerTestCase):
res = rpcapi.call(self.context, msg)
|
self.assertEqual(client, rpcapi._client)
client.call.assert_called_once_with(self.context, 'fake_method',
key='value')
client.call.assert_called_once_with(self.context, "fake_method", key="value")
self.assertEqual(res, client.call.return_value)
|
# with version
res = rpcapi.call(self.context, msg, version='123')
client.prepare.assert_called_once_with(version='123')
res = rpcapi.call(self.context, msg, version="123")
client.prepare.assert_called_once_with(version="123")
new_client = client.prepare.return_value
new_client.call.assert_called_once_with(self.context, 'fake_method',
key='value')
new_client.call.assert_called_once_with(
self.context, "fake_method", key="value"
)
self.assertEqual(res, new_client.call.return_value)
|
@mock.patch.object(messaging, 'get_rpc_client')
@mock.patch.object(messaging, "get_rpc_client")
def test_cast(self, mock_client):
client = mock.Mock()
mock_client.return_value = client
|
method = 'fake_method'
kwargs = {'key': 'value'}
method = "fake_method"
kwargs = {"key": "value"}
rpcapi = rpc_client.ManagerClient()
msg = rpcapi.make_msg(method, **kwargs)
|
@ -75,14 +73,14 @@ class ManagerRpcAPITestCase(base.DCManagerTestCase):
res = rpcapi.cast(self.context, msg)
|
self.assertEqual(client, rpcapi._client)
client.cast.assert_called_once_with(self.context, 'fake_method',
key='value')
client.cast.assert_called_once_with(self.context, "fake_method", key="value")
self.assertEqual(res, client.cast.return_value)
|
# with version
res = rpcapi.cast(self.context, msg, version='123')
client.prepare.assert_called_once_with(fanout=None, version='123')
res = rpcapi.cast(self.context, msg, version="123")
client.prepare.assert_called_once_with(fanout=None, version="123")
new_client = client.prepare.return_value
new_client.cast.assert_called_once_with(self.context, 'fake_method',
key='value')
new_client.cast.assert_called_once_with(
self.context, "fake_method", key="value"
)
self.assertEqual(res, new_client.cast.return_value)
|
@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc.
# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
|
@ -34,9 +34,7 @@ class TestUtils(base.DCManagerTestCase):
"admin_subnet": "192.168.205.0/24",
"management_subnet": "192.168.204.0/24",
}
self.assertEqual(
utils.get_management_subnet(payload), payload["admin_subnet"]
)
self.assertEqual(utils.get_management_subnet(payload), payload["admin_subnet"])
|
def test_get_management_start_address(self):
payload = {"management_start_address": "192.168.204.2"}
|
@ -40,24 +40,25 @@ class UUIDStub(object):
uuid.uuid4 = self.uuid4
|
|
UUIDs = (UUID1, UUID2, UUID3, UUID4, UUID5) = sorted([str(uuid.uuid4())
for x in range(5)])
UUIDs = (UUID1, UUID2, UUID3, UUID4, UUID5) = sorted(
[str(uuid.uuid4()) for x in range(5)]
)
|
|
def random_name():
return ''.join(random.choice(string.ascii_uppercase)
for x in range(10))
return "".join(random.choice(string.ascii_uppercase) for x in range(10))
|
|
def dummy_context(user='test_username', tenant='test_project_id',
region_name=None):
return context.RequestContext.from_dict({
'auth_token': 'abcd1234',
'user': user,
'project': tenant,
'is_admin': True,
'region_name': region_name
})
def dummy_context(user="test_username", tenant="test_project_id", region_name=None):
return context.RequestContext.from_dict(
{
"auth_token": "abcd1234",
"user": user,
"project": tenant,
"is_admin": True,
"region_name": region_name,
}
)
|
|
def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
@ -67,31 +68,33 @@ def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
|
|
def create_subcloud_dict(data_list):
return {'id': data_list[0],
'name': data_list[1],
'description': data_list[2],
'location': data_list[3],
'software-version': data_list[4],
'management-state': data_list[5],
'availability-status': data_list[6],
'management_subnet': data_list[7],
'management_gateway_address': data_list[8],
'management_start_address': data_list[9],
'management_end_address': data_list[10],
'systemcontroller_gateway_address': data_list[11],
'audit-fail-count': data_list[12],
'reserved-1': data_list[13],
'reserved-2': data_list[14],
'created-at': data_list[15],
'updated-at': data_list[16],
'deleted-at': data_list[17],
'deleted': data_list[18],
'external_oam_subnet': data_list[19],
'external_oam_gateway_address': data_list[20],
'external_oam_floating_address': data_list[21],
'sysadmin_password': data_list[22],
'group_id': data_list[23],
'deploy_status': data_list[24],
'error_description': data_list[25],
'region_name': data_list[26],
'data_install': data_list[27]}
return {
"id": data_list[0],
"name": data_list[1],
"description": data_list[2],
"location": data_list[3],
"software-version": data_list[4],
"management-state": data_list[5],
"availability-status": data_list[6],
"management_subnet": data_list[7],
"management_gateway_address": data_list[8],
"management_start_address": data_list[9],
"management_end_address": data_list[10],
"systemcontroller_gateway_address": data_list[11],
"audit-fail-count": data_list[12],
"reserved-1": data_list[13],
"reserved-2": data_list[14],
"created-at": data_list[15],
"updated-at": data_list[16],
"deleted-at": data_list[17],
"deleted": data_list[18],
"external_oam_subnet": data_list[19],
"external_oam_gateway_address": data_list[20],
"external_oam_floating_address": data_list[21],
"sysadmin_password": data_list[22],
"group_id": data_list[23],
"deploy_status": data_list[24],
"error_description": data_list[25],
"region_name": data_list[26],
"data_install": data_list[27],
}
|
@ -29,6 +29,7 @@ formatted_modules = [
"dcmanager/common",
"dcmanager/db",
"dcmanager/orchestrator",
"dcmanager/tests",
]