diff --git a/distributedcloud/dcmanager/db/api.py b/distributedcloud/dcmanager/db/api.py
index 999b21900..8248c2f09 100644
--- a/distributedcloud/dcmanager/db/api.py
+++ b/distributedcloud/dcmanager/db/api.py
@@ -29,7 +29,7 @@ from dcmanager.db.sqlalchemy import models
 
 CONF = cfg.CONF
 
-_BACKEND_MAPPING = {'sqlalchemy': 'dcmanager.db.sqlalchemy.api'}
+_BACKEND_MAPPING = {"sqlalchemy": "dcmanager.db.sqlalchemy.api"}
 
 IMPL = api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)
 
@@ -46,6 +46,7 @@ def get_session():
 ##########################
 
 
+
 def subcloud_audits_get(context, subcloud_id):
     """Get subcloud_audits info for a subcloud."""
     return IMPL.subcloud_audits_get(context, subcloud_id)
@@ -57,12 +58,12 @@ def subcloud_audits_get_all(context):
 
 
 def subcloud_audits_update_all(context, values):
-    """"Mark sub-audits as needed for all subclouds."""
+    """Mark sub-audits as needed for all subclouds."""
     return IMPL.subcloud_audits_update_all(context, values)
 
 
 def subcloud_audits_create(context, subcloud_id):
-    """"Create subcloud_audits info for a subcloud."""
+    """Create subcloud_audits info for a subcloud."""
     return IMPL.subcloud_audits_create(context, subcloud_id)
@@ -79,6 +80,7 @@ def subcloud_audits_get_all_need_audit(context, last_audit_threshold):
 
 # In the functions below it would be cleaner if the timestamp were calculated
 # by the DB server. If server time is in UTC func.now() might work.
+
 def subcloud_audits_get_and_start_audit(context, subcloud_id):
     """Set the 'audit started' timestamp for the main audit."""
     return IMPL.subcloud_audits_get_and_start_audit(context, subcloud_id)
@@ -94,61 +96,89 @@ def subcloud_audits_bulk_end_audit(context, subcloud_ids):
     return IMPL.subcloud_audits_bulk_end_audit(context, subcloud_ids)
 
 
-def subcloud_audits_fix_expired_audits(context, last_audit_threshold,
-                                       trigger_audits=False):
-    return IMPL.subcloud_audits_fix_expired_audits(context,
-                                                   last_audit_threshold,
-                                                   trigger_audits)
+def subcloud_audits_fix_expired_audits(
+    context, last_audit_threshold, trigger_audits=False
+):
+    return IMPL.subcloud_audits_fix_expired_audits(
+        context, last_audit_threshold, trigger_audits
+    )
 
 
 # subcloud db methods
 ###################
 
+
 def subcloud_db_model_to_dict(subcloud):
     """Convert subcloud db model to dictionary."""
-    result = {"id": subcloud.id,
-              "name": subcloud.name,
-              "description": subcloud.description,
-              "location": subcloud.location,
-              "software-version": subcloud.software_version,
-              "management-state": subcloud.management_state,
-              "availability-status": subcloud.availability_status,
-              "deploy-status": subcloud.deploy_status,
-              "backup-status": subcloud.backup_status,
-              "backup-datetime": subcloud.backup_datetime,
-              "error-description": subcloud.error_description,
-              'region-name': subcloud.region_name,
-              "management-subnet": subcloud.management_subnet,
-              "management-start-ip": subcloud.management_start_ip,
-              "management-end-ip": subcloud.management_end_ip,
-              "management-gateway-ip": subcloud.management_gateway_ip,
-              "openstack-installed": subcloud.openstack_installed,
-              "prestage-status": subcloud.prestage_status,
-              "prestage-versions": subcloud.prestage_versions,
-              "systemcontroller-gateway-ip":
-                  subcloud.systemcontroller_gateway_ip,
-              "data_install": subcloud.data_install,
-              "data_upgrade": subcloud.data_upgrade,
-              "created-at": subcloud.created_at,
-              "updated-at": subcloud.updated_at,
-              "group_id": subcloud.group_id,
-              "peer_group_id": subcloud.peer_group_id,
-              "rehome_data": subcloud.rehome_data}
+    result = {
+        "id": subcloud.id,
+        "name": subcloud.name,
+        "description": subcloud.description,
+        "location": subcloud.location,
+        "software-version": subcloud.software_version,
+        "management-state": subcloud.management_state,
+        "availability-status": subcloud.availability_status,
+        "deploy-status": subcloud.deploy_status,
+        "backup-status": subcloud.backup_status,
+        "backup-datetime": subcloud.backup_datetime,
+        "error-description": subcloud.error_description,
+        "region-name": subcloud.region_name,
+        "management-subnet": subcloud.management_subnet,
+        "management-start-ip": subcloud.management_start_ip,
+        "management-end-ip": subcloud.management_end_ip,
+        "management-gateway-ip": subcloud.management_gateway_ip,
+        "openstack-installed": subcloud.openstack_installed,
+        "prestage-status": subcloud.prestage_status,
+        "prestage-versions": subcloud.prestage_versions,
+        "systemcontroller-gateway-ip": subcloud.systemcontroller_gateway_ip,
+        "data_install": subcloud.data_install,
+        "data_upgrade": subcloud.data_upgrade,
+        "created-at": subcloud.created_at,
+        "updated-at": subcloud.updated_at,
+        "group_id": subcloud.group_id,
+        "peer_group_id": subcloud.peer_group_id,
+        "rehome_data": subcloud.rehome_data,
+    }
     return result
 
 
-def subcloud_create(context, name, description, location, software_version,
-                    management_subnet, management_gateway_ip,
-                    management_start_ip, management_end_ip,
-                    systemcontroller_gateway_ip, deploy_status, error_description,
-                    region_name, openstack_installed, group_id, data_install=None):
+def subcloud_create(
+    context,
+    name,
+    description,
+    location,
+    software_version,
+    management_subnet,
+    management_gateway_ip,
+    management_start_ip,
+    management_end_ip,
+    systemcontroller_gateway_ip,
+    deploy_status,
+    error_description,
+    region_name,
+    openstack_installed,
+    group_id,
+    data_install=None,
+):
     """Create a subcloud."""
     return IMPL.subcloud_create(
-        context, name, description, location, software_version, management_subnet,
-        management_gateway_ip, management_start_ip, management_end_ip,
-        systemcontroller_gateway_ip, deploy_status, error_description, region_name,
-        openstack_installed, group_id, data_install
+        context,
+        name,
+        description,
+        location,
+        software_version,
+        management_subnet,
+        management_gateway_ip,
+        management_start_ip,
+        management_end_ip,
+        systemcontroller_gateway_ip,
+        deploy_status,
+        error_description,
+        region_name,
+        openstack_installed,
+        group_id,
+        data_install,
     )
@@ -198,13 +228,21 @@ def subcloud_get_all_with_status(context):
 
 
 def subcloud_get_all_valid_for_strategy_step_creation(
-    context, endpoint_type, group_id=None, subcloud_name=None,
-    availability_status=None, sync_status=None
+    context,
+    endpoint_type,
+    group_id=None,
+    subcloud_name=None,
+    availability_status=None,
+    sync_status=None,
 ):
     """Queries all the subclouds that are valid for the strategy step to create"""
     return IMPL.subcloud_get_all_valid_for_strategy_step_creation(
-        context, endpoint_type, group_id, subcloud_name, availability_status,
-        sync_status
+        context,
+        endpoint_type,
+        group_id,
+        subcloud_name,
+        availability_status,
+        sync_status,
     )
@@ -218,25 +256,67 @@ def subcloud_count_invalid_for_strategy_type(
 
 
 def subcloud_update(
-    context, subcloud_id, management_state=None, availability_status=None,
-    software_version=None, name=None, description=None, management_subnet=None,
-    management_gateway_ip=None, management_start_ip=None, management_end_ip=None,
-    location=None, audit_fail_count=None, deploy_status=None, backup_status=None,
-    backup_datetime=None, error_description=None, openstack_installed=None,
-    group_id=None, data_install=None, data_upgrade=None,
-    first_identity_sync_complete=None, systemcontroller_gateway_ip=None,
-    peer_group_id=None, rehome_data=None, rehomed=None,
-    prestage_status=None, prestage_versions=None, region_name=None
+    context,
+    subcloud_id,
+    management_state=None,
+    availability_status=None,
+    software_version=None,
+    name=None,
+    description=None,
+    management_subnet=None,
+    management_gateway_ip=None,
+    management_start_ip=None,
+    management_end_ip=None,
+    location=None,
+    audit_fail_count=None,
+    deploy_status=None,
+    backup_status=None,
+    backup_datetime=None,
+    error_description=None,
+    openstack_installed=None,
+    group_id=None,
+    data_install=None,
+    data_upgrade=None,
+    first_identity_sync_complete=None,
+    systemcontroller_gateway_ip=None,
+    peer_group_id=None,
+    rehome_data=None,
+    rehomed=None,
+    prestage_status=None,
+    prestage_versions=None,
+    region_name=None,
 ):
     """Update a subcloud or raise if it does not exist."""
     return IMPL.subcloud_update(
-        context, subcloud_id, management_state, availability_status,
-        software_version, name, description, management_subnet,
-        management_gateway_ip, management_start_ip, management_end_ip, location,
-        audit_fail_count, deploy_status, backup_status, backup_datetime,
-        error_description, openstack_installed, group_id, data_install, data_upgrade,
-        first_identity_sync_complete, systemcontroller_gateway_ip, peer_group_id,
-        rehome_data, rehomed, prestage_status, prestage_versions, region_name
+        context,
+        subcloud_id,
+        management_state,
+        availability_status,
+        software_version,
+        name,
+        description,
+        management_subnet,
+        management_gateway_ip,
+        management_start_ip,
+        management_end_ip,
+        location,
+        audit_fail_count,
+        deploy_status,
+        backup_status,
+        backup_datetime,
+        error_description,
+        openstack_installed,
+        group_id,
+        data_install,
+        data_upgrade,
+        first_identity_sync_complete,
+        systemcontroller_gateway_ip,
+        peer_group_id,
+        rehome_data,
+        rehomed,
+        prestage_status,
+        prestage_versions,
+        region_name,
     )
@@ -268,11 +348,12 @@ def subcloud_status_delete(context, subcloud_id, endpoint_type):
 def subcloud_status_db_model_to_dict(subcloud_status):
     """Convert subcloud status db model to dictionary."""
     if subcloud_status:
-        result = {"subcloud_id": subcloud_status.subcloud_id,
-                  "sync_status": subcloud_status.sync_status}
+        result = {
+            "subcloud_id": subcloud_status.subcloud_id,
+            "sync_status": subcloud_status.sync_status,
+        }
     else:
-        result = {"subcloud_id": 0,
-                  "sync_status": "unknown"}
+        result = {"subcloud_id": 0, "sync_status": "unknown"}
 
     return result
@@ -280,8 +361,10 @@ def subcloud_status_db_model_to_dict(subcloud_status):
 def subcloud_endpoint_status_db_model_to_dict(subcloud_status):
     """Convert endpoint subcloud db model to dictionary."""
     if subcloud_status:
-        result = {"endpoint_type": subcloud_status.endpoint_type,
-                  "sync_status": subcloud_status.sync_status}
+        result = {
+            "endpoint_type": subcloud_status.endpoint_type,
+            "sync_status": subcloud_status.sync_status,
+        }
     else:
         result = {}
@@ -309,16 +392,17 @@ def subcloud_status_get_all_by_name(context, name):
 
 def subcloud_status_update(context, subcloud_id, endpoint_type, sync_status):
     """Update the status of a subcloud or raise if it does not exist."""
-    return IMPL.subcloud_status_update(context, subcloud_id, endpoint_type,
-                                       sync_status)
+    return IMPL.subcloud_status_update(context, subcloud_id, endpoint_type, sync_status)
 
 
-def subcloud_status_update_endpoints(context, subcloud_id,
-                                     endpoint_type_list, sync_status):
+def subcloud_status_update_endpoints(
+    context, subcloud_id, endpoint_type_list, sync_status
+):
     """Update all statuses of the endpoints in endpoint_type_list of a subcloud."""
-    return IMPL.subcloud_status_update_endpoints(context, subcloud_id,
-                                                 endpoint_type_list, sync_status)
+    return IMPL.subcloud_status_update_endpoints(
+        context, subcloud_id, endpoint_type_list, sync_status
+    )
 
 
 def subcloud_status_bulk_update_endpoints(context, subcloud_id, endpoint_list):
@@ -341,26 +425,28 @@ def subcloud_status_destroy_all(context, subcloud_id):
 ###################
 # subcloud_group
 
+
 def subcloud_group_db_model_to_dict(subcloud_group):
     """Convert subcloud_group db model to dictionary."""
-    result = {"id": subcloud_group.id,
-              "name": subcloud_group.name,
-              "description": subcloud_group.description,
-              "update_apply_type": subcloud_group.update_apply_type,
-              "max_parallel_subclouds": subcloud_group.max_parallel_subclouds,
-              "created-at": subcloud_group.created_at,
-              "updated-at": subcloud_group.updated_at}
+    result = {
+        "id": subcloud_group.id,
+        "name": subcloud_group.name,
+        "description": subcloud_group.description,
+        "update_apply_type": subcloud_group.update_apply_type,
+        "max_parallel_subclouds": subcloud_group.max_parallel_subclouds,
+        "created-at": subcloud_group.created_at,
+        "updated-at": subcloud_group.updated_at,
+    }
     return result
 
 
-def subcloud_group_create(context, name, description, update_apply_type,
-                          max_parallel_subclouds):
+def subcloud_group_create(
+    context, name, description, update_apply_type, max_parallel_subclouds
+):
     """Create a subcloud_group."""
-    return IMPL.subcloud_group_create(context,
-                                      name,
-                                      description,
-                                      update_apply_type,
-                                      max_parallel_subclouds)
+    return IMPL.subcloud_group_create(
+        context, name, description, update_apply_type, max_parallel_subclouds
+    )
 
 
 def subcloud_group_get(context, group_id):
@@ -383,15 +469,13 @@ def subcloud_get_for_group(context, group_id):
     return IMPL.subcloud_get_for_group(context, group_id)
 
 
-def subcloud_group_update(context, group_id, name, description,
-                          update_apply_type, max_parallel_subclouds):
+def subcloud_group_update(
+    context, group_id, name, description, update_apply_type, max_parallel_subclouds
+):
     """Update the subcloud group or raise if it does not exist."""
-    return IMPL.subcloud_group_update(context,
-                                      group_id,
-                                      name,
-                                      description,
-                                      update_apply_type,
-                                      max_parallel_subclouds)
+    return IMPL.subcloud_group_update(
+        context, group_id, name, description, update_apply_type, max_parallel_subclouds
+    )
 
 
 def subcloud_group_destroy(context, group_id):
@@ -403,45 +487,54 @@ def subcloud_group_destroy(context, group_id):
 # system_peer
 def system_peer_db_model_to_dict(system_peer):
     """Convert system_peer db model to dictionary."""
-    result = {"id": system_peer.id,
-              "peer-uuid": system_peer.peer_uuid,
-              "peer-name": system_peer.peer_name,
-              "manager-endpoint": system_peer.manager_endpoint,
-              "manager-username": system_peer.manager_username,
-              "peer-controller-gateway-address": system_peer.
-              peer_controller_gateway_ip,
-              "administrative-state": system_peer.administrative_state,
-              "heartbeat-interval": system_peer.heartbeat_interval,
-              "heartbeat-failure-threshold": system_peer.
-              heartbeat_failure_threshold,
-              "heartbeat-failure-policy": system_peer.heartbeat_failure_policy,
-              "heartbeat-maintenance-timeout": system_peer.
-              heartbeat_maintenance_timeout,
-              "availability-state": system_peer.availability_state,
-              "created-at": system_peer.created_at,
-              "updated-at": system_peer.updated_at}
+    result = {
+        "id": system_peer.id,
+        "peer-uuid": system_peer.peer_uuid,
+        "peer-name": system_peer.peer_name,
+        "manager-endpoint": system_peer.manager_endpoint,
+        "manager-username": system_peer.manager_username,
+        "peer-controller-gateway-address": system_peer.peer_controller_gateway_ip,
+        "administrative-state": system_peer.administrative_state,
+        "heartbeat-interval": system_peer.heartbeat_interval,
+        "heartbeat-failure-threshold": system_peer.heartbeat_failure_threshold,
+        "heartbeat-failure-policy": system_peer.heartbeat_failure_policy,
+        "heartbeat-maintenance-timeout": system_peer.heartbeat_maintenance_timeout,
+        "availability-state": system_peer.availability_state,
+        "created-at": system_peer.created_at,
+        "updated-at": system_peer.updated_at,
+    }
     return result
 
 
-def system_peer_create(context,
-                       peer_uuid, peer_name,
-                       endpoint, username, password,
-                       gateway_ip,
-                       administrative_state,
-                       heartbeat_interval,
-                       heartbeat_failure_threshold,
-                       heartbeat_failure_policy,
-                       heartbeat_maintenance_timeout):
+def system_peer_create(
+    context,
+    peer_uuid,
+    peer_name,
+    endpoint,
+    username,
+    password,
+    gateway_ip,
+    administrative_state,
+    heartbeat_interval,
+    heartbeat_failure_threshold,
+    heartbeat_failure_policy,
+    heartbeat_maintenance_timeout,
+):
     """Create a system_peer."""
-    return IMPL.system_peer_create(context,
-                                   peer_uuid, peer_name,
-                                   endpoint, username, password,
-                                   gateway_ip,
-                                   administrative_state,
-                                   heartbeat_interval,
-                                   heartbeat_failure_threshold,
-                                   heartbeat_failure_policy,
-                                   heartbeat_maintenance_timeout)
+    return IMPL.system_peer_create(
+        context,
+        peer_uuid,
+        peer_name,
+        endpoint,
+        username,
+        password,
+        gateway_ip,
+        administrative_state,
+        heartbeat_interval,
+        heartbeat_failure_threshold,
+        heartbeat_failure_policy,
+        heartbeat_maintenance_timeout,
+    )
 
 
 def system_peer_get(context, peer_id):
@@ -469,32 +562,46 @@ def peer_group_get_for_system_peer(context, peer_id):
     return IMPL.peer_group_get_for_system_peer(context, peer_id)
 
 
-def system_peer_update(context, peer_id,
-                       peer_uuid=None, peer_name=None,
-                       endpoint=None, username=None, password=None,
-                       gateway_ip=None,
-                       administrative_state=None,
-                       heartbeat_interval=None,
-                       heartbeat_failure_threshold=None,
-                       heartbeat_failure_policy=None,
-                       heartbeat_maintenance_timeout=None,
-                       availability_state=None):
+def system_peer_update(
+    context,
+    peer_id,
+    peer_uuid=None,
+    peer_name=None,
+    endpoint=None,
+    username=None,
+    password=None,
+    gateway_ip=None,
+    administrative_state=None,
+    heartbeat_interval=None,
+    heartbeat_failure_threshold=None,
+    heartbeat_failure_policy=None,
+    heartbeat_maintenance_timeout=None,
+    availability_state=None,
+):
     """Update the system peer or raise if it does not exist."""
-    return IMPL.system_peer_update(context, peer_id,
-                                   peer_uuid, peer_name,
-                                   endpoint, username, password,
-                                   gateway_ip,
-                                   administrative_state,
-                                   heartbeat_interval,
-                                   heartbeat_failure_threshold,
-                                   heartbeat_failure_policy,
-                                   heartbeat_maintenance_timeout,
-                                   availability_state)
+    return IMPL.system_peer_update(
+        context,
+        peer_id,
+        peer_uuid,
+        peer_name,
+        endpoint,
+        username,
+        password,
+        gateway_ip,
+        administrative_state,
+        heartbeat_interval,
+        heartbeat_failure_threshold,
+        heartbeat_failure_policy,
+        heartbeat_maintenance_timeout,
+        availability_state,
+    )
 
 
 def system_peer_destroy(context, peer_id):
     """Destroy the system peer or raise if it does not exist."""
     return IMPL.system_peer_destroy(context, peer_id)
+
+
 ###################
@@ -502,32 +609,42 @@ def system_peer_destroy(context, peer_id):
 # subcloud_peer_group
 def subcloud_peer_group_db_model_to_dict(subcloud_peer_group):
     """Convert subcloud_peer_group db model to dictionary."""
-    result = {"id": subcloud_peer_group.id,
-              "peer_group_name": subcloud_peer_group.peer_group_name,
-              "group_priority": subcloud_peer_group.group_priority,
-              "group_state": subcloud_peer_group.group_state,
-              "max_subcloud_rehoming": subcloud_peer_group.max_subcloud_rehoming,
-              "system_leader_id": subcloud_peer_group.system_leader_id,
-              "system_leader_name": subcloud_peer_group.system_leader_name,
-              "migration_status": subcloud_peer_group.migration_status,
-              "created-at": subcloud_peer_group.created_at,
-              "updated-at": subcloud_peer_group.updated_at}
+    result = {
+        "id": subcloud_peer_group.id,
+        "peer_group_name": subcloud_peer_group.peer_group_name,
+        "group_priority": subcloud_peer_group.group_priority,
+        "group_state": subcloud_peer_group.group_state,
+        "max_subcloud_rehoming": subcloud_peer_group.max_subcloud_rehoming,
+        "system_leader_id": subcloud_peer_group.system_leader_id,
+        "system_leader_name": subcloud_peer_group.system_leader_name,
+        "migration_status": subcloud_peer_group.migration_status,
+        "created-at": subcloud_peer_group.created_at,
+        "updated-at": subcloud_peer_group.updated_at,
+    }
     return result
 
 
-def subcloud_peer_group_create(context, peer_group_name, group_priority,
-                               group_state, max_subcloud_rehoming,
-                               system_leader_id, system_leader_name,
-                               migration_status=None):
+def subcloud_peer_group_create(
+    context,
+    peer_group_name,
+    group_priority,
+    group_state,
+    max_subcloud_rehoming,
+    system_leader_id,
+    system_leader_name,
+    migration_status=None,
+):
     """Create a subcloud_peer_group."""
-    return IMPL.subcloud_peer_group_create(context,
-                                           peer_group_name,
-                                           group_priority,
-                                           group_state,
-                                           max_subcloud_rehoming,
-                                           system_leader_id,
-                                           system_leader_name,
-                                           migration_status)
+    return IMPL.subcloud_peer_group_create(
+        context,
+        peer_group_name,
+        group_priority,
+        group_state,
+        max_subcloud_rehoming,
+        system_leader_id,
+        system_leader_name,
+        migration_status,
+    )
 
 
 def subcloud_peer_group_destroy(context, group_id):
@@ -563,22 +680,31 @@ def subcloud_peer_group_get_all(context):
     return IMPL.subcloud_peer_group_get_all(context)
 
 
-def subcloud_peer_group_update(context, group_id, peer_group_name=None,
-                               group_priority=None, group_state=None,
-                               max_subcloud_rehoming=None,
-                               system_leader_id=None,
-                               system_leader_name=None,
-                               migration_status=None):
+def subcloud_peer_group_update(
+    context,
+    group_id,
+    peer_group_name=None,
+    group_priority=None,
+    group_state=None,
+    max_subcloud_rehoming=None,
+    system_leader_id=None,
+    system_leader_name=None,
+    migration_status=None,
+):
     """Update the subcloud peer group or raise if it does not exist."""
-    return IMPL.subcloud_peer_group_update(context,
-                                           group_id,
-                                           peer_group_name,
-                                           group_priority,
-                                           group_state,
-                                           max_subcloud_rehoming,
-                                           system_leader_id,
-                                           system_leader_name,
-                                           migration_status)
+    return IMPL.subcloud_peer_group_update(
+        context,
+        group_id,
+        peer_group_name,
+        group_priority,
+        group_state,
+        max_subcloud_rehoming,
+        system_leader_id,
+        system_leader_name,
+        migration_status,
+    )
+
+
 ###################
@@ -586,36 +712,48 @@ def subcloud_peer_group_update(context, group_id, peer_group_name=None,
 # peer_group_association
 def peer_group_association_db_model_to_dict(peer_group_association):
"""Convert peer_group_association db model to dictionary.""" - result = {"id": peer_group_association.id, - "peer-group-id": peer_group_association.peer_group_id, - "system-peer-id": peer_group_association.system_peer_id, - "peer-group-priority": peer_group_association.peer_group_priority, - "association-type": peer_group_association.association_type, - "sync-status": peer_group_association.sync_status, - "sync-message": peer_group_association.sync_message, - "created-at": peer_group_association.created_at, - "updated-at": peer_group_association.updated_at} + result = { + "id": peer_group_association.id, + "peer-group-id": peer_group_association.peer_group_id, + "system-peer-id": peer_group_association.system_peer_id, + "peer-group-priority": peer_group_association.peer_group_priority, + "association-type": peer_group_association.association_type, + "sync-status": peer_group_association.sync_status, + "sync-message": peer_group_association.sync_message, + "created-at": peer_group_association.created_at, + "updated-at": peer_group_association.updated_at, + } return result -def peer_group_association_create(context, peer_group_id, system_peer_id, - peer_group_priority, association_type=None, - sync_status=None, sync_message=None): +def peer_group_association_create( + context, + peer_group_id, + system_peer_id, + peer_group_priority, + association_type=None, + sync_status=None, + sync_message=None, +): """Create a peer_group_association.""" - return IMPL.peer_group_association_create(context, - peer_group_id, - system_peer_id, - peer_group_priority, - association_type, - sync_status, - sync_message) + return IMPL.peer_group_association_create( + context, + peer_group_id, + system_peer_id, + peer_group_priority, + association_type, + sync_status, + sync_message, + ) -def peer_group_association_update(context, id, peer_group_priority=None, - sync_status=None, sync_message=None): +def peer_group_association_update( + context, id, peer_group_priority=None, sync_status=None, sync_message=None +): """Update the system peer or raise if it does not exist.""" - return IMPL.peer_group_association_update(context, id, peer_group_priority, - sync_status, sync_message) + return IMPL.peer_group_association_update( + context, id, peer_group_priority, sync_status, sync_message + ) def peer_group_association_destroy(context, id): @@ -633,50 +771,63 @@ def peer_group_association_get_all(context): return IMPL.peer_group_association_get_all(context) -def peer_group_association_get_by_peer_group_and_system_peer_id(context, - peer_group_id, - system_peer_id): +def peer_group_association_get_by_peer_group_and_system_peer_id( + context, peer_group_id, system_peer_id +): """Get peer group associations by peer_group_id and system_peer_id.""" return IMPL.peer_group_association_get_by_peer_group_and_system_peer_id( - context, peer_group_id, system_peer_id) + context, peer_group_id, system_peer_id + ) def peer_group_association_get_by_peer_group_id(context, peer_group_id): """Get the peer_group_association list by peer_group_id""" - return IMPL.peer_group_association_get_by_peer_group_id(context, - peer_group_id) + return IMPL.peer_group_association_get_by_peer_group_id(context, peer_group_id) def peer_group_association_get_by_system_peer_id(context, system_peer_id): """Get the peer_group_association list by system_peer_id""" - return IMPL.peer_group_association_get_by_system_peer_id(context, - system_peer_id) + return IMPL.peer_group_association_get_by_system_peer_id(context, system_peer_id) + + ################### def 
sw_update_strategy_db_model_to_dict(sw_update_strategy): """Convert sw update db model to dictionary.""" - result = {"id": sw_update_strategy.id, - "type": sw_update_strategy.type, - "subcloud-apply-type": sw_update_strategy.subcloud_apply_type, - "max-parallel-subclouds": - sw_update_strategy.max_parallel_subclouds, - "stop-on-failure": sw_update_strategy.stop_on_failure, - "state": sw_update_strategy.state, - "created-at": sw_update_strategy.created_at, - "updated-at": sw_update_strategy.updated_at, - "extra-args": sw_update_strategy.extra_args} + result = { + "id": sw_update_strategy.id, + "type": sw_update_strategy.type, + "subcloud-apply-type": sw_update_strategy.subcloud_apply_type, + "max-parallel-subclouds": sw_update_strategy.max_parallel_subclouds, + "stop-on-failure": sw_update_strategy.stop_on_failure, + "state": sw_update_strategy.state, + "created-at": sw_update_strategy.created_at, + "updated-at": sw_update_strategy.updated_at, + "extra-args": sw_update_strategy.extra_args, + } return result -def sw_update_strategy_create(context, type, subcloud_apply_type, - max_parallel_subclouds, stop_on_failure, state, - extra_args=None): +def sw_update_strategy_create( + context, + type, + subcloud_apply_type, + max_parallel_subclouds, + stop_on_failure, + state, + extra_args=None, +): """Create a sw update.""" - return IMPL.sw_update_strategy_create(context, type, subcloud_apply_type, - max_parallel_subclouds, - stop_on_failure, state, - extra_args=extra_args) + return IMPL.sw_update_strategy_create( + context, + type, + subcloud_apply_type, + max_parallel_subclouds, + stop_on_failure, + state, + extra_args=extra_args, + ) def sw_update_strategy_get(context, update_type=None): @@ -684,13 +835,13 @@ def sw_update_strategy_get(context, update_type=None): return IMPL.sw_update_strategy_get(context, update_type=update_type) -def sw_update_strategy_update(context, state=None, - update_type=None, additional_args=None): +def sw_update_strategy_update( + context, state=None, update_type=None, additional_args=None +): """Update a sw update or raise if it does not exist.""" - return IMPL.sw_update_strategy_update(context, - state, - update_type=update_type, - additional_args=additional_args) + return IMPL.sw_update_strategy_update( + context, state, update_type=update_type, additional_args=additional_args + ) def sw_update_strategy_destroy(context, update_type=None): @@ -700,21 +851,24 @@ def sw_update_strategy_destroy(context, update_type=None): ################### + def strategy_step_db_model_to_dict(strategy_step): """Convert patch strategy db model to dictionary.""" if strategy_step.subcloud is not None: cloud = strategy_step.subcloud.name else: cloud = dccommon_consts.SYSTEM_CONTROLLER_NAME - result = {"id": strategy_step.id, - "cloud": cloud, - "stage": strategy_step.stage, - "state": strategy_step.state, - "details": strategy_step.details, - "started-at": strategy_step.started_at, - "finished-at": strategy_step.finished_at, - "created-at": strategy_step.created_at, - "updated-at": strategy_step.updated_at} + result = { + "id": strategy_step.id, + "cloud": cloud, + "stage": strategy_step.stage, + "state": strategy_step.state, + "details": strategy_step.details, + "started-at": strategy_step.started_at, + "finished-at": strategy_step.finished_at, + "created-at": strategy_step.created_at, + "updated-at": strategy_step.updated_at, + } return result @@ -739,22 +893,27 @@ def strategy_step_get_all(context): def strategy_step_bulk_create(context, subcloud_ids, stage, state, details): """Creates 
the strategy step for a list of subclouds""" - return IMPL.strategy_step_bulk_create( - context, subcloud_ids, stage, state, details - ) + return IMPL.strategy_step_bulk_create(context, subcloud_ids, stage, state, details) def strategy_step_create(context, subcloud_id, stage, state, details): """Create a patch strategy step.""" - return IMPL.strategy_step_create(context, subcloud_id, stage, state, - details) + return IMPL.strategy_step_create(context, subcloud_id, stage, state, details) -def strategy_step_update(context, subcloud_id, stage=None, state=None, - details=None, started_at=None, finished_at=None): +def strategy_step_update( + context, + subcloud_id, + stage=None, + state=None, + details=None, + started_at=None, + finished_at=None, +): """Update a patch strategy step or raise if it does not exist.""" - return IMPL.strategy_step_update(context, subcloud_id, stage, state, - details, started_at, finished_at) + return IMPL.strategy_step_update( + context, subcloud_id, stage, state, details, started_at, finished_at + ) def strategy_step_destroy_all(context): @@ -764,32 +923,43 @@ def strategy_step_destroy_all(context): ################### + def sw_update_opts_w_name_db_model_to_dict(sw_update_opts, subcloud_name): """Convert sw update options db model plus subcloud name to dictionary.""" - result = {"id": sw_update_opts.id, - "name": subcloud_name, - "subcloud-id": sw_update_opts.subcloud_id, - "storage-apply-type": sw_update_opts.storage_apply_type, - "worker-apply-type": sw_update_opts.worker_apply_type, - "max-parallel-workers": sw_update_opts.max_parallel_workers, - "alarm-restriction-type": sw_update_opts.alarm_restriction_type, - "default-instance-action": - sw_update_opts.default_instance_action, - "created-at": sw_update_opts.created_at, - "updated-at": sw_update_opts.updated_at} + result = { + "id": sw_update_opts.id, + "name": subcloud_name, + "subcloud-id": sw_update_opts.subcloud_id, + "storage-apply-type": sw_update_opts.storage_apply_type, + "worker-apply-type": sw_update_opts.worker_apply_type, + "max-parallel-workers": sw_update_opts.max_parallel_workers, + "alarm-restriction-type": sw_update_opts.alarm_restriction_type, + "default-instance-action": sw_update_opts.default_instance_action, + "created-at": sw_update_opts.created_at, + "updated-at": sw_update_opts.updated_at, + } return result -def sw_update_opts_create(context, subcloud_id, storage_apply_type, - worker_apply_type, max_parallel_workers, - alarm_restriction_type, default_instance_action): +def sw_update_opts_create( + context, + subcloud_id, + storage_apply_type, + worker_apply_type, + max_parallel_workers, + alarm_restriction_type, + default_instance_action, +): """Create sw update options.""" - return IMPL.sw_update_opts_create(context, subcloud_id, - storage_apply_type, - worker_apply_type, - max_parallel_workers, - alarm_restriction_type, - default_instance_action) + return IMPL.sw_update_opts_create( + context, + subcloud_id, + storage_apply_type, + worker_apply_type, + max_parallel_workers, + alarm_restriction_type, + default_instance_action, + ) def sw_update_opts_get(context, subcloud_id): @@ -802,19 +972,25 @@ def sw_update_opts_get_all_plus_subcloud_info(context): return IMPL.sw_update_opts_get_all_plus_subcloud_info(context) -def sw_update_opts_update(context, subcloud_id, - storage_apply_type=None, - worker_apply_type=None, - max_parallel_workers=None, - alarm_restriction_type=None, - default_instance_action=None): +def sw_update_opts_update( + context, + subcloud_id, + storage_apply_type=None, 
+ worker_apply_type=None, + max_parallel_workers=None, + alarm_restriction_type=None, + default_instance_action=None, +): """Update sw update options or raise if it does not exist.""" - return IMPL.sw_update_opts_update(context, subcloud_id, - storage_apply_type, - worker_apply_type, - max_parallel_workers, - alarm_restriction_type, - default_instance_action) + return IMPL.sw_update_opts_update( + context, + subcloud_id, + storage_apply_type, + worker_apply_type, + max_parallel_workers, + alarm_restriction_type, + default_instance_action, + ) def sw_update_opts_destroy(context, subcloud_id): @@ -823,17 +999,23 @@ def sw_update_opts_destroy(context, subcloud_id): ################### -def sw_update_opts_default_create(context, storage_apply_type, - worker_apply_type, max_parallel_workers, - alarm_restriction_type, - default_instance_action): +def sw_update_opts_default_create( + context, + storage_apply_type, + worker_apply_type, + max_parallel_workers, + alarm_restriction_type, + default_instance_action, +): """Create default sw update options.""" - return IMPL.sw_update_opts_default_create(context, - storage_apply_type, - worker_apply_type, - max_parallel_workers, - alarm_restriction_type, - default_instance_action) + return IMPL.sw_update_opts_default_create( + context, + storage_apply_type, + worker_apply_type, + max_parallel_workers, + alarm_restriction_type, + default_instance_action, + ) def sw_update_opts_default_get(context): @@ -841,19 +1023,23 @@ def sw_update_opts_default_get(context): return IMPL.sw_update_opts_default_get(context) -def sw_update_opts_default_update(context, - storage_apply_type=None, - worker_apply_type=None, - max_parallel_workers=None, - alarm_restriction_type=None, - default_instance_action=None): +def sw_update_opts_default_update( + context, + storage_apply_type=None, + worker_apply_type=None, + max_parallel_workers=None, + alarm_restriction_type=None, + default_instance_action=None, +): """Update default sw update options.""" - return IMPL.sw_update_opts_default_update(context, - storage_apply_type, - worker_apply_type, - max_parallel_workers, - alarm_restriction_type, - default_instance_action) + return IMPL.sw_update_opts_default_update( + context, + storage_apply_type, + worker_apply_type, + max_parallel_workers, + alarm_restriction_type, + default_instance_action, + ) def sw_update_opts_default_destroy(context): @@ -863,6 +1049,7 @@ def sw_update_opts_default_destroy(context): ################### + def db_sync(engine, version=None): """Migrate the database to `version` or the most recent version.""" return IMPL.db_sync(engine, version=version) diff --git a/distributedcloud/dcmanager/db/sqlalchemy/api.py b/distributedcloud/dcmanager/db/sqlalchemy/api.py index e485f4cc8..b3ae0ba45 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/api.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/api.py @@ -85,7 +85,7 @@ def get_backend(): def model_query(context, *args): with read_session() as session: - query = session.query(*args).options(joinedload_all('*')) + query = session.query(*args).options(joinedload_all("*")) return query @@ -96,9 +96,8 @@ def _session(context): def is_admin_context(context): """Indicate if the request context is an administrator.""" if not context: - LOG.warning(_('Use of empty request context is deprecated'), - DeprecationWarning) - raise Exception('die') + LOG.warning(_("Use of empty request context is deprecated"), DeprecationWarning) + raise Exception("die") return context.is_admin @@ -118,6 +117,7 @@ def 
require_admin_context(f): The first argument to the wrapped function must be the context. """ + def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() @@ -135,6 +135,7 @@ def require_context(f): The first argument to the wrapped function must be the context. """ + def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): raise exception.NotAuthorized() @@ -148,10 +149,12 @@ def require_context(f): @require_context def subcloud_audits_get(context, subcloud_id): - result = model_query(context, models.SubcloudAudits). \ - filter_by(deleted=0). \ - filter_by(subcloud_id=subcloud_id). \ - first() + result = ( + model_query(context, models.SubcloudAudits) + .filter_by(deleted=0) + .filter_by(subcloud_id=subcloud_id) + .first() + ) if not result: raise exception.SubcloudNotFound(subcloud_id=subcloud_id) @@ -161,17 +164,15 @@ def subcloud_audits_get(context, subcloud_id): @require_context def subcloud_audits_get_all(context): - return model_query(context, models.SubcloudAudits). \ - filter_by(deleted=0). \ - all() + return model_query(context, models.SubcloudAudits).filter_by(deleted=0).all() @require_context def subcloud_audits_update_all(context, values): with write_session() as session: - result = session.query(models.SubcloudAudits).\ - filter_by(deleted=0).\ - update(values) + result = ( + session.query(models.SubcloudAudits).filter_by(deleted=0).update(values) + ) return result @@ -196,30 +197,39 @@ def subcloud_audits_update(context, subcloud_id, values): @require_context def subcloud_audits_get_all_need_audit(context, last_audit_threshold): with read_session() as session: - result = session.query(models.SubcloudAudits, - models.Subcloud.name, - models.Subcloud.deploy_status, - models.Subcloud.availability_status).\ - join(models.Subcloud, - models.Subcloud.id == models.SubcloudAudits.subcloud_id).\ - filter_by(deleted=0).\ - filter(models.SubcloudAudits.audit_started_at <= - models.SubcloudAudits.audit_finished_at).\ - filter((models.SubcloudAudits.audit_finished_at < last_audit_threshold) | - (models.SubcloudAudits.patch_audit_requested == true()) | - (models.SubcloudAudits.firmware_audit_requested == true()) | - (models.SubcloudAudits.load_audit_requested == true()) | - (models.SubcloudAudits.kube_rootca_update_audit_requested == - true()) | - (models.SubcloudAudits.kubernetes_audit_requested == true()) | - (models.SubcloudAudits.spare_audit_requested == true())).\ - all() + result = ( + session.query( + models.SubcloudAudits, + models.Subcloud.name, + models.Subcloud.deploy_status, + models.Subcloud.availability_status, + ) + .join( + models.Subcloud, models.Subcloud.id == models.SubcloudAudits.subcloud_id + ) + .filter_by(deleted=0) + .filter( + models.SubcloudAudits.audit_started_at + <= models.SubcloudAudits.audit_finished_at + ) + .filter( + (models.SubcloudAudits.audit_finished_at < last_audit_threshold) + | (models.SubcloudAudits.patch_audit_requested == true()) + | (models.SubcloudAudits.firmware_audit_requested == true()) + | (models.SubcloudAudits.load_audit_requested == true()) + | (models.SubcloudAudits.kube_rootca_update_audit_requested == true()) + | (models.SubcloudAudits.kubernetes_audit_requested == true()) + | (models.SubcloudAudits.spare_audit_requested == true()) + ) + .all() + ) return result # In the functions below it would be cleaner if the timestamp were calculated # by the DB server. If server time is in UTC func.now() might work. 
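+# (Hypothetical sketch of that idea, not taken from this change: passing
+# values = {"audit_started_at": func.now()} to .update() would emit the
+# database server's NOW() instead of a Python-side utcnow() timestamp.)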
+
 @require_context
 def subcloud_audits_get_and_start_audit(context, subcloud_id):
     with write_session() as session:
@@ -255,14 +265,11 @@ def subcloud_audits_end_audit(context, subcloud_id, audits_done):
 
 @require_context
 def subcloud_audits_bulk_end_audit(context, subcloud_ids):
-    values = {
-        "audit_finished_at": datetime.datetime.utcnow()
-    }
+    values = {"audit_finished_at": datetime.datetime.utcnow()}
     with write_session():
-        model_query(context, models.SubcloudAudits). \
-            filter_by(deleted=0). \
-            filter(models.SubcloudAudits.subcloud_id.in_(subcloud_ids)). \
-            update(values, synchronize_session='fetch')
+        model_query(context, models.SubcloudAudits).filter_by(deleted=0).filter(
+            models.SubcloudAudits.subcloud_id.in_(subcloud_ids)
+        ).update(values, synchronize_session="fetch")
 
 
 # Find and fix up subcloud audits where the audit has taken too long.
@@ -270,29 +277,30 @@ def subcloud_audits_bulk_end_audit(context, subcloud_ids):
 # it and update the "finished at" timestamp to be the same as
 # the "started at" timestamp. Returns the number of rows updated.
 @require_context
-def subcloud_audits_fix_expired_audits(context, last_audit_threshold,
-                                       trigger_audits=False):
-    values = {
-        "audit_finished_at": models.SubcloudAudits.audit_started_at
-    }
+def subcloud_audits_fix_expired_audits(
+    context, last_audit_threshold, trigger_audits=False
+):
+    values = {"audit_finished_at": models.SubcloudAudits.audit_started_at}
     if trigger_audits:
         # request all the special audits
-        values['patch_audit_requested'] = True
-        values['firmware_audit_requested'] = True
-        values['load_audit_requested'] = True
-        values['kubernetes_audit_requested'] = True
-        values['kube_rootca_update_audit_requested'] = True
-        values['spare_audit_requested'] = True
+        values["patch_audit_requested"] = True
+        values["firmware_audit_requested"] = True
+        values["load_audit_requested"] = True
+        values["kubernetes_audit_requested"] = True
+        values["kube_rootca_update_audit_requested"] = True
+        values["spare_audit_requested"] = True
     with write_session() as session:
-        result = session.query(models.SubcloudAudits).\
-            options(load_only("deleted", "audit_started_at",
-                              "audit_finished_at")).\
-            filter_by(deleted=0).\
-            filter(models.SubcloudAudits.audit_finished_at <
-                   last_audit_threshold).\
-            filter(models.SubcloudAudits.audit_started_at >
-                   models.SubcloudAudits.audit_finished_at).\
-            update(values, synchronize_session=False)
+        result = (
+            session.query(models.SubcloudAudits)
+            .options(load_only("deleted", "audit_started_at", "audit_finished_at"))
+            .filter_by(deleted=0)
+            .filter(models.SubcloudAudits.audit_finished_at < last_audit_threshold)
+            .filter(
+                models.SubcloudAudits.audit_started_at
+                > models.SubcloudAudits.audit_finished_at
+            )
+            .update(values, synchronize_session=False)
+        )
     return result
 
@@ -301,10 +309,12 @@ def subcloud_audits_fix_expired_audits(context, last_audit_threshold,
 
 @require_context
 def subcloud_get(context, subcloud_id):
-    result = model_query(context, models.Subcloud). \
-        filter_by(deleted=0). \
-        filter_by(id=subcloud_id). \
-        first()
+    result = (
+        model_query(context, models.Subcloud)
+        .filter_by(deleted=0)
+        .filter_by(id=subcloud_id)
+        .first()
+    )
 
     if not result:
         raise exception.SubcloudNotFound(subcloud_id=subcloud_id)
@@ -314,14 +324,18 @@ def subcloud_get(context, subcloud_id):
 
 @require_context
 def subcloud_get_with_status(context, subcloud_id):
-    result = model_query(context, models.Subcloud, models.SubcloudStatus). \
-        outerjoin(models.SubcloudStatus,
-                  (models.Subcloud.id == models.SubcloudStatus.subcloud_id) |
-                  (not models.SubcloudStatus.subcloud_id)). \
-        filter(models.Subcloud.id == subcloud_id). \
-        filter(models.Subcloud.deleted == 0). \
-        order_by(models.SubcloudStatus.endpoint_type). \
-        all()
+    result = (
+        model_query(context, models.Subcloud, models.SubcloudStatus)
+        .outerjoin(
+            models.SubcloudStatus,
+            (models.Subcloud.id == models.SubcloudStatus.subcloud_id)
+            | (not models.SubcloudStatus.subcloud_id),
+        )
+        .filter(models.Subcloud.id == subcloud_id)
+        .filter(models.Subcloud.deleted == 0)
+        .order_by(models.SubcloudStatus.endpoint_type)
+        .all()
+    )
     if not result:
         raise exception.SubcloudNotFound(subcloud_id=subcloud_id)
 
@@ -331,10 +345,12 @@ def subcloud_get_with_status(context, subcloud_id):
 
 @require_context
 def subcloud_get_by_name(context, name):
-    result = model_query(context, models.Subcloud). \
-        filter_by(deleted=0). \
-        filter_by(name=name). \
-        first()
+    result = (
+        model_query(context, models.Subcloud)
+        .filter_by(deleted=0)
+        .filter_by(name=name)
+        .first()
+    )
 
     if not result:
         raise exception.SubcloudNameNotFound(name=name)
@@ -344,10 +360,12 @@ def subcloud_get_by_name(context, name):
 
 @require_context
 def subcloud_get_by_region_name(context, region_name):
-    result = model_query(context, models.Subcloud). \
-        filter_by(deleted=0). \
-        filter_by(region_name=region_name). \
-        first()
+    result = (
+        model_query(context, models.Subcloud)
+        .filter_by(deleted=0)
+        .filter_by(region_name=region_name)
+        .first()
+    )
 
     if not result:
         raise exception.SubcloudRegionNameNotFound(region_name=region_name)
@@ -357,9 +375,12 @@ def subcloud_get_by_region_name(context, region_name):
 
 @require_context
 def subcloud_get_by_name_or_region_name(context, name):
-    result = model_query(context, models.Subcloud).filter_by(deleted=0).filter(
-        or_(models.Subcloud.name == name, models.Subcloud.region_name == name)
-    ).first()
+    result = (
+        model_query(context, models.Subcloud)
+        .filter_by(deleted=0)
+        .filter(or_(models.Subcloud.name == name, models.Subcloud.region_name == name))
+        .first()
+    )
 
     if not result:
         raise exception.SubcloudNameOrRegionNameNotFound(name=name)
@@ -369,49 +390,59 @@ def subcloud_get_by_name_or_region_name(context, name):
 
 @require_context
 def subcloud_get_all(context):
-    return model_query(context, models.Subcloud). \
-        filter_by(deleted=0). \
-        all()
+    return model_query(context, models.Subcloud).filter_by(deleted=0).all()
 
 
 @require_context
 def subcloud_get_all_by_group_id(context, group_id):
     """Retrieve all subclouds that belong to the specified group id"""
-    return model_query(context, models.Subcloud). \
-        filter_by(deleted=0). \
-        filter_by(group_id=group_id). \
-        all()
+    return (
+        model_query(context, models.Subcloud)
+        .filter_by(deleted=0)
+        .filter_by(group_id=group_id)
+        .all()
+    )
 
 
 def subcloud_get_all_ordered_by_id(context):
-    return model_query(context, models.Subcloud). \
-        filter_by(deleted=0). \
-        order_by(models.Subcloud.id). \
-        all()
+    return (
+        model_query(context, models.Subcloud)
+        .filter_by(deleted=0)
+        .order_by(models.Subcloud.id)
+        .all()
+    )
 
 
 @require_context
 def subcloud_get_all_with_status(context):
-    result = model_query(
-        context,
-        models.Subcloud,
-        models.SubcloudStatus.endpoint_type,
-        models.SubcloudStatus.sync_status
-    ).join(
-        models.SubcloudStatus,
-        models.Subcloud.id == models.SubcloudStatus.subcloud_id
-    ).filter(
-        models.Subcloud.deleted == 0
-    ).order_by(models.Subcloud.id).all()
+    result = (
+        model_query(
+            context,
+            models.Subcloud,
+            models.SubcloudStatus.endpoint_type,
+            models.SubcloudStatus.sync_status,
+        )
+        .join(
+            models.SubcloudStatus,
+            models.Subcloud.id == models.SubcloudStatus.subcloud_id,
+        )
+        .filter(models.Subcloud.deleted == 0)
+        .order_by(models.Subcloud.id)
+        .all()
+    )
     return result
 
 
 @require_context
 def subcloud_get_all_valid_for_strategy_step_creation(
-    context, endpoint_type, group_id=None, subcloud_name=None,
-    availability_status=None, sync_status=None
+    context,
+    endpoint_type,
+    group_id=None,
+    subcloud_name=None,
+    availability_status=None,
+    sync_status=None,
 ):
     """Queries all the subclouds that are valid for the strategy step to create
 
@@ -431,7 +462,7 @@ def subcloud_get_all_valid_for_strategy_step_creation(
             models.Subcloud, models.SubcloudStatus.sync_status
         ).filter(
             models.Subcloud.deleted == 0,
-            models.Subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED
+            models.Subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED,
         )
 
         if group_id:
@@ -446,15 +477,13 @@ def subcloud_get_all_valid_for_strategy_step_creation(
 
         query = query.join(
             models.SubcloudStatus,
-            models.Subcloud.id == models.SubcloudStatus.subcloud_id
+            models.Subcloud.id == models.SubcloudStatus.subcloud_id,
         ).filter(
             models.SubcloudStatus.endpoint_type == endpoint_type,
         )
 
         if sync_status:
-            query = query.filter(
-                models.SubcloudStatus.sync_status.in_(sync_status)
-            )
+            query = query.filter(models.SubcloudStatus.sync_status.in_(sync_status))
 
     return query.all()
 
@@ -478,7 +507,7 @@ def subcloud_count_invalid_for_strategy_type(
     with read_session() as session:
         query = session.query(models.Subcloud).filter(
             models.Subcloud.deleted == 0,
-            models.Subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED
+            models.Subcloud.management_state == dccommon_consts.MANAGEMENT_MANAGED,
         )
 
         if group_id:
@@ -488,28 +517,40 @@ def subcloud_count_invalid_for_strategy_type(
 
         if not force:
             query = query.filter(
-                models.Subcloud.availability_status ==
-                dccommon_consts.AVAILABILITY_ONLINE
+                models.Subcloud.availability_status
+                == dccommon_consts.AVAILABILITY_ONLINE
             )
 
         query = query.join(
             models.SubcloudStatus,
-            models.Subcloud.id == models.SubcloudStatus.subcloud_id
+            models.Subcloud.id == models.SubcloudStatus.subcloud_id,
        ).filter(
             models.SubcloudStatus.endpoint_type == endpoint_type,
-            models.SubcloudStatus.sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN
+            models.SubcloudStatus.sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN,
         )
 
     return query.count()
 
 
 @require_admin_context
-def subcloud_create(context, name, description, location, software_version,
-                    management_subnet, management_gateway_ip,
-                    management_start_ip, management_end_ip,
-                    systemcontroller_gateway_ip, deploy_status, error_description,
-                    region_name, openstack_installed, group_id,
-                    data_install=None):
+def subcloud_create(
+    context,
+    name,
+    description,
+    location,
+    software_version,
+    management_subnet,
+    management_gateway_ip,
+    management_start_ip,
+    management_end_ip,
+    systemcontroller_gateway_ip,
+    deploy_status,
+    error_description,
+    region_name,
+    openstack_installed,
+    group_id,
+    data_install=None,
+):
     with write_session() as session:
         subcloud_ref = models.Subcloud()
         subcloud_ref.name = name
@@ -539,23 +580,37 @@ def subcloud_create(context, name, description, location, software_version,
 
 
 @require_admin_context
-def subcloud_update(context, subcloud_id, management_state=None,
-                    availability_status=None, software_version=None,
-                    name=None, description=None, management_subnet=None,
-                    management_gateway_ip=None, management_start_ip=None,
-                    management_end_ip=None, location=None, audit_fail_count=None,
-                    deploy_status=None, backup_status=None,
-                    backup_datetime=None, error_description=None,
-                    openstack_installed=None,
-                    group_id=None, data_install=None, data_upgrade=None,
-                    first_identity_sync_complete=None, systemcontroller_gateway_ip=None,
-                    peer_group_id=None, rehome_data=None, rehomed=None,
-                    prestage_status=None, prestage_versions=None, region_name=None):
+def subcloud_update(
+    context,
+    subcloud_id,
+    management_state=None,
+    availability_status=None,
+    software_version=None,
+    name=None,
+    description=None,
+    management_subnet=None,
+    management_gateway_ip=None,
+    management_start_ip=None,
+    management_end_ip=None,
+    location=None,
+    audit_fail_count=None,
+    deploy_status=None,
+    backup_status=None,
+    backup_datetime=None,
+    error_description=None,
+    openstack_installed=None,
+    group_id=None,
+    data_install=None,
+    data_upgrade=None,
+    first_identity_sync_complete=None,
+    systemcontroller_gateway_ip=None,
+    peer_group_id=None,
+    rehome_data=None,
+    rehomed=None,
+    prestage_status=None,
+    prestage_versions=None,
+    region_name=None,
+):
     with write_session() as session:
         subcloud_ref = subcloud_get(context, subcloud_id)
         if management_state is not None:
@@ -599,10 +654,9 @@ def subcloud_update(context, subcloud_id, management_state=None,
         if first_identity_sync_complete is not None:
             subcloud_ref.first_identity_sync_complete = first_identity_sync_complete
         if systemcontroller_gateway_ip is not None:
-            subcloud_ref.systemcontroller_gateway_ip = \
-                systemcontroller_gateway_ip
+            subcloud_ref.systemcontroller_gateway_ip = systemcontroller_gateway_ip
         if peer_group_id is not None:
-            if str(peer_group_id).lower() == 'none':
+            if str(peer_group_id).lower() == "none":
                 subcloud_ref.peer_group_id = None
             else:
                 subcloud_ref.peer_group_id = peer_group_id
@@ -623,10 +677,9 @@ def subcloud_update(context, subcloud_id, management_state=None,
 @require_admin_context
 def subcloud_bulk_update_by_ids(context, subcloud_ids, update_form):
     with write_session():
-        model_query(context, models.Subcloud). \
-            filter_by(deleted=0). \
-            filter(models.Subcloud.id.in_(subcloud_ids)). \
-            update(update_form, synchronize_session="fetch")
+        model_query(context, models.Subcloud).filter_by(deleted=0).filter(
+            models.Subcloud.id.in_(subcloud_ids)
+        ).update(update_form, synchronize_session="fetch")
 
 
 @require_admin_context
@@ -638,45 +691,56 @@ def subcloud_destroy(context, subcloud_id):
 
 ##########################
 
+
 @require_context
 def subcloud_status_get(context, subcloud_id, endpoint_type):
-    result = model_query(context, models.SubcloudStatus). \
-        filter_by(deleted=0). \
-        filter_by(subcloud_id=subcloud_id). \
-        filter_by(endpoint_type=endpoint_type). \
-        first()
+    result = (
+        model_query(context, models.SubcloudStatus)
+        .filter_by(deleted=0)
+        .filter_by(subcloud_id=subcloud_id)
+        .filter_by(endpoint_type=endpoint_type)
+        .first()
+    )
     if not result:
-        raise exception.SubcloudStatusNotFound(subcloud_id=subcloud_id,
-                                               endpoint_type=endpoint_type)
+        raise exception.SubcloudStatusNotFound(
+            subcloud_id=subcloud_id, endpoint_type=endpoint_type
+        )
 
     return result
 
 
 @require_context
 def subcloud_status_get_all(context, subcloud_id):
-    return model_query(context, models.SubcloudStatus). \
-        filter_by(deleted=0). \
-        join(models.Subcloud,
-             models.SubcloudStatus.subcloud_id == models.Subcloud.id). \
-        filter(models.Subcloud.id == subcloud_id).all()
+    return (
+        model_query(context, models.SubcloudStatus)
+        .filter_by(deleted=0)
+        .join(models.Subcloud, models.SubcloudStatus.subcloud_id == models.Subcloud.id)
+        .filter(models.Subcloud.id == subcloud_id)
+        .all()
+    )
 
 
 @require_context
 def _subcloud_status_get_by_endpoint_types(context, subcloud_id, endpoint_types):
-    return model_query(context, models.SubcloudStatus). \
-        filter_by(deleted=0). \
-        filter(models.SubcloudStatus.subcloud_id == subcloud_id).\
-        filter(models.SubcloudStatus.endpoint_type.in_(endpoint_types)).all()
+    return (
+        model_query(context, models.SubcloudStatus)
+        .filter_by(deleted=0)
+        .filter(models.SubcloudStatus.subcloud_id == subcloud_id)
+        .filter(models.SubcloudStatus.endpoint_type.in_(endpoint_types))
+        .all()
+    )
 
 
 @require_context
 def subcloud_status_get_all_by_name(context, name):
-    return model_query(context, models.SubcloudStatus). \
-        filter_by(deleted=0). \
-        join(models.Subcloud,
-             models.SubcloudStatus.subcloud_id == models.Subcloud.id). \
-        filter(models.Subcloud.name == name).all()
+    return (
+        model_query(context, models.SubcloudStatus)
+        .filter_by(deleted=0)
+        .join(models.Subcloud, models.SubcloudStatus.subcloud_id == models.Subcloud.id)
+        .filter(models.Subcloud.name == name)
+        .all()
+    )
 
 
 @require_admin_context
@@ -704,37 +768,39 @@ def subcloud_status_create_all(context, subcloud_id):
 @require_admin_context
 def subcloud_status_delete(context, subcloud_id, endpoint_type):
     with write_session() as session:
-        subcloud_status_ref = subcloud_status_get(context, subcloud_id,
-                                                  endpoint_type)
+        subcloud_status_ref = subcloud_status_get(context, subcloud_id, endpoint_type)
         session.delete(subcloud_status_ref)
 
 
 @require_admin_context
 def subcloud_status_update(context, subcloud_id, endpoint_type, sync_status):
     with write_session() as session:
-        subcloud_status_ref = subcloud_status_get(context, subcloud_id,
-                                                  endpoint_type)
+        subcloud_status_ref = subcloud_status_get(context, subcloud_id, endpoint_type)
         subcloud_status_ref.sync_status = sync_status
         subcloud_status_ref.save(session)
         return subcloud_status_ref
 
 
 @require_admin_context
-def subcloud_status_update_endpoints(context, subcloud_id,
-                                     endpoint_type_list, sync_status):
+def subcloud_status_update_endpoints(
+    context, subcloud_id, endpoint_type_list, sync_status
+):
     """Update all statuses of endpoints in endpoint_type_list of a subcloud.
 
     Will raise if subcloud status does not exist.
     """
     value = {"sync_status": sync_status}
     with write_session() as session:
-        result = session.query(models.SubcloudStatus). \
-            filter_by(subcloud_id=subcloud_id). \
-            filter(models.SubcloudStatus.endpoint_type.in_(endpoint_type_list)). \
-            update(value, synchronize_session=False)
+        result = (
+            session.query(models.SubcloudStatus)
+            .filter_by(subcloud_id=subcloud_id)
+            .filter(models.SubcloudStatus.endpoint_type.in_(endpoint_type_list))
+            .update(value, synchronize_session=False)
+        )
 
     if not result:
-        raise exception.SubcloudStatusNotFound(subcloud_id=subcloud_id,
-                                               endpoint_type="any")
+        raise exception.SubcloudStatusNotFound(
+            subcloud_id=subcloud_id, endpoint_type="any"
+        )
 
     return result
 
@@ -755,10 +821,12 @@ def subcloud_status_bulk_update_endpoints(context, subcloud_id, endpoint_list):
     # its respective sync_status
     update_list = list()
     for subcloud_status in subcloud_statuses:
-        update_list.append({
-            "_id": subcloud_status.id,
-            "sync_status": endpoint_list[subcloud_status.endpoint_type]
-        })
+        update_list.append(
+            {
+                "_id": subcloud_status.id,
+                "sync_status": endpoint_list[subcloud_status.endpoint_type],
+            }
+        )
 
     # Bindparam associates keys from update_list to columns in the database
     # query. This way, for each of the items that needs update, it's possible to
    # set a specific sync_status
@@ -766,9 +834,11 @@ def subcloud_status_bulk_update_endpoints(context, subcloud_id, endpoint_list):
     # endpoints with each of them having one of three values:
     # in-sync, out-of-sync and unknown.
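+    # (Editor's illustrative note, an assumption rather than original text:
+    # session.execute(statement, update_list) below applies the UPDATE once
+    # per dict in update_list, binding that dict's "_id" and "sync_status".)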
     with write_session() as session:
-        statement = update(models.SubcloudStatus).\
-            where(models.SubcloudStatus.id == bindparam("_id")).\
-            values(sync_status=bindparam("sync_status"))
+        statement = (
+            update(models.SubcloudStatus)
+            .where(models.SubcloudStatus.id == bindparam("_id"))
+            .values(sync_status=bindparam("sync_status"))
+        )
 
         result = session.execute(statement, update_list)
 
     if not result:
@@ -787,8 +857,9 @@ def subcloud_status_destroy_all(context, subcloud_id):
         for subcloud_status_ref in subcloud_statuses:
             session.delete(subcloud_status_ref)
     else:
-        raise exception.SubcloudStatusNotFound(subcloud_id=subcloud_id,
-                                               endpoint_type="any")
+        raise exception.SubcloudStatusNotFound(
+            subcloud_id=subcloud_id, endpoint_type="any"
+        )
 
 
 ###################
@@ -807,9 +878,15 @@ def sw_update_strategy_get(context, update_type=None):
 
 
 @require_admin_context
-def sw_update_strategy_create(context, type, subcloud_apply_type,
-                              max_parallel_subclouds, stop_on_failure, state,
-                              extra_args=None):
+def sw_update_strategy_create(
+    context,
+    type,
+    subcloud_apply_type,
+    max_parallel_subclouds,
+    stop_on_failure,
+    state,
+    extra_args=None,
+):
     with write_session() as session:
         sw_update_strategy_ref = models.SwUpdateStrategy()
         sw_update_strategy_ref.type = type
@@ -824,11 +901,13 @@ def sw_update_strategy_create(context, type, subcloud_apply_type,
 
 
 @require_admin_context
-def sw_update_strategy_update(context, state=None,
-                              update_type=None, additional_args=None):
+def sw_update_strategy_update(
+    context, state=None, update_type=None, additional_args=None
+):
     with write_session() as session:
-        sw_update_strategy_ref = \
-            sw_update_strategy_get(context, update_type=update_type)
+        sw_update_strategy_ref = sw_update_strategy_get(
+            context, update_type=update_type
+        )
         if state is not None:
             sw_update_strategy_ref.state = state
         if additional_args is not None:
@@ -837,7 +916,8 @@ def sw_update_strategy_update(context, state=None,
             else:
                 # extend the existing dictionary
                 sw_update_strategy_ref.extra_args = dict(
-                    sw_update_strategy_ref.extra_args, **additional_args)
+                    sw_update_strategy_ref.extra_args, **additional_args
+                )
         sw_update_strategy_ref.save(session)
         return sw_update_strategy_ref
 
@@ -845,8 +925,9 @@ def sw_update_strategy_update(context, state=None,
 
 @require_admin_context
def sw_update_strategy_destroy(context, update_type=None): with write_session() as session: - sw_update_strategy_ref = \ - sw_update_strategy_get(context, update_type=update_type) + sw_update_strategy_ref = sw_update_strategy_get( + context, update_type=update_type + ) session.delete(sw_update_strategy_ref) @@ -855,10 +936,12 @@ def sw_update_strategy_destroy(context, update_type=None): @require_context def sw_update_opts_get(context, subcloud_id): - result = model_query(context, models.SwUpdateOpts). \ - filter_by(deleted=0). \ - filter_by(subcloud_id=subcloud_id). \ - first() + result = ( + model_query(context, models.SwUpdateOpts) + .filter_by(deleted=0) + .filter_by(subcloud_id=subcloud_id) + .first() + ) # Note we will return None if not found return result @@ -866,23 +949,31 @@ def sw_update_opts_get(context, subcloud_id): @require_context def sw_update_opts_get_all_plus_subcloud_info(context): - result = model_query(context, models.Subcloud, models.SwUpdateOpts). \ - outerjoin(models.SwUpdateOpts, - (models.Subcloud.id == models.SwUpdateOpts.subcloud_id) | - (not models.SubcloudStatus.subcloud_id)). \ - filter(models.Subcloud.deleted == 0). \ - order_by(models.Subcloud.id). \ - all() + result = ( + model_query(context, models.Subcloud, models.SwUpdateOpts) + .outerjoin( + models.SwUpdateOpts, + (models.Subcloud.id == models.SwUpdateOpts.subcloud_id) + | (not models.SubcloudStatus.subcloud_id), + ) + .filter(models.Subcloud.deleted == 0) + .order_by(models.Subcloud.id) + .all() + ) return result @require_admin_context -def sw_update_opts_create(context, subcloud_id, storage_apply_type, - worker_apply_type, - max_parallel_workers, - alarm_restriction_type, - default_instance_action): +def sw_update_opts_create( + context, + subcloud_id, + storage_apply_type, + worker_apply_type, + max_parallel_workers, + alarm_restriction_type, + default_instance_action, +): with write_session() as session: sw_update_opts_ref = models.SwUpdateOpts() sw_update_opts_ref.subcloud_id = subcloud_id @@ -896,10 +987,15 @@ def sw_update_opts_create(context, subcloud_id, storage_apply_type, @require_admin_context -def sw_update_opts_update(context, subcloud_id, storage_apply_type=None, - worker_apply_type=None, max_parallel_workers=None, - alarm_restriction_type=None, - default_instance_action=None): +def sw_update_opts_update( + context, + subcloud_id, + storage_apply_type=None, + worker_apply_type=None, + max_parallel_workers=None, + alarm_restriction_type=None, + default_instance_action=None, +): with write_session() as session: sw_update_opts_ref = sw_update_opts_get(context, subcloud_id) if storage_apply_type is not None: @@ -911,8 +1007,7 @@ def sw_update_opts_update(context, subcloud_id, storage_apply_type=None, if alarm_restriction_type is not None: sw_update_opts_ref.alarm_restriction_type = alarm_restriction_type if default_instance_action is not None: - sw_update_opts_ref.default_instance_action = \ - default_instance_action + sw_update_opts_ref.default_instance_action = default_instance_action sw_update_opts_ref.save(session) return sw_update_opts_ref @@ -929,41 +1024,44 @@ def sw_update_opts_destroy(context, subcloud_id): @require_context def sw_update_opts_default_get(context): - result = model_query(context, models.SwUpdateOptsDefault). \ - filter_by(deleted=0). 
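# A note on the outerjoin in sw_update_opts_get_all_plus_subcloud_info above:
# the sub-expression "(not models.SubcloudStatus.subcloud_id)" is evaluated by
# Python when the query is built, not rendered into SQL, so it cannot express
# a per-row condition (and it names SubcloudStatus inside a Subcloud-to-
# SwUpdateOpts join). The reformat preserves the original behavior; if a NULL
# test was intended, the usual SQLAlchemy spelling would be the hypothetical
# sketch below.
#
#     model_query(context, models.Subcloud, models.SwUpdateOpts).outerjoin(
#         models.SwUpdateOpts,
#         models.Subcloud.id == models.SwUpdateOpts.subcloud_id,
#     )
#     # The OUTER JOIN already returns subclouds that have no options row
#     # (their SwUpdateOpts columns come back as None); an explicit NULL
#     # check would use models.SwUpdateOpts.subcloud_id.is_(None), never
#     # Python "not".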
\ - first() + result = ( + model_query(context, models.SwUpdateOptsDefault).filter_by(deleted=0).first() + ) # Note we will return None if not found return result @require_admin_context -def sw_update_opts_default_create(context, storage_apply_type, - worker_apply_type, - max_parallel_workers, - alarm_restriction_type, - default_instance_action): +def sw_update_opts_default_create( + context, + storage_apply_type, + worker_apply_type, + max_parallel_workers, + alarm_restriction_type, + default_instance_action, +): with write_session() as session: sw_update_opts_default_ref = models.SwUpdateOptsDefault() sw_update_opts_default_ref.subcloud_id = 0 sw_update_opts_default_ref.storage_apply_type = storage_apply_type sw_update_opts_default_ref.worker_apply_type = worker_apply_type - sw_update_opts_default_ref.max_parallel_workers = \ - max_parallel_workers - sw_update_opts_default_ref.alarm_restriction_type = \ - alarm_restriction_type - sw_update_opts_default_ref.default_instance_action = \ - default_instance_action + sw_update_opts_default_ref.max_parallel_workers = max_parallel_workers + sw_update_opts_default_ref.alarm_restriction_type = alarm_restriction_type + sw_update_opts_default_ref.default_instance_action = default_instance_action session.add(sw_update_opts_default_ref) return sw_update_opts_default_ref @require_admin_context -def sw_update_opts_default_update(context, storage_apply_type=None, - worker_apply_type=None, - max_parallel_workers=None, - alarm_restriction_type=None, - default_instance_action=None): +def sw_update_opts_default_update( + context, + storage_apply_type=None, + worker_apply_type=None, + max_parallel_workers=None, + alarm_restriction_type=None, + default_instance_action=None, +): with write_session() as session: sw_update_opts_default_ref = sw_update_opts_default_get(context) if storage_apply_type is not None: @@ -971,14 +1069,11 @@ def sw_update_opts_default_update(context, storage_apply_type=None, if worker_apply_type is not None: sw_update_opts_default_ref.worker_apply_type = worker_apply_type if max_parallel_workers is not None: - sw_update_opts_default_ref.max_parallel_workers = \ - max_parallel_workers + sw_update_opts_default_ref.max_parallel_workers = max_parallel_workers if alarm_restriction_type is not None: - sw_update_opts_default_ref.alarm_restriction_type = \ - alarm_restriction_type + sw_update_opts_default_ref.alarm_restriction_type = alarm_restriction_type if default_instance_action is not None: - sw_update_opts_default_ref.default_instance_action = \ - default_instance_action + sw_update_opts_default_ref.default_instance_action = default_instance_action sw_update_opts_default_ref.save(session) return sw_update_opts_default_ref @@ -996,15 +1091,18 @@ def sw_update_opts_default_destroy(context): @require_context def system_peer_get(context, peer_id): try: - result = model_query(context, models.SystemPeer). \ - filter_by(deleted=0). \ - filter_by(id=peer_id). \ - one() + result = ( + model_query(context, models.SystemPeer) + .filter_by(deleted=0) + .filter_by(id=peer_id) + .one() + ) except exc.NoResultFound: raise exception.SystemPeerNotFound(peer_id=peer_id) except exc.MultipleResultsFound: raise exception.InvalidParameterValue( - err="Multiple entries found for system peer %s" % peer_id) + err="Multiple entries found for system peer %s" % peer_id + ) return result @@ -1012,16 +1110,19 @@ def system_peer_get(context, peer_id): @require_context def system_peer_get_by_name(context, name): try: - result = model_query(context, models.SystemPeer). 
\ - filter_by(deleted=0). \ - filter_by(peer_name=name). \ - one() + result = ( + model_query(context, models.SystemPeer) + .filter_by(deleted=0) + .filter_by(peer_name=name) + .one() + ) except exc.NoResultFound: raise exception.SystemPeerNameNotFound(name=name) except exc.MultipleResultsFound: # This exception should never happen due to the UNIQUE setting for name raise exception.InvalidParameterValue( - err="Multiple entries found for system peer %s" % name) + err="Multiple entries found for system peer %s" % name + ) return result @@ -1029,26 +1130,31 @@ def system_peer_get_by_name(context, name): @require_context def system_peer_get_by_uuid(context, uuid): try: - result = model_query(context, models.SystemPeer). \ - filter_by(deleted=0). \ - filter_by(peer_uuid=uuid). \ - one() + result = ( + model_query(context, models.SystemPeer) + .filter_by(deleted=0) + .filter_by(peer_uuid=uuid) + .one() + ) except exc.NoResultFound: raise exception.SystemPeerUUIDNotFound(uuid=uuid) except exc.MultipleResultsFound: # This exception should never happen due to the UNIQUE setting for uuid raise exception.InvalidParameterValue( - err="Multiple entries found for system peer %s" % uuid) + err="Multiple entries found for system peer %s" % uuid + ) return result @require_context def system_peer_get_all(context): - result = model_query(context, models.SystemPeer). \ - filter_by(deleted=0). \ - order_by(models.SystemPeer.id). \ - all() + result = ( + model_query(context, models.SystemPeer) + .filter_by(deleted=0) + .order_by(models.SystemPeer.id) + .all() + ) return result @@ -1056,26 +1162,35 @@ def system_peer_get_all(context): # This method returns all subcloud peer groups for a particular system peer @require_context def peer_group_get_for_system_peer(context, peer_id): - return model_query(context, models.SubcloudPeerGroup). \ - join(models.PeerGroupAssociation, models.SubcloudPeerGroup.id == - models.PeerGroupAssociation.peer_group_id). \ - filter(models.SubcloudPeerGroup.deleted == 0). \ - filter(models.PeerGroupAssociation.system_peer_id == peer_id). \ - order_by(models.SubcloudPeerGroup.id). 
\ - all() + return ( + model_query(context, models.SubcloudPeerGroup) + .join( + models.PeerGroupAssociation, + models.SubcloudPeerGroup.id == models.PeerGroupAssociation.peer_group_id, + ) + .filter(models.SubcloudPeerGroup.deleted == 0) + .filter(models.PeerGroupAssociation.system_peer_id == peer_id) + .order_by(models.SubcloudPeerGroup.id) + .all() + ) @require_admin_context -def system_peer_create(context, - peer_uuid, peer_name, - endpoint, username, password, - gateway_ip, - administrative_state="enabled", - heartbeat_interval=60, - heartbeat_failure_threshold=3, - heartbeat_failure_policy="alarm", - heartbeat_maintenance_timeout=600, - availability_state="created"): +def system_peer_create( + context, + peer_uuid, + peer_name, + endpoint, + username, + password, + gateway_ip, + administrative_state="enabled", + heartbeat_interval=60, + heartbeat_failure_threshold=3, + heartbeat_failure_policy="alarm", + heartbeat_maintenance_timeout=600, + availability_state="created", +): with write_session() as session: system_peer_ref = models.SystemPeer() system_peer_ref.peer_uuid = peer_uuid @@ -1086,27 +1201,31 @@ def system_peer_create(context, system_peer_ref.peer_controller_gateway_ip = gateway_ip system_peer_ref.administrative_state = administrative_state system_peer_ref.heartbeat_interval = heartbeat_interval - system_peer_ref.heartbeat_failure_threshold = \ - heartbeat_failure_threshold + system_peer_ref.heartbeat_failure_threshold = heartbeat_failure_threshold system_peer_ref.heartbeat_failure_policy = heartbeat_failure_policy - system_peer_ref.heartbeat_maintenance_timeout = \ - heartbeat_maintenance_timeout + system_peer_ref.heartbeat_maintenance_timeout = heartbeat_maintenance_timeout system_peer_ref.availability_state = availability_state session.add(system_peer_ref) return system_peer_ref @require_admin_context -def system_peer_update(context, peer_id, - peer_uuid=None, peer_name=None, - endpoint=None, username=None, password=None, - gateway_ip=None, - administrative_state=None, - heartbeat_interval=None, - heartbeat_failure_threshold=None, - heartbeat_failure_policy=None, - heartbeat_maintenance_timeout=None, - availability_state=None): +def system_peer_update( + context, + peer_id, + peer_uuid=None, + peer_name=None, + endpoint=None, + username=None, + password=None, + gateway_ip=None, + administrative_state=None, + heartbeat_interval=None, + heartbeat_failure_threshold=None, + heartbeat_failure_policy=None, + heartbeat_maintenance_timeout=None, + availability_state=None, +): with write_session() as session: system_peer_ref = system_peer_get(context, peer_id) if peer_uuid is not None: @@ -1126,13 +1245,13 @@ def system_peer_update(context, peer_id, if heartbeat_interval is not None: system_peer_ref.heartbeat_interval = heartbeat_interval if heartbeat_failure_threshold is not None: - system_peer_ref.heartbeat_failure_threshold = \ - heartbeat_failure_threshold + system_peer_ref.heartbeat_failure_threshold = heartbeat_failure_threshold if heartbeat_failure_policy is not None: system_peer_ref.heartbeat_failure_policy = heartbeat_failure_policy if heartbeat_maintenance_timeout is not None: - system_peer_ref.heartbeat_maintenance_timeout = \ + system_peer_ref.heartbeat_maintenance_timeout = ( heartbeat_maintenance_timeout + ) if availability_state is not None: system_peer_ref.availability_state = availability_state system_peer_ref.save(session) @@ -1152,15 +1271,18 @@ def system_peer_destroy(context, peer_id): @require_context def subcloud_group_get(context, group_id): try: - result = 
model_query(context, models.SubcloudGroup). \ - filter_by(deleted=0). \ - filter_by(id=group_id). \ - one() + result = ( + model_query(context, models.SubcloudGroup) + .filter_by(deleted=0) + .filter_by(id=group_id) + .one() + ) except exc.NoResultFound: raise exception.SubcloudGroupNotFound(group_id=group_id) except exc.MultipleResultsFound: raise exception.InvalidParameterValue( - err="Multiple entries found for subcloud group %s" % group_id) + err="Multiple entries found for subcloud group %s" % group_id + ) return result @@ -1168,16 +1290,19 @@ def subcloud_group_get(context, group_id): @require_context def subcloud_group_get_by_name(context, name): try: - result = model_query(context, models.SubcloudGroup). \ - filter_by(deleted=0). \ - filter_by(name=name). \ - one() + result = ( + model_query(context, models.SubcloudGroup) + .filter_by(deleted=0) + .filter_by(name=name) + .one() + ) except exc.NoResultFound: raise exception.SubcloudGroupNameNotFound(name=name) except exc.MultipleResultsFound: # This exception should never happen due to the UNIQUE setting for name raise exception.InvalidParameterValue( - err="Multiple entries found for subcloud group %s" % name) + err="Multiple entries found for subcloud group %s" % name + ) return result @@ -1185,29 +1310,31 @@ def subcloud_group_get_by_name(context, name): # This method returns all subclouds for a particular subcloud group @require_context def subcloud_get_for_group(context, group_id): - return model_query(context, models.Subcloud). \ - filter_by(deleted=0). \ - filter_by(group_id=group_id). \ - order_by(models.Subcloud.id). \ - all() + return ( + model_query(context, models.Subcloud) + .filter_by(deleted=0) + .filter_by(group_id=group_id) + .order_by(models.Subcloud.id) + .all() + ) @require_context def subcloud_group_get_all(context): - result = model_query(context, models.SubcloudGroup). \ - filter_by(deleted=0). \ - order_by(models.SubcloudGroup.id). 
\ - all() + result = ( + model_query(context, models.SubcloudGroup) + .filter_by(deleted=0) + .order_by(models.SubcloudGroup.id) + .all() + ) return result @require_admin_context -def subcloud_group_create(context, - name, - description, - update_apply_type, - max_parallel_subclouds): +def subcloud_group_create( + context, name, description, update_apply_type, max_parallel_subclouds +): with write_session() as session: subcloud_group_ref = models.SubcloudGroup() subcloud_group_ref.name = name @@ -1219,12 +1346,14 @@ def subcloud_group_create(context, @require_admin_context -def subcloud_group_update(context, - group_id, - name=None, - description=None, - update_apply_type=None, - max_parallel_subclouds=None): +def subcloud_group_update( + context, + group_id, + name=None, + description=None, + update_apply_type=None, + max_parallel_subclouds=None, +): with write_session() as session: subcloud_group_ref = subcloud_group_get(context, group_id) if name is not None: @@ -1256,29 +1385,33 @@ def subcloud_group_destroy(context, group_id): def initialize_subcloud_group_default(engine): try: + # Assign a constant to avoid line too long + default_sc_group_max_parallel_subclouds = ( + consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS + ) default_group = { "id": consts.DEFAULT_SUBCLOUD_GROUP_ID, "name": consts.DEFAULT_SUBCLOUD_GROUP_NAME, "description": consts.DEFAULT_SUBCLOUD_GROUP_DESCRIPTION, - "update_apply_type": - consts.DEFAULT_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE, - "max_parallel_subclouds": - consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS, - "deleted": 0 + "update_apply_type": consts.DEFAULT_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE, + "max_parallel_subclouds": default_sc_group_max_parallel_subclouds, + "deleted": 0, } meta = sqlalchemy.MetaData(bind=engine) - subcloud_group = sqlalchemy.Table('subcloud_group', meta, autoload=True) + subcloud_group = sqlalchemy.Table("subcloud_group", meta, autoload=True) try: with engine.begin() as conn: conn.execute( - subcloud_group.insert(), # pylint: disable=E1120 - default_group) + subcloud_group.insert(), default_group # pylint: disable=E1120 + ) LOG.info("Default Subcloud Group created") except DBDuplicateEntry: # The default already exists. pass except Exception as ex: LOG.error("Exception occurred setting up default subcloud group", ex) + + ########################## @@ -1288,15 +1421,18 @@ def initialize_subcloud_group_default(engine): @require_context def subcloud_peer_group_get(context, group_id): try: - result = model_query(context, models.SubcloudPeerGroup). \ - filter_by(deleted=0). \ - filter_by(id=group_id). \ - one() + result = ( + model_query(context, models.SubcloudPeerGroup) + .filter_by(deleted=0) + .filter_by(id=group_id) + .one() + ) except exc.NoResultFound: raise exception.SubcloudPeerGroupNotFound(group_id=group_id) except exc.MultipleResultsFound: raise exception.InvalidParameterValue( - err="Multiple entries found for subcloud peer group %s" % group_id) + err="Multiple entries found for subcloud peer group %s" % group_id + ) return result @@ -1308,19 +1444,23 @@ def subcloud_get_for_peer_group(context, peer_group_id): :param context: request context object :param peer_group_id: ID of the subcloud peer group """ - return model_query(context, models.Subcloud). \ - filter_by(deleted=0). \ - filter_by(peer_group_id=peer_group_id). \ - order_by(models.Subcloud.id). 
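# A standalone sketch of the idempotent seeding pattern used by
# initialize_subcloud_group_default above: insert a default row with a fixed
# primary key and treat a duplicate-key error as "already seeded". The table
# and values are demo stand-ins; plain SQLAlchemy IntegrityError stands in
# for oslo.db's DBDuplicateEntry wrapper.
from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine
from sqlalchemy.exc import IntegrityError

metadata = MetaData()
groups = Table(
    "subcloud_group_demo",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String(255), unique=True),
)
engine = create_engine("sqlite://")
metadata.create_all(engine)


def seed_default(engine):
    try:
        with engine.begin() as conn:
            conn.execute(groups.insert(), {"id": 1, "name": "Default"})
    except IntegrityError:
        # The default already exists: re-running the seed is a no-op.
        pass


seed_default(engine)
seed_default(engine)  # second call hits the duplicate key and is swallowed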
\ - all() + return ( + model_query(context, models.Subcloud) + .filter_by(deleted=0) + .filter_by(peer_group_id=peer_group_id) + .order_by(models.Subcloud.id) + .all() + ) @require_context def subcloud_peer_group_get_all(context): - result = model_query(context, models.SubcloudPeerGroup). \ - filter_by(deleted=0). \ - order_by(models.SubcloudPeerGroup.id). \ - all() + result = ( + model_query(context, models.SubcloudPeerGroup) + .filter_by(deleted=0) + .order_by(models.SubcloudPeerGroup.id) + .all() + ) return result @@ -1328,40 +1468,47 @@ def subcloud_peer_group_get_all(context): @require_context def subcloud_peer_group_get_by_name(context, name): try: - result = model_query(context, models.SubcloudPeerGroup). \ - filter_by(deleted=0). \ - filter_by(peer_group_name=name). \ - one() + result = ( + model_query(context, models.SubcloudPeerGroup) + .filter_by(deleted=0) + .filter_by(peer_group_name=name) + .one() + ) except exc.NoResultFound: raise exception.SubcloudPeerGroupNameNotFound(name=name) except exc.MultipleResultsFound: # This exception should never happen due to the UNIQUE setting for name raise exception.InvalidParameterValue( - err="Multiple entries found for subcloud peer group %s" % name) + err="Multiple entries found for subcloud peer group %s" % name + ) return result @require_context def subcloud_peer_group_get_by_leader_id(context, system_leader_id): - result = model_query(context, models.SubcloudPeerGroup). \ - filter_by(deleted=0). \ - filter_by(system_leader_id=system_leader_id). \ - order_by(models.SubcloudPeerGroup.id). \ - all() + result = ( + model_query(context, models.SubcloudPeerGroup) + .filter_by(deleted=0) + .filter_by(system_leader_id=system_leader_id) + .order_by(models.SubcloudPeerGroup.id) + .all() + ) return result @require_admin_context -def subcloud_peer_group_create(context, - peer_group_name, - group_priority, - group_state, - max_subcloud_rehoming, - system_leader_id, - system_leader_name, - migration_status): +def subcloud_peer_group_create( + context, + peer_group_name, + group_priority, + group_state, + max_subcloud_rehoming, + system_leader_id, + system_leader_name, + migration_status, +): with write_session() as session: subcloud_peer_group_ref = models.SubcloudPeerGroup() subcloud_peer_group_ref.peer_group_name = peer_group_name @@ -1383,15 +1530,17 @@ def subcloud_peer_group_destroy(context, group_id): @require_admin_context -def subcloud_peer_group_update(context, - group_id, - peer_group_name=None, - group_priority=None, - group_state=None, - max_subcloud_rehoming=None, - system_leader_id=None, - system_leader_name=None, - migration_status=None): +def subcloud_peer_group_update( + context, + group_id, + peer_group_name=None, + group_priority=None, + group_state=None, + max_subcloud_rehoming=None, + system_leader_id=None, + system_leader_name=None, + migration_status=None, +): with write_session() as session: subcloud_peer_group_ref = subcloud_peer_group_get(context, group_id) if peer_group_name is not None: @@ -1407,12 +1556,14 @@ def subcloud_peer_group_update(context, if system_leader_name is not None: subcloud_peer_group_ref.system_leader_name = system_leader_name if migration_status is not None: - if str(migration_status).lower() == 'none': + if str(migration_status).lower() == "none": subcloud_peer_group_ref.migration_status = None else: subcloud_peer_group_ref.migration_status = migration_status subcloud_peer_group_ref.save(session) return subcloud_peer_group_ref + + ########################## @@ -1420,13 +1571,15 @@ def 
subcloud_peer_group_update(context, # peer group association ########################## @require_admin_context -def peer_group_association_create(context, - peer_group_id, - system_peer_id, - peer_group_priority, - association_type, - sync_status, - sync_message): +def peer_group_association_create( + context, + peer_group_id, + system_peer_id, + peer_group_priority, + association_type, + sync_status, + sync_message, +): with write_session() as session: peer_group_association_ref = models.PeerGroupAssociation() peer_group_association_ref.peer_group_id = peer_group_id @@ -1440,11 +1593,9 @@ def peer_group_association_create(context, @require_admin_context -def peer_group_association_update(context, - associate_id, - peer_group_priority=None, - sync_status=None, - sync_message=None): +def peer_group_association_update( + context, associate_id, peer_group_priority=None, sync_status=None, sync_message=None +): with write_session() as session: association_ref = peer_group_association_get(context, associate_id) if peer_group_priority is not None: @@ -1452,7 +1603,7 @@ def peer_group_association_update(context, if sync_status is not None: association_ref.sync_status = sync_status if sync_message is not None: - if str(sync_message).lower() == 'none': + if str(sync_message).lower() == "none": association_ref.sync_message = None else: association_ref.sync_message = sync_message @@ -1470,27 +1621,30 @@ def peer_group_association_destroy(context, association_id): @require_context def peer_group_association_get(context, association_id): try: - result = model_query(context, models.PeerGroupAssociation). \ - filter_by(deleted=0). \ - filter_by(id=association_id). \ - one() + result = ( + model_query(context, models.PeerGroupAssociation) + .filter_by(deleted=0) + .filter_by(id=association_id) + .one() + ) except exc.NoResultFound: - raise exception.PeerGroupAssociationNotFound( - association_id=association_id) + raise exception.PeerGroupAssociationNotFound(association_id=association_id) except exc.MultipleResultsFound: raise exception.InvalidParameterValue( - err="Multiple entries found for peer group association %s" % - association_id) + err="Multiple entries found for peer group association %s" % association_id + ) return result @require_context def peer_group_association_get_all(context): - result = model_query(context, models.PeerGroupAssociation). \ - filter_by(deleted=0). \ - order_by(models.PeerGroupAssociation.id). \ - all() + result = ( + model_query(context, models.PeerGroupAssociation) + .filter_by(deleted=0) + .order_by(models.PeerGroupAssociation.id) + .all() + ) return result @@ -1498,55 +1652,67 @@ def peer_group_association_get_all(context): # Each combination of 'peer_group_id' and 'system_peer_id' is unique # and appears only once in the entries. @require_context -def peer_group_association_get_by_peer_group_and_system_peer_id(context, - peer_group_id, - system_peer_id): +def peer_group_association_get_by_peer_group_and_system_peer_id( + context, peer_group_id, system_peer_id +): try: - result = model_query(context, models.PeerGroupAssociation). \ - filter_by(deleted=0). \ - filter_by(peer_group_id=peer_group_id). \ - filter_by(system_peer_id=system_peer_id). 
\ - one() + result = ( + model_query(context, models.PeerGroupAssociation) + .filter_by(deleted=0) + .filter_by(peer_group_id=peer_group_id) + .filter_by(system_peer_id=system_peer_id) + .one() + ) except exc.NoResultFound: raise exception.PeerGroupAssociationCombinationNotFound( - peer_group_id=peer_group_id, system_peer_id=system_peer_id) + peer_group_id=peer_group_id, system_peer_id=system_peer_id + ) except exc.MultipleResultsFound: # This exception should never happen due to the UNIQUE setting for name raise exception.InvalidParameterValue( - err="Multiple entries found for peer group association %s,%s" % - (peer_group_id, system_peer_id)) + err="Multiple entries found for peer group association %s,%s" + % (peer_group_id, system_peer_id) + ) return result @require_context def peer_group_association_get_by_peer_group_id(context, peer_group_id): - result = model_query(context, models.PeerGroupAssociation). \ - filter_by(deleted=0). \ - filter_by(peer_group_id=peer_group_id). \ - order_by(models.PeerGroupAssociation.id). \ - all() + result = ( + model_query(context, models.PeerGroupAssociation) + .filter_by(deleted=0) + .filter_by(peer_group_id=peer_group_id) + .order_by(models.PeerGroupAssociation.id) + .all() + ) return result @require_context def peer_group_association_get_by_system_peer_id(context, system_peer_id): - result = model_query(context, models.PeerGroupAssociation). \ - filter_by(deleted=0). \ - filter_by(system_peer_id=system_peer_id). \ - order_by(models.PeerGroupAssociation.id). \ - all() + result = ( + model_query(context, models.PeerGroupAssociation) + .filter_by(deleted=0) + .filter_by(system_peer_id=system_peer_id) + .order_by(models.PeerGroupAssociation.id) + .all() + ) return result + + ########################## @require_context def strategy_step_get(context, subcloud_id): - result = model_query(context, models.StrategyStep). \ - filter_by(deleted=0). \ - filter_by(subcloud_id=subcloud_id). \ - first() + result = ( + model_query(context, models.StrategyStep) + .filter_by(deleted=0) + .filter_by(subcloud_id=subcloud_id) + .first() + ) if not result: raise exception.StrategyStepNotFound(subcloud_id=subcloud_id) @@ -1556,11 +1722,13 @@ def strategy_step_get(context, subcloud_id): @require_context def strategy_step_get_by_name(context, name): - result = model_query(context, models.StrategyStep). \ - filter_by(deleted=0). \ - join(models.Subcloud, - models.StrategyStep.subcloud_id == models.Subcloud.id). \ - filter(models.Subcloud.name == name).first() + result = ( + model_query(context, models.StrategyStep) + .filter_by(deleted=0) + .join(models.Subcloud, models.StrategyStep.subcloud_id == models.Subcloud.id) + .filter(models.Subcloud.name == name) + .first() + ) if not result: raise exception.StrategyStepNameNotFound(name=name) @@ -1570,10 +1738,12 @@ def strategy_step_get_by_name(context, name): @require_context def strategy_step_get_all(context): - result = model_query(context, models.StrategyStep). \ - filter_by(deleted=0). \ - order_by(models.StrategyStep.id). 
\ - all() + result = ( + model_query(context, models.StrategyStep) + .filter_by(deleted=0) + .order_by(models.StrategyStep.id) + .all() + ) return result @@ -1592,12 +1762,14 @@ def strategy_step_bulk_create(context, subcloud_ids, stage, state, details): strategy_steps = list() for subcloud_id in subcloud_ids: - strategy_steps.append({ - "subcloud_id": subcloud_id, - "stage": stage, - "state": state, - "details": details - }) + strategy_steps.append( + { + "subcloud_id": subcloud_id, + "stage": stage, + "state": state, + "details": details, + } + ) with write_session() as session: return session.execute(insert(models.StrategyStep), strategy_steps) @@ -1616,8 +1788,15 @@ def strategy_step_create(context, subcloud_id, stage, state, details): @require_admin_context -def strategy_step_update(context, subcloud_id, stage=None, state=None, - details=None, started_at=None, finished_at=None): +def strategy_step_update( + context, + subcloud_id, + stage=None, + state=None, + details=None, + started_at=None, + finished_at=None, +): with write_session() as session: strategy_step_ref = strategy_step_get(context, subcloud_id) if stage is not None: @@ -1664,8 +1843,7 @@ def db_version(engine): ########################## -def add_identity_filter(query, value, - use_name=None): +def add_identity_filter(query, value, use_name=None): """Adds an identity filter to a query. Filters results by 'id', if supplied value is a valid integer. @@ -1690,8 +1868,7 @@ def add_identity_filter(query, value, @require_context def _subcloud_alarms_get(context, name): - query = model_query(context, models.SubcloudAlarmSummary). \ - filter_by(deleted=0) + query = model_query(context, models.SubcloudAlarmSummary).filter_by(deleted=0) query = add_identity_filter(query, name, use_name=True) try: @@ -1700,7 +1877,8 @@ def _subcloud_alarms_get(context, name): raise exception.SubcloudNameNotFound(name=name) except exc.MultipleResultsFound: raise exception.InvalidParameterValue( - err="Multiple entries found for subcloud %s" % name) + err="Multiple entries found for subcloud %s" % name + ) @require_context @@ -1710,8 +1888,7 @@ def subcloud_alarms_get(context, name): @require_context def subcloud_alarms_get_all(context, name=None): - query = model_query(context, models.SubcloudAlarmSummary). 
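# A minimal sketch of the bulk-create pattern in strategy_step_bulk_create
# above: build one list of per-row dicts and pass it to a single Core INSERT,
# which runs with executemany semantics. The demo table stands in for
# models.StrategyStep.
from sqlalchemy import (
    Column, Integer, MetaData, String, Table, create_engine, insert,
)

metadata = MetaData()
steps = Table(
    "strategy_steps_demo",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("subcloud_id", Integer, unique=True),
    Column("stage", Integer),
    Column("state", String(255)),
    Column("details", String(255)),
)
engine = create_engine("sqlite://")
metadata.create_all(engine)

strategy_steps = [
    {"subcloud_id": subcloud_id, "stage": 1, "state": "initial", "details": ""}
    for subcloud_id in (1, 2, 3)
]
with engine.begin() as conn:
    # One INSERT statement, one parameter set per subcloud.
    conn.execute(insert(steps), strategy_steps)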
\ - filter_by(deleted=0) + query = model_query(context, models.SubcloudAlarmSummary).filter_by(deleted=0) if name: query = add_identity_filter(query, name, use_name=True) @@ -1724,8 +1901,8 @@ def subcloud_alarms_create(context, name, values): with write_session() as session: result = models.SubcloudAlarmSummary() result.name = name - if not values.get('uuid'): - values['uuid'] = uuidutils.generate_uuid() + if not values.get("uuid"): + values["uuid"] = uuidutils.generate_uuid() result.update(values) try: session.add(result) @@ -1746,8 +1923,7 @@ def subcloud_alarms_update(context, name, values): @require_admin_context def subcloud_alarms_delete(context, name): with write_session() as session: - session.query(models.SubcloudAlarmSummary).\ - filter_by(name=name).delete() + session.query(models.SubcloudAlarmSummary).filter_by(name=name).delete() @require_admin_context diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/manage.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/manage.py index 39fa3892e..61dd84621 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/manage.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/manage.py @@ -1,5 +1,10 @@ #!/usr/bin/env python +# Copyright (c) 2024 Wind River Systems, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + from migrate.versioning.shell import main -if __name__ == '__main__': - main(debug='False') +if __name__ == "__main__": + main(debug="False") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py index 0e5256358..66d47784c 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/001_first_version.py @@ -25,133 +25,138 @@ def upgrade(migrate_engine): meta.bind = migrate_engine subclouds = sqlalchemy.Table( - 'subclouds', meta, - sqlalchemy.Column('id', sqlalchemy.Integer, - primary_key=True, nullable=False), - sqlalchemy.Column('name', sqlalchemy.String(255), unique=True), - sqlalchemy.Column('description', sqlalchemy.String(255)), - sqlalchemy.Column('location', sqlalchemy.String(255)), - sqlalchemy.Column('software_version', sqlalchemy.String(255)), - sqlalchemy.Column('management_state', sqlalchemy.String(255)), - sqlalchemy.Column('availability_status', sqlalchemy.String(255)), - sqlalchemy.Column('management_subnet', sqlalchemy.String(255)), - sqlalchemy.Column('management_gateway_ip', sqlalchemy.String(255)), - sqlalchemy.Column('management_start_ip', sqlalchemy.String(255)), - sqlalchemy.Column('management_end_ip', sqlalchemy.String(255)), - sqlalchemy.Column('systemcontroller_gateway_ip', - sqlalchemy.String(255)), - sqlalchemy.Column('audit_fail_count', sqlalchemy.Integer, default=0), - sqlalchemy.Column('reserved_1', sqlalchemy.Text), - sqlalchemy.Column('reserved_2', sqlalchemy.Text), - sqlalchemy.Column('created_at', sqlalchemy.DateTime), - sqlalchemy.Column('updated_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted', sqlalchemy.Integer), - mysql_engine='InnoDB', - mysql_charset='utf8' + "subclouds", + meta, + sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True, nullable=False), + sqlalchemy.Column("name", sqlalchemy.String(255), unique=True), + sqlalchemy.Column("description", sqlalchemy.String(255)), + sqlalchemy.Column("location", sqlalchemy.String(255)), + 
sqlalchemy.Column("software_version", sqlalchemy.String(255)), + sqlalchemy.Column("management_state", sqlalchemy.String(255)), + sqlalchemy.Column("availability_status", sqlalchemy.String(255)), + sqlalchemy.Column("management_subnet", sqlalchemy.String(255)), + sqlalchemy.Column("management_gateway_ip", sqlalchemy.String(255)), + sqlalchemy.Column("management_start_ip", sqlalchemy.String(255)), + sqlalchemy.Column("management_end_ip", sqlalchemy.String(255)), + sqlalchemy.Column("systemcontroller_gateway_ip", sqlalchemy.String(255)), + sqlalchemy.Column("audit_fail_count", sqlalchemy.Integer, default=0), + sqlalchemy.Column("reserved_1", sqlalchemy.Text), + sqlalchemy.Column("reserved_2", sqlalchemy.Text), + sqlalchemy.Column("created_at", sqlalchemy.DateTime), + sqlalchemy.Column("updated_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted", sqlalchemy.Integer), + mysql_engine="InnoDB", + mysql_charset="utf8", ) subcloud_status = sqlalchemy.Table( - 'subcloud_status', meta, - sqlalchemy.Column('id', sqlalchemy.Integer, - primary_key=True, nullable=False), - sqlalchemy.Column('subcloud_id', sqlalchemy.Integer, - sqlalchemy.ForeignKey('subclouds.id', - ondelete='CASCADE')), - sqlalchemy.Column('endpoint_type', sqlalchemy.String(255)), - sqlalchemy.Column('sync_status', sqlalchemy.String(255)), - sqlalchemy.Column('reserved_1', sqlalchemy.Text), - sqlalchemy.Column('reserved_2', sqlalchemy.Text), - sqlalchemy.Column('created_at', sqlalchemy.DateTime), - sqlalchemy.Column('updated_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted', sqlalchemy.Integer), - mysql_engine='InnoDB', - mysql_charset='utf8' + "subcloud_status", + meta, + sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True, nullable=False), + sqlalchemy.Column( + "subcloud_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey("subclouds.id", ondelete="CASCADE"), + ), + sqlalchemy.Column("endpoint_type", sqlalchemy.String(255)), + sqlalchemy.Column("sync_status", sqlalchemy.String(255)), + sqlalchemy.Column("reserved_1", sqlalchemy.Text), + sqlalchemy.Column("reserved_2", sqlalchemy.Text), + sqlalchemy.Column("created_at", sqlalchemy.DateTime), + sqlalchemy.Column("updated_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted", sqlalchemy.Integer), + mysql_engine="InnoDB", + mysql_charset="utf8", ) sw_update_strategy = sqlalchemy.Table( - 'sw_update_strategy', meta, - sqlalchemy.Column('id', sqlalchemy.Integer, - primary_key=True, nullable=False), - sqlalchemy.Column('type', sqlalchemy.String(255), unique=True), - sqlalchemy.Column('subcloud_apply_type', sqlalchemy.String(255)), - sqlalchemy.Column('max_parallel_subclouds', sqlalchemy.Integer), - sqlalchemy.Column('stop_on_failure', sqlalchemy.Boolean), - sqlalchemy.Column('state', sqlalchemy.String(255)), - sqlalchemy.Column('reserved_1', sqlalchemy.Text), - sqlalchemy.Column('reserved_2', sqlalchemy.Text), - sqlalchemy.Column('created_at', sqlalchemy.DateTime), - sqlalchemy.Column('updated_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted', sqlalchemy.Integer), - mysql_engine='InnoDB', - mysql_charset='utf8' + "sw_update_strategy", + meta, + sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True, nullable=False), + sqlalchemy.Column("type", sqlalchemy.String(255), unique=True), + sqlalchemy.Column("subcloud_apply_type", 
sqlalchemy.String(255)), + sqlalchemy.Column("max_parallel_subclouds", sqlalchemy.Integer), + sqlalchemy.Column("stop_on_failure", sqlalchemy.Boolean), + sqlalchemy.Column("state", sqlalchemy.String(255)), + sqlalchemy.Column("reserved_1", sqlalchemy.Text), + sqlalchemy.Column("reserved_2", sqlalchemy.Text), + sqlalchemy.Column("created_at", sqlalchemy.DateTime), + sqlalchemy.Column("updated_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted", sqlalchemy.Integer), + mysql_engine="InnoDB", + mysql_charset="utf8", ) sw_update_opts_default = sqlalchemy.Table( - 'sw_update_opts_default', meta, - sqlalchemy.Column('id', sqlalchemy.Integer, - primary_key=True, nullable=False), - sqlalchemy.Column('subcloud_id', sqlalchemy.Integer), - sqlalchemy.Column('storage_apply_type', sqlalchemy.String(255)), - sqlalchemy.Column('compute_apply_type', sqlalchemy.String(255)), - sqlalchemy.Column('max_parallel_computes', sqlalchemy.Integer), - sqlalchemy.Column('default_instance_action', sqlalchemy.String(255)), - sqlalchemy.Column('alarm_restriction_type', sqlalchemy.String(255)), - sqlalchemy.Column('reserved_1', sqlalchemy.Text), - sqlalchemy.Column('reserved_2', sqlalchemy.Text), - sqlalchemy.Column('created_at', sqlalchemy.DateTime), - sqlalchemy.Column('updated_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted', sqlalchemy.Integer), - mysql_engine='InnoDB', - mysql_charset='utf8' + "sw_update_opts_default", + meta, + sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True, nullable=False), + sqlalchemy.Column("subcloud_id", sqlalchemy.Integer), + sqlalchemy.Column("storage_apply_type", sqlalchemy.String(255)), + sqlalchemy.Column("compute_apply_type", sqlalchemy.String(255)), + sqlalchemy.Column("max_parallel_computes", sqlalchemy.Integer), + sqlalchemy.Column("default_instance_action", sqlalchemy.String(255)), + sqlalchemy.Column("alarm_restriction_type", sqlalchemy.String(255)), + sqlalchemy.Column("reserved_1", sqlalchemy.Text), + sqlalchemy.Column("reserved_2", sqlalchemy.Text), + sqlalchemy.Column("created_at", sqlalchemy.DateTime), + sqlalchemy.Column("updated_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted", sqlalchemy.Integer), + mysql_engine="InnoDB", + mysql_charset="utf8", ) sw_update_opts = sqlalchemy.Table( - 'sw_update_opts', meta, - sqlalchemy.Column('id', sqlalchemy.Integer, - primary_key=True, nullable=False), - sqlalchemy.Column('subcloud_id', sqlalchemy.Integer, - sqlalchemy.ForeignKey('subclouds.id', - ondelete='CASCADE')), - sqlalchemy.Column('storage_apply_type', sqlalchemy.String(255)), - sqlalchemy.Column('compute_apply_type', sqlalchemy.String(255)), - sqlalchemy.Column('max_parallel_computes', sqlalchemy.Integer), - sqlalchemy.Column('default_instance_action', sqlalchemy.String(255)), - sqlalchemy.Column('alarm_restriction_type', sqlalchemy.String(255)), - sqlalchemy.Column('reserved_1', sqlalchemy.Text), - sqlalchemy.Column('reserved_2', sqlalchemy.Text), - sqlalchemy.Column('created_at', sqlalchemy.DateTime), - sqlalchemy.Column('updated_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted', sqlalchemy.Integer), - mysql_engine='InnoDB', - mysql_charset='utf8' + "sw_update_opts", + meta, + sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True, nullable=False), + sqlalchemy.Column( + "subcloud_id", + sqlalchemy.Integer, + 
sqlalchemy.ForeignKey("subclouds.id", ondelete="CASCADE"), + ), + sqlalchemy.Column("storage_apply_type", sqlalchemy.String(255)), + sqlalchemy.Column("compute_apply_type", sqlalchemy.String(255)), + sqlalchemy.Column("max_parallel_computes", sqlalchemy.Integer), + sqlalchemy.Column("default_instance_action", sqlalchemy.String(255)), + sqlalchemy.Column("alarm_restriction_type", sqlalchemy.String(255)), + sqlalchemy.Column("reserved_1", sqlalchemy.Text), + sqlalchemy.Column("reserved_2", sqlalchemy.Text), + sqlalchemy.Column("created_at", sqlalchemy.DateTime), + sqlalchemy.Column("updated_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted", sqlalchemy.Integer), + mysql_engine="InnoDB", + mysql_charset="utf8", ) strategy_steps = sqlalchemy.Table( - 'strategy_steps', meta, - sqlalchemy.Column('id', sqlalchemy.Integer, - primary_key=True, nullable=False), - sqlalchemy.Column('subcloud_id', sqlalchemy.Integer, - sqlalchemy.ForeignKey('subclouds.id', - ondelete='CASCADE'), - unique=True), - sqlalchemy.Column('stage', sqlalchemy.Integer), - sqlalchemy.Column('state', sqlalchemy.String(255)), - sqlalchemy.Column('details', sqlalchemy.String(255)), - sqlalchemy.Column('started_at', sqlalchemy.DateTime), - sqlalchemy.Column('finished_at', sqlalchemy.DateTime), - sqlalchemy.Column('reserved_1', sqlalchemy.Text), - sqlalchemy.Column('reserved_2', sqlalchemy.Text), - sqlalchemy.Column('created_at', sqlalchemy.DateTime), - sqlalchemy.Column('updated_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted', sqlalchemy.Integer), - mysql_engine='InnoDB', - mysql_charset='utf8' + "strategy_steps", + meta, + sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True, nullable=False), + sqlalchemy.Column( + "subcloud_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey("subclouds.id", ondelete="CASCADE"), + unique=True, + ), + sqlalchemy.Column("stage", sqlalchemy.Integer), + sqlalchemy.Column("state", sqlalchemy.String(255)), + sqlalchemy.Column("details", sqlalchemy.String(255)), + sqlalchemy.Column("started_at", sqlalchemy.DateTime), + sqlalchemy.Column("finished_at", sqlalchemy.DateTime), + sqlalchemy.Column("reserved_1", sqlalchemy.Text), + sqlalchemy.Column("reserved_2", sqlalchemy.Text), + sqlalchemy.Column("created_at", sqlalchemy.DateTime), + sqlalchemy.Column("updated_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted", sqlalchemy.Integer), + mysql_engine="InnoDB", + mysql_charset="utf8", ) tables = ( @@ -160,7 +165,7 @@ def upgrade(migrate_engine): sw_update_strategy, strategy_steps, sw_update_opts, - sw_update_opts_default + sw_update_opts_default, ) for index, table in enumerate(tables): @@ -176,18 +181,21 @@ def upgrade(migrate_engine): # populate the sw_update_opts_default with the default values. 
con = migrate_engine.connect() - con.execute(sw_update_opts_default.insert(), # pylint: disable=E1120 - storage_apply_type=vim.APPLY_TYPE_PARALLEL, - compute_apply_type=vim.APPLY_TYPE_PARALLEL, - max_parallel_computes=10, - default_instance_action=vim.INSTANCE_ACTION_MIGRATE, - alarm_restriction_type=vim.ALARM_RESTRICTIONS_RELAXED, - deleted=0) + con.execute( + sw_update_opts_default.insert(), # pylint: disable=E1120 + storage_apply_type=vim.APPLY_TYPE_PARALLEL, + compute_apply_type=vim.APPLY_TYPE_PARALLEL, + max_parallel_computes=10, + default_instance_action=vim.INSTANCE_ACTION_MIGRATE, + alarm_restriction_type=vim.ALARM_RESTRICTIONS_RELAXED, + deleted=0, + ) except Exception: # We can survive if this fails. pass def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade not supported - ' - 'would drop all tables') + raise NotImplementedError( + "Database downgrade not supported - would drop all tables" + ) diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/002_rename_compute_to_worker.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/002_rename_compute_to_worker.py index 59227818b..2201e1162 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/002_rename_compute_to_worker.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/002_rename_compute_to_worker.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2021 Wind River Systems, Inc. +# Copyright (c) 2019-2021, 2024 Wind River Systems, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -20,12 +20,13 @@ def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine - sw_update_opts_default = Table('sw_update_opts_default', meta, - autoload=True) - sw_update_opts = Table('sw_update_opts', meta, autoload=True) + sw_update_opts_default = Table("sw_update_opts_default", meta, autoload=True) + sw_update_opts = Table("sw_update_opts", meta, autoload=True) - columns_to_rename = {'compute_apply_type': 'worker_apply_type', - 'max_parallel_computes': 'max_parallel_workers'} + columns_to_rename = { + "compute_apply_type": "worker_apply_type", + "max_parallel_computes": "max_parallel_workers", + } for k, v in columns_to_rename.items(): getattr(sw_update_opts_default.c, k).alter(name=v) getattr(sw_update_opts.c, k).alter(name=v) @@ -34,4 +35,4 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/003_add_deploy_status_column.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/003_add_deploy_status_column.py index c586029bf..5b9785b5b 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/003_add_deploy_status_column.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/003_add_deploy_status_column.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2021 Wind River Systems, Inc. +# Copyright (c) 2019-2021, 2024 Wind River Systems, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at @@ -19,13 +19,13 @@ def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine - subclouds = Table('subclouds', meta, autoload=True) + subclouds = Table("subclouds", meta, autoload=True) # Add the 'deploy_status' column to the subclouds table. - subclouds.create_column(Column('deploy_status', String(255))) + subclouds.create_column(Column("deploy_status", String(255))) return True def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/004_add_openstack_installed_column.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/004_add_openstack_installed_column.py index 4cfb42b9a..fb8228d1a 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/004_add_openstack_installed_column.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/004_add_openstack_installed_column.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2021 Wind River Systems, Inc. +# Copyright (c) 2019-2021, 2024 Wind River Systems, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -19,15 +19,21 @@ def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine - subclouds = Table('subclouds', meta, autoload=True) + subclouds = Table("subclouds", meta, autoload=True) # Add the 'openstack_installed' column to the subclouds table. - subclouds.create_column(Column('openstack_installed', Boolean, - nullable=False, default=False, - server_default='0')) + subclouds.create_column( + Column( + "openstack_installed", + Boolean, + nullable=False, + default=False, + server_default="0", + ) + ) return True def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/005_add_subcloud_alarms.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/005_add_subcloud_alarms.py index 159dff5ca..2006f7048 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/005_add_subcloud_alarms.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/005_add_subcloud_alarms.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2021 Wind River Systems, Inc. +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at @@ -20,30 +20,28 @@ def upgrade(migrate_engine): meta.bind = migrate_engine subcloud_alarms = sqlalchemy.Table( - 'subcloud_alarms', meta, - sqlalchemy.Column('id', sqlalchemy.Integer, - primary_key=True, nullable=False), - sqlalchemy.Column('uuid', sqlalchemy.String(36), unique=True), - - sqlalchemy.Column('name', sqlalchemy.String(255), unique=True), - sqlalchemy.Column('critical_alarms', sqlalchemy.Integer), - sqlalchemy.Column('major_alarms', sqlalchemy.Integer), - sqlalchemy.Column('minor_alarms', sqlalchemy.Integer), - sqlalchemy.Column('warnings', sqlalchemy.Integer), - sqlalchemy.Column('cloud_status', sqlalchemy.String(64)), - - sqlalchemy.Column('created_at', sqlalchemy.DateTime), - sqlalchemy.Column('updated_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted', sqlalchemy.Integer), - - mysql_engine='InnoDB', - mysql_charset='utf8' + "subcloud_alarms", + meta, + sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True, nullable=False), + sqlalchemy.Column("uuid", sqlalchemy.String(36), unique=True), + sqlalchemy.Column("name", sqlalchemy.String(255), unique=True), + sqlalchemy.Column("critical_alarms", sqlalchemy.Integer), + sqlalchemy.Column("major_alarms", sqlalchemy.Integer), + sqlalchemy.Column("minor_alarms", sqlalchemy.Integer), + sqlalchemy.Column("warnings", sqlalchemy.Integer), + sqlalchemy.Column("cloud_status", sqlalchemy.String(64)), + sqlalchemy.Column("created_at", sqlalchemy.DateTime), + sqlalchemy.Column("updated_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted", sqlalchemy.Integer), + mysql_engine="InnoDB", + mysql_charset="utf8", ) subcloud_alarms.create() def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade not supported - ' - 'would drop all tables') + raise NotImplementedError( + "Database downgrade not supported - would drop all tables" + ) diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/006_add_subcloud_group_table.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/006_add_subcloud_group_table.py index 3faac6be6..c770e94d5 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/006_add_subcloud_group_table.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/006_add_subcloud_group_table.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2021 Wind River Systems, Inc. +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at @@ -17,8 +17,8 @@ import sqlalchemy from dcmanager.common import consts -ENGINE = 'InnoDB', -CHARSET = 'utf8' +ENGINE = ("InnoDB",) +CHARSET = "utf8" def upgrade(migrate_engine): @@ -26,27 +26,31 @@ def upgrade(migrate_engine): # Declare the new subcloud_group table subcloud_group = sqlalchemy.Table( - 'subcloud_group', meta, - sqlalchemy.Column('id', sqlalchemy.Integer, - primary_key=True, - autoincrement=True, - nullable=False), - sqlalchemy.Column('name', sqlalchemy.String(255), unique=True), - sqlalchemy.Column('description', sqlalchemy.String(255)), - sqlalchemy.Column('update_apply_type', sqlalchemy.String(255)), - sqlalchemy.Column('max_parallel_subclouds', sqlalchemy.Integer), - sqlalchemy.Column('reserved_1', sqlalchemy.Text), - sqlalchemy.Column('reserved_2', sqlalchemy.Text), - sqlalchemy.Column('created_at', sqlalchemy.DateTime), - sqlalchemy.Column('updated_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted', sqlalchemy.Integer, default=0), + "subcloud_group", + meta, + sqlalchemy.Column( + "id", + sqlalchemy.Integer, + primary_key=True, + autoincrement=True, + nullable=False, + ), + sqlalchemy.Column("name", sqlalchemy.String(255), unique=True), + sqlalchemy.Column("description", sqlalchemy.String(255)), + sqlalchemy.Column("update_apply_type", sqlalchemy.String(255)), + sqlalchemy.Column("max_parallel_subclouds", sqlalchemy.Integer), + sqlalchemy.Column("reserved_1", sqlalchemy.Text), + sqlalchemy.Column("reserved_2", sqlalchemy.Text), + sqlalchemy.Column("created_at", sqlalchemy.DateTime), + sqlalchemy.Column("updated_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted", sqlalchemy.Integer, default=0), mysql_engine=ENGINE, - mysql_charset=CHARSET + mysql_charset=CHARSET, ) subcloud_group.create() - subclouds = sqlalchemy.Table('subclouds', meta, autoload=True) + subclouds = sqlalchemy.Table("subclouds", meta, autoload=True) # TODO(abailey) do we want to fix the missing constraint for strategy_steps # strat_steps = sqlalchemy.Table('strategy_steps', meta, autoload=True) @@ -62,37 +66,36 @@ def upgrade(migrate_engine): "name": consts.DEFAULT_SUBCLOUD_GROUP_NAME, "description": consts.DEFAULT_SUBCLOUD_GROUP_DESCRIPTION, "update_apply_type": consts.DEFAULT_SUBCLOUD_GROUP_UPDATE_APPLY_TYPE, - "max_parallel_subclouds": - consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS, - "deleted": 0 + "max_parallel_subclouds": consts.DEFAULT_SUBCLOUD_GROUP_MAX_PARALLEL_SUBCLOUDS, + "deleted": 0, } # Inserting the GROUP as ID 1, # This should increment the pkey to 2 with migrate_engine.begin() as conn: - conn.execute( - subcloud_group.insert(), # pylint: disable=E1120 - default_group) + conn.execute(subcloud_group.insert(), default_group) # pylint: disable=E1120 # postgres does not increment the subcloud group id sequence # after the insert above as part of the migrate. 
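# Background for the sequence reset just below, as a hedged aside: PostgreSQL
# only advances a SERIAL sequence when the INSERT takes the column default,
# so inserting the default group with an explicit id of 1 leaves nextval()
# still pointing at 1, and the next ordinary insert would collide. The
# migration therefore resets the sequence by hand; in raw SQL:
#
#     INSERT INTO subcloud_group (id, name, ...) VALUES (1, ...);
#     ALTER SEQUENCE subcloud_group_id_seq RESTART WITH 2;
#
# A more general (hypothetical) alternative derives the restart point from
# the data itself:
#
#     SELECT setval('subcloud_group_id_seq',
#                   (SELECT MAX(id) FROM subcloud_group));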
# Note: use different SQL syntax if using mysql or sqlite - if migrate_engine.name == 'postgresql': + if migrate_engine.name == "postgresql": with migrate_engine.begin() as conn: conn.execute("ALTER SEQUENCE subcloud_group_id_seq RESTART WITH 2") # Add group_id column to subclouds table - group_id = \ - sqlalchemy.Column('group_id', - sqlalchemy.Integer, - server_default=str(consts.DEFAULT_SUBCLOUD_GROUP_ID)) + group_id = sqlalchemy.Column( + "group_id", + sqlalchemy.Integer, + server_default=str(consts.DEFAULT_SUBCLOUD_GROUP_ID), + ) group_id.create(subclouds) subcloud_fkey = constraint.ForeignKeyConstraint( columns=[subclouds.c.group_id], refcolumns=[subcloud_group.c.id], - name='subclouds_group_ref') + name="subclouds_group_ref", + ) subclouds.append_constraint(subcloud_fkey) def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/007_add_subcloud_install.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/007_add_subcloud_install.py index c59c5f55d..871503d0b 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/007_add_subcloud_install.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/007_add_subcloud_install.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2021 Wind River Systems, Inc. +# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -22,14 +22,14 @@ def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine - subclouds = Table('subclouds', meta, autoload=True) + subclouds = Table("subclouds", meta, autoload=True) # Add the 'data_install' to persist data_install data - subclouds.create_column(Column('data_install', Text)) + subclouds.create_column(Column("data_install", Text)) # Add the data_upgrade which persist over an upgrade - subclouds.create_column(Column('data_upgrade', Text)) + subclouds.create_column(Column("data_upgrade", Text)) def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/008_add_subcloud_audits_table.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/008_add_subcloud_audits_table.py index 55c4900c6..7a8f954a8 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/008_add_subcloud_audits_table.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/008_add_subcloud_audits_table.py @@ -29,40 +29,47 @@ def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine - subclouds = Table('subclouds', meta, autoload=True) + subclouds = Table("subclouds", meta, autoload=True) subcloud_audits = Table( - 'subcloud_audits', meta, - Column('id', Integer, primary_key=True, - autoincrement=True, nullable=False), - Column('subcloud_id', Integer, - ForeignKey('subclouds.id', ondelete='CASCADE'), - unique=True), - Column('created_at', DateTime), - Column('updated_at', DateTime), - Column('deleted_at', DateTime), - Column('deleted', Integer, default=0), - Column('audit_started_at', DateTime, default=datetime.datetime.min), - Column('audit_finished_at', DateTime, default=datetime.datetime.min), - 
Column('state_update_requested', Boolean, nullable=False, default=False), - Column('patch_audit_requested', Boolean, nullable=False, default=False), - Column('load_audit_requested', Boolean, nullable=False, default=False), - Column('firmware_audit_requested', Boolean, nullable=False, default=False), - Column('kubernetes_audit_requested', Boolean, nullable=False, default=False), - Column('spare_audit_requested', Boolean, nullable=False, default=False), - Column('spare2_audit_requested', Boolean, nullable=False, default=False), - Column('reserved', Text), - mysql_engine='InnoDB', - mysql_charset='utf8' + "subcloud_audits", + meta, + Column("id", Integer, primary_key=True, autoincrement=True, nullable=False), + Column( + "subcloud_id", + Integer, + ForeignKey("subclouds.id", ondelete="CASCADE"), + unique=True, + ), + Column("created_at", DateTime), + Column("updated_at", DateTime), + Column("deleted_at", DateTime), + Column("deleted", Integer, default=0), + Column("audit_started_at", DateTime, default=datetime.datetime.min), + Column("audit_finished_at", DateTime, default=datetime.datetime.min), + Column("state_update_requested", Boolean, nullable=False, default=False), + Column("patch_audit_requested", Boolean, nullable=False, default=False), + Column("load_audit_requested", Boolean, nullable=False, default=False), + Column("firmware_audit_requested", Boolean, nullable=False, default=False), + Column("kubernetes_audit_requested", Boolean, nullable=False, default=False), + Column("spare_audit_requested", Boolean, nullable=False, default=False), + Column("spare2_audit_requested", Boolean, nullable=False, default=False), + Column("reserved", Text), + mysql_engine="InnoDB", + mysql_charset="utf8", ) subcloud_audits.create() # Create rows in the new table for each non-deleted subcloud. - subcloud_list = list(subclouds.select().where(subclouds.c.deleted == 0) - .order_by(subclouds.c.id).execute()) + subcloud_list = list( + subclouds.select() + .where(subclouds.c.deleted == 0) + .order_by(subclouds.c.id) + .execute() + ) for subcloud in subcloud_list: # pylint: disable-next=no-value-for-parameter - subcloud_audits.insert().execute({'subcloud_id': subcloud['id']}) + subcloud_audits.insert().execute({"subcloud_id": subcloud["id"]}) def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/009_add_kube_rootca_audit.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/009_add_kube_rootca_audit.py index 4390d0121..4b02f0b7e 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/009_add_kube_rootca_audit.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/009_add_kube_rootca_audit.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2021 Wind River Systems, Inc. +# Copyright (c) 2021, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import MetaData @@ -13,16 +14,20 @@ def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine - subcloud_audits = Table('subcloud_audits', meta, autoload=True) + subcloud_audits = Table("subcloud_audits", meta, autoload=True) # Add the kube_rootca_update_audit_requested column to the audits table. 
- subcloud_audits.create_column(Column('kube_rootca_update_audit_requested', - Boolean, - nullable=False, - default=False, - server_default='0')) + subcloud_audits.create_column( + Column( + "kube_rootca_update_audit_requested", + Boolean, + nullable=False, + default=False, + server_default="0", + ) + ) return True def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/010_add_update_extra_args.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/010_add_update_extra_args.py index b5eecc9f3..2cbcb43c6 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/010_add_update_extra_args.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/010_add_update_extra_args.py @@ -1,8 +1,9 @@ # -# Copyright (c) 2021 Wind River Systems, Inc. +# Copyright (c) 2021, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # + import sqlalchemy @@ -11,14 +12,11 @@ def upgrade(migrate_engine): meta.bind = migrate_engine # Add the 'extra_args' column to the sw_update_strategy table. - sw_update_strategy = sqlalchemy.Table('sw_update_strategy', - meta, - autoload=True) + sw_update_strategy = sqlalchemy.Table("sw_update_strategy", meta, autoload=True) # JSONEncodedDict is stored in the database as Text - sw_update_strategy.create_column(sqlalchemy.Column('extra_args', - sqlalchemy.Text)) + sw_update_strategy.create_column(sqlalchemy.Column("extra_args", sqlalchemy.Text)) return True def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/011_add_subcloud_backup_columns.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/011_add_subcloud_backup_columns.py index 7458693c7..84e3216ce 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/011_add_subcloud_backup_columns.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/011_add_subcloud_backup_columns.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2022 Wind River Systems, Inc. +# Copyright (c) 2022, 2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -11,14 +11,14 @@ def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine - subclouds = Table('subclouds', meta, autoload=True) + subclouds = Table("subclouds", meta, autoload=True) # Add the backup-related columns to the subclouds table. 
- subclouds.create_column(Column('backup_status', String(255))) - subclouds.create_column(Column('backup_datetime', DateTime(timezone=False))) + subclouds.create_column(Column("backup_status", String(255))) + subclouds.create_column(Column("backup_datetime", DateTime(timezone=False))) return True def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/012_add_deploy_error_desc_column.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/012_add_deploy_error_desc_column.py index 6d926df08..212d19d1f 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/012_add_deploy_error_desc_column.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/012_add_deploy_error_desc_column.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022 Wind River Systems, Inc. +# Copyright (c) 2022, 2024 Wind River Systems, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -19,15 +19,15 @@ def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine - subclouds = Table('subclouds', meta, autoload=True) + subclouds = Table("subclouds", meta, autoload=True) # Add the 'error_description' column to the subclouds table. - subclouds.create_column(Column('error_description', - String(2048), - default="No errors present")) + subclouds.create_column( + Column("error_description", String(2048), default="No errors present") + ) return True def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/013_add_subcloud_region_name_column.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/013_add_subcloud_region_name_column.py index 1e7ee2972..a3e3e8d44 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/013_add_subcloud_region_name_column.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/013_add_subcloud_region_name_column.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -19,14 +19,13 @@ def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine - subclouds = Table('subclouds', meta, autoload=True) + subclouds = Table("subclouds", meta, autoload=True) # Add the 'region_name' column to the subclouds table. 
- subclouds.create_column(Column('region_name', - String(255))) + subclouds.create_column(Column("region_name", String(255))) # populates region_name with name field value for existing subclouds - if migrate_engine.name == 'postgresql': + if migrate_engine.name == "postgresql": with migrate_engine.begin() as conn: conn.execute("UPDATE subclouds SET region_name = name") @@ -34,4 +33,4 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/014_add_subcloud_peer_group_and_association.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/014_add_subcloud_peer_group_and_association.py index 9d794e969..71da6a6ae 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/014_add_subcloud_peer_group_and_association.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/014_add_subcloud_peer_group_and_association.py @@ -1,106 +1,122 @@ -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # import sqlalchemy -ENGINE = 'InnoDB', -CHARSET = 'utf8' +ENGINE = ("InnoDB",) +CHARSET = "utf8" def upgrade(migrate_engine): meta = sqlalchemy.MetaData(bind=migrate_engine) - subclouds = sqlalchemy.Table('subclouds', meta, autoload=True) + subclouds = sqlalchemy.Table("subclouds", meta, autoload=True) # Add the 'rehome_data' column to the subclouds table. - subclouds.create_column(sqlalchemy.Column('rehome_data', sqlalchemy.Text)) + subclouds.create_column(sqlalchemy.Column("rehome_data", sqlalchemy.Text)) # Declare the new subcloud_peer_group table subcloud_peer_group = sqlalchemy.Table( - 'subcloud_peer_group', meta, - sqlalchemy.Column('id', sqlalchemy.Integer, - primary_key=True, - autoincrement=True, - nullable=False), - sqlalchemy.Column('peer_group_name', sqlalchemy.String(255), unique=True), - sqlalchemy.Column('group_priority', sqlalchemy.Integer), - sqlalchemy.Column('group_state', sqlalchemy.String(255)), - sqlalchemy.Column('system_leader_id', sqlalchemy.String(255)), - sqlalchemy.Column('system_leader_name', sqlalchemy.String(255)), - sqlalchemy.Column('max_subcloud_rehoming', sqlalchemy.Integer), - sqlalchemy.Column('migration_status', sqlalchemy.String(255)), - sqlalchemy.Column('reserved_1', sqlalchemy.Text), - sqlalchemy.Column('reserved_2', sqlalchemy.Text), - sqlalchemy.Column('created_at', sqlalchemy.DateTime), - sqlalchemy.Column('updated_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted', sqlalchemy.Integer, default=0), + "subcloud_peer_group", + meta, + sqlalchemy.Column( + "id", + sqlalchemy.Integer, + primary_key=True, + autoincrement=True, + nullable=False, + ), + sqlalchemy.Column("peer_group_name", sqlalchemy.String(255), unique=True), + sqlalchemy.Column("group_priority", sqlalchemy.Integer), + sqlalchemy.Column("group_state", sqlalchemy.String(255)), + sqlalchemy.Column("system_leader_id", sqlalchemy.String(255)), + sqlalchemy.Column("system_leader_name", sqlalchemy.String(255)), + sqlalchemy.Column("max_subcloud_rehoming", sqlalchemy.Integer), + sqlalchemy.Column("migration_status", sqlalchemy.String(255)), + sqlalchemy.Column("reserved_1", sqlalchemy.Text), + sqlalchemy.Column("reserved_2", sqlalchemy.Text), + sqlalchemy.Column("created_at", sqlalchemy.DateTime), + 
sqlalchemy.Column("updated_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted", sqlalchemy.Integer, default=0), mysql_engine=ENGINE, - mysql_charset=CHARSET + mysql_charset=CHARSET, ) subcloud_peer_group.create() # Add the 'peer_greoup_id' column to the subclouds table. - subclouds.create_column(sqlalchemy.Column('peer_group_id', sqlalchemy.Integer)) + subclouds.create_column(sqlalchemy.Column("peer_group_id", sqlalchemy.Integer)) # Declare the new system_peer table system_peer = sqlalchemy.Table( - 'system_peer', meta, - sqlalchemy.Column('id', sqlalchemy.Integer, - primary_key=True, - autoincrement=True, - nullable=False), - sqlalchemy.Column('peer_uuid', sqlalchemy.String(36), unique=True), - sqlalchemy.Column('peer_name', sqlalchemy.String(255), unique=True), - sqlalchemy.Column('manager_endpoint', sqlalchemy.String(255)), - sqlalchemy.Column('manager_username', sqlalchemy.String(255)), - sqlalchemy.Column('manager_password', sqlalchemy.String(255)), - sqlalchemy.Column('peer_controller_gateway_ip', sqlalchemy.String(255)), - sqlalchemy.Column('administrative_state', sqlalchemy.String(255)), - sqlalchemy.Column('heartbeat_interval', sqlalchemy.Integer), - sqlalchemy.Column('heartbeat_failure_threshold', sqlalchemy.Integer), - sqlalchemy.Column('heartbeat_failure_policy', sqlalchemy.String(255)), - sqlalchemy.Column('heartbeat_maintenance_timeout', sqlalchemy.Integer), - sqlalchemy.Column('availability_state', sqlalchemy.String(255)), - sqlalchemy.Column('reserved_1', sqlalchemy.Text), - sqlalchemy.Column('reserved_2', sqlalchemy.Text), - sqlalchemy.Column('created_at', sqlalchemy.DateTime), - sqlalchemy.Column('updated_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted', sqlalchemy.Integer, default=0), + "system_peer", + meta, + sqlalchemy.Column( + "id", + sqlalchemy.Integer, + primary_key=True, + autoincrement=True, + nullable=False, + ), + sqlalchemy.Column("peer_uuid", sqlalchemy.String(36), unique=True), + sqlalchemy.Column("peer_name", sqlalchemy.String(255), unique=True), + sqlalchemy.Column("manager_endpoint", sqlalchemy.String(255)), + sqlalchemy.Column("manager_username", sqlalchemy.String(255)), + sqlalchemy.Column("manager_password", sqlalchemy.String(255)), + sqlalchemy.Column("peer_controller_gateway_ip", sqlalchemy.String(255)), + sqlalchemy.Column("administrative_state", sqlalchemy.String(255)), + sqlalchemy.Column("heartbeat_interval", sqlalchemy.Integer), + sqlalchemy.Column("heartbeat_failure_threshold", sqlalchemy.Integer), + sqlalchemy.Column("heartbeat_failure_policy", sqlalchemy.String(255)), + sqlalchemy.Column("heartbeat_maintenance_timeout", sqlalchemy.Integer), + sqlalchemy.Column("availability_state", sqlalchemy.String(255)), + sqlalchemy.Column("reserved_1", sqlalchemy.Text), + sqlalchemy.Column("reserved_2", sqlalchemy.Text), + sqlalchemy.Column("created_at", sqlalchemy.DateTime), + sqlalchemy.Column("updated_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted", sqlalchemy.Integer, default=0), mysql_engine=ENGINE, - mysql_charset=CHARSET + mysql_charset=CHARSET, ) system_peer.create() # Declare the new peer_group_association table peer_group_association = sqlalchemy.Table( - 'peer_group_association', meta, - sqlalchemy.Column('id', sqlalchemy.Integer, - primary_key=True, - autoincrement=True, - nullable=False), - sqlalchemy.Column('peer_group_id', sqlalchemy.Integer, - 
sqlalchemy.ForeignKey('subcloud_peer_group.id', - ondelete='CASCADE')), - sqlalchemy.Column('system_peer_id', sqlalchemy.Integer, - sqlalchemy.ForeignKey('system_peer.id', - ondelete='CASCADE')), - sqlalchemy.Column('peer_group_priority', sqlalchemy.Integer), - sqlalchemy.Column('association_type', sqlalchemy.String(255)), - sqlalchemy.Column('sync_status', sqlalchemy.String(255)), - sqlalchemy.Column('sync_message', sqlalchemy.Text), - sqlalchemy.Column('reserved_1', sqlalchemy.Text), - sqlalchemy.Column('reserved_2', sqlalchemy.Text), - sqlalchemy.Column('created_at', sqlalchemy.DateTime), - sqlalchemy.Column('updated_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted_at', sqlalchemy.DateTime), - sqlalchemy.Column('deleted', sqlalchemy.Integer, default=0), + "peer_group_association", + meta, + sqlalchemy.Column( + "id", + sqlalchemy.Integer, + primary_key=True, + autoincrement=True, + nullable=False, + ), + sqlalchemy.Column( + "peer_group_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey("subcloud_peer_group.id", ondelete="CASCADE"), + ), + sqlalchemy.Column( + "system_peer_id", + sqlalchemy.Integer, + sqlalchemy.ForeignKey("system_peer.id", ondelete="CASCADE"), + ), + sqlalchemy.Column("peer_group_priority", sqlalchemy.Integer), + sqlalchemy.Column("association_type", sqlalchemy.String(255)), + sqlalchemy.Column("sync_status", sqlalchemy.String(255)), + sqlalchemy.Column("sync_message", sqlalchemy.Text), + sqlalchemy.Column("reserved_1", sqlalchemy.Text), + sqlalchemy.Column("reserved_2", sqlalchemy.Text), + sqlalchemy.Column("created_at", sqlalchemy.DateTime), + sqlalchemy.Column("updated_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted_at", sqlalchemy.DateTime), + sqlalchemy.Column("deleted", sqlalchemy.Integer, default=0), mysql_engine=ENGINE, - mysql_charset=CHARSET + mysql_charset=CHARSET, ) peer_group_association.create() def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/015_add_subcloud_rehome_flag_column.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/015_add_subcloud_rehome_flag_column.py index ae1c0e5b1..b6e9df8cb 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/015_add_subcloud_rehome_flag_column.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/015_add_subcloud_rehome_flag_column.py @@ -1,5 +1,5 @@ # -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # @@ -11,13 +11,13 @@ def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine - subclouds = Table('subclouds', meta, autoload=True) + subclouds = Table("subclouds", meta, autoload=True) # Add the 'rehomed' column to the subclouds table. 
- subclouds.create_column(Column('rehomed', Boolean, default=False)) + subclouds.create_column(Column("rehomed", Boolean, default=False)) return True def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/016_first_identity_sync_complete.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/016_first_identity_sync_complete.py index abf4f876b..89dbadc02 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/016_first_identity_sync_complete.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/016_first_identity_sync_complete.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023 Wind River Systems, Inc. +# Copyright (c) 2023-2024 Wind River Systems, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -18,13 +18,16 @@ import sqlalchemy def upgrade(migrate_engine): meta = sqlalchemy.MetaData() meta.bind = migrate_engine - subcloud = sqlalchemy.Table('subclouds', meta, autoload=True) + subcloud = sqlalchemy.Table("subclouds", meta, autoload=True) # Add the first_identity_sync_complete column - subcloud.create_column(sqlalchemy.Column('first_identity_sync_complete', - sqlalchemy.Boolean, - default=False)) + subcloud.create_column( + sqlalchemy.Column( + "first_identity_sync_complete", sqlalchemy.Boolean, default=False + ) + ) def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade not supported - ' - 'would drop all tables') + raise NotImplementedError( + "Database downgrade not supported - would drop all tables" + ) diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/017_add_subcloud_prestage_columns.py b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/017_add_subcloud_prestage_columns.py index 0ec974e2d..2a85afb2b 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/017_add_subcloud_prestage_columns.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migrate_repo/versions/017_add_subcloud_prestage_columns.py @@ -11,20 +11,20 @@ def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine - subclouds = Table('subclouds', meta, autoload=True) + subclouds = Table("subclouds", meta, autoload=True) # Add the 'prestage_status' and 'prestage_versions' columns to # the subclouds table. 
- subclouds.create_column(Column('prestage_status', String(255))) - subclouds.create_column(Column('prestage_versions', String(255))) + subclouds.create_column(Column("prestage_status", String(255))) + subclouds.create_column(Column("prestage_versions", String(255))) # Update existing subclouds that have the old prestaging deploy status subclouds.update().where( # pylint: disable=E1120 - subclouds.c.deploy_status.like('prestage%')).values( - {'deploy_status': 'complete'}).execute() + subclouds.c.deploy_status.like("prestage%") + ).values({"deploy_status": "complete"}).execute() return True def downgrade(migrate_engine): - raise NotImplementedError('Database downgrade is unsupported.') + raise NotImplementedError("Database downgrade is unsupported.") diff --git a/distributedcloud/dcmanager/db/sqlalchemy/migration.py b/distributedcloud/dcmanager/db/sqlalchemy/migration.py index f6a1bb6c4..0ad9c500b 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/migration.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/migration.py @@ -1,5 +1,5 @@ # Copyright (c) 2015 Ericsson AB. -# Copyright (c) 2017, 2019, 2021 Wind River Systems, Inc. +# Copyright (c) 2017, 2019, 2021, 2024 Wind River Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -24,19 +24,15 @@ INIT_VERSION = 0 def db_sync(engine, version=None): - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), - 'migrate_repo') - return oslo_migration.db_sync(engine, path, version, - init_version=INIT_VERSION) + path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "migrate_repo") + return oslo_migration.db_sync(engine, path, version, init_version=INIT_VERSION) def db_version(engine): - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), - 'migrate_repo') + path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "migrate_repo") return oslo_migration.db_version(engine, path, INIT_VERSION) def db_version_control(engine, version=None): - path = os.path.join(os.path.abspath(os.path.dirname(__file__)), - 'migrate_repo') + path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "migrate_repo") return oslo_migration.db_version_control(engine, path, version) diff --git a/distributedcloud/dcmanager/db/sqlalchemy/models.py b/distributedcloud/dcmanager/db/sqlalchemy/models.py index 140597e21..a5e260bd7 100644 --- a/distributedcloud/dcmanager/db/sqlalchemy/models.py +++ b/distributedcloud/dcmanager/db/sqlalchemy/models.py @@ -62,9 +62,7 @@ class JSONEncodedDict(TypeDecorator): return value -class DCManagerBase(models.ModelBase, - models.SoftDeleteMixin, - models.TimestampMixin): +class DCManagerBase(models.ModelBase, models.SoftDeleteMixin, models.TimestampMixin): """Base class for DC Manager Models.""" # __table_args__ = {'mysql_engine': 'InnoDB'} @@ -98,7 +96,7 @@ class DCManagerBase(models.ModelBase, class SystemPeer(BASE, DCManagerBase): """Represents a system peer""" - __tablename__ = 'system_peer' + __tablename__ = "system_peer" id = Column(Integer, primary_key=True, autoincrement=True, nullable=False) peer_uuid = Column(String(36), unique=True) @@ -118,7 +116,7 @@ class SystemPeer(BASE, DCManagerBase): class SubcloudGroup(BASE, DCManagerBase): """Represents a subcloud group""" - __tablename__ = 'subcloud_group' + __tablename__ = "subcloud_group" id = Column(Integer, primary_key=True, autoincrement=True, nullable=False) name = Column(String(255), unique=True) @@ -130,7 +128,7 @@ class SubcloudGroup(BASE, DCManagerBase): class 
SubcloudPeerGroup(BASE, DCManagerBase): """Represents a subcloud group""" - __tablename__ = 'subcloud_peer_group' + __tablename__ = "subcloud_peer_group" id = Column(Integer, primary_key=True, autoincrement=True, nullable=False) peer_group_name = Column(String(255), unique=True) @@ -145,7 +143,7 @@ class SubcloudPeerGroup(BASE, DCManagerBase): class PeerGroupAssociation(BASE, DCManagerBase): """Represents a Peer Group Association""" - __tablename__ = 'peer_group_association' + __tablename__ = "peer_group_association" id = Column(Integer, primary_key=True, autoincrement=True, nullable=False) peer_group_id = Column(Integer) @@ -159,7 +157,7 @@ class PeerGroupAssociation(BASE, DCManagerBase): class Subcloud(BASE, DCManagerBase): """Represents a subcloud""" - __tablename__ = 'subclouds' + __tablename__ = "subclouds" id = Column(Integer, primary_key=True, nullable=False) name = Column(String(255), unique=True) @@ -183,43 +181,34 @@ class Subcloud(BASE, DCManagerBase): systemcontroller_gateway_ip = Column(String(255)) audit_fail_count = Column(Integer) first_identity_sync_complete = Column(Boolean, default=False) - peer_group_id = Column(Integer, - ForeignKey('subcloud_peer_group.id')) + peer_group_id = Column(Integer, ForeignKey("subcloud_peer_group.id")) rehome_data = Column(Text()) prestage_status = Column(String(255)) prestage_versions = Column(String(255)) # multiple subclouds can be in a particular group - group_id = Column(Integer, - ForeignKey('subcloud_group.id')) - group = relationship(SubcloudGroup, - backref=backref('subcloud')) + group_id = Column(Integer, ForeignKey("subcloud_group.id")) + group = relationship(SubcloudGroup, backref=backref("subcloud")) rehomed = Column(Boolean, default=False) class SubcloudAudits(BASE, DCManagerBase): """Represents the various audits for a subcloud""" - __tablename__ = 'subcloud_audits' + __tablename__ = "subcloud_audits" id = Column(Integer, primary_key=True, nullable=False) subcloud_id = Column( - Integer, ForeignKey('subclouds.id', ondelete='CASCADE'), unique=True - ) - audit_started_at = Column( - DateTime(timezone=False), default=datetime.datetime.min - ) - audit_finished_at = Column( - DateTime(timezone=False), default=datetime.datetime.min + Integer, ForeignKey("subclouds.id", ondelete="CASCADE"), unique=True ) + audit_started_at = Column(DateTime(timezone=False), default=datetime.datetime.min) + audit_finished_at = Column(DateTime(timezone=False), default=datetime.datetime.min) state_update_requested = Column(Boolean, nullable=False, default=False) patch_audit_requested = Column(Boolean, nullable=False, default=False) load_audit_requested = Column(Boolean, nullable=False, default=False) firmware_audit_requested = Column(Boolean, nullable=False, default=False) kubernetes_audit_requested = Column(Boolean, nullable=False, default=False) - kube_rootca_update_audit_requested = Column( - Boolean, nullable=False, default=False - ) + kube_rootca_update_audit_requested = Column(Boolean, nullable=False, default=False) spare_audit_requested = Column(Boolean, nullable=False, default=False) spare2_audit_requested = Column(Boolean, nullable=False, default=False) reserved = Column(Text) @@ -231,8 +220,7 @@ class SubcloudStatus(BASE, DCManagerBase): __tablename__ = "subcloud_status" id = Column(Integer, primary_key=True, nullable=False) - subcloud_id = Column(Integer, - ForeignKey('subclouds.id', ondelete='CASCADE')) + subcloud_id = Column(Integer, ForeignKey("subclouds.id", ondelete="CASCADE")) endpoint_type = Column(String(255)) sync_status = 
Column(String(255)) @@ -257,8 +245,7 @@ class SwUpdateOpts(BASE, DCManagerBase): __tablename__ = "sw_update_opts" id = Column(Integer, primary_key=True, nullable=False) - subcloud_id = Column(Integer, - ForeignKey('subclouds.id', ondelete='CASCADE')) + subcloud_id = Column(Integer, ForeignKey("subclouds.id", ondelete="CASCADE")) storage_apply_type = Column(String(255)) worker_apply_type = Column(String(255)) @@ -288,26 +275,28 @@ class StrategyStep(BASE, DCManagerBase): __tablename__ = "strategy_steps" id = Column(Integer, primary_key=True, nullable=False) - subcloud_id = Column(Integer, - ForeignKey('subclouds.id', ondelete='CASCADE'), - unique=True) + subcloud_id = Column( + Integer, ForeignKey("subclouds.id", ondelete="CASCADE"), unique=True + ) stage = Column(Integer) state = Column(String(255)) details = Column(String(255)) started_at = Column(DateTime) finished_at = Column(DateTime) - subcloud = relationship('Subcloud', backref=backref("strategy_steps", - cascade="all,delete")) + subcloud = relationship( + "Subcloud", backref=backref("strategy_steps", cascade="all,delete") + ) class SubcloudAlarmSummary(BASE, DCManagerBase): """Represents a Distributed Cloud subcloud alarm aggregate""" - __tablename__ = 'subcloud_alarms' + + __tablename__ = "subcloud_alarms" id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) uuid = Column(String(36), unique=True) - name = Column('name', String(255), unique=True) - critical_alarms = Column('critical_alarms', Integer) - major_alarms = Column('major_alarms', Integer) - minor_alarms = Column('minor_alarms', Integer) - warnings = Column('warnings', Integer) - cloud_status = Column('cloud_status', String(64)) + name = Column("name", String(255), unique=True) + critical_alarms = Column("critical_alarms", Integer) + major_alarms = Column("major_alarms", Integer) + minor_alarms = Column("minor_alarms", Integer) + warnings = Column("warnings", Integer) + cloud_status = Column("cloud_status", String(64)) diff --git a/distributedcloud/dcmanager/db/utils.py b/distributedcloud/dcmanager/db/utils.py index e2223033c..2f42aecd2 100644 --- a/distributedcloud/dcmanager/db/utils.py +++ b/distributedcloud/dcmanager/db/utils.py @@ -1,5 +1,5 @@ # Copyright (c) 2015 Ericsson AB. -# Copyright (c) 2017, 2019, 2021, 2022 Wind River Systems, Inc. +# Copyright (c) 2017, 2019, 2021, 2022, 2024 Wind River Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -47,8 +47,8 @@ class LazyPluggable(object): return getattr(backend, key) -IMPL = LazyPluggable('backend', sqlalchemy='dcmanager.db.sqlalchemy.api') +IMPL = LazyPluggable("backend", sqlalchemy="dcmanager.db.sqlalchemy.api") -def purge_deleted(age, granularity='days'): +def purge_deleted(age, granularity="days"): IMPL.purge_deleted(age, granularity) diff --git a/distributedcloud/run_black.py b/distributedcloud/run_black.py index 1ece8c03b..b428ae340 100644 --- a/distributedcloud/run_black.py +++ b/distributedcloud/run_black.py @@ -27,6 +27,7 @@ formatted_modules = [ "dcmanager/api", "dcmanager/audit", "dcmanager/common", + "dcmanager/db", ]
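
The run_black.py hunk is the driver for this whole series: adding "dcmanager/db" to formatted_modules opts the db tree into Black enforcement. Since the script body itself is not part of this diff, the loop below is only a hypothetical sketch of how such a module list can gate formatting; "black --check" is the real flag that exits non-zero when a file would be reformatted:

    # Hypothetical gate; the module list mirrors the diff, but the loop is an
    # assumption about how run_black.py might consume it.
    import subprocess

    formatted_modules = [
        "dcmanager/api",
        "dcmanager/audit",
        "dcmanager/common",
        "dcmanager/db",  # newly opted in by this change
    ]

    for module in formatted_modules:
        # --check reports files that would change without rewriting them.
        subprocess.run(["black", "--check", module], check=True)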
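Reviewer note on the ENGINE lines in the subcloud_group and 014 migrations: Black preserves semantics, so the long-standing trailing comma survives the reformat. ENGINE = 'InnoDB', has always bound a one-element tuple rather than a string, and the new ENGINE = ("InnoDB",) spelling merely makes that explicit. A minimal sketch of the preserved behavior (illustrative, not part of the change):

    # The trailing comma binds a one-element tuple, not a string; Black's
    # ("InnoDB",) is the same value written unambiguously.
    ENGINE = "InnoDB",
    assert ENGINE == ("InnoDB",)
    assert not isinstance(ENGINE, str)

Because mysql_engine= is a MySQL-dialect keyword that non-MySQL backends ignore when emitting DDL, converting it to a plain string would be a behavior question for a separate change, not for a formatting-only pass.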
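For context on how the reformatted migration.py wrappers are exercised: db_sync points oslo.db's migration helper at the migrate_repo directory holding the numbered scripts above and applies them in order starting from INIT_VERSION. A minimal usage sketch, with an assumed connection URL (the real callers are not part of this diff):

    from sqlalchemy import create_engine

    from dcmanager.db.sqlalchemy import migration

    # Assumed connection URL, for illustration only.
    engine = create_engine("postgresql://user:password@localhost/dcmanager")
    migration.db_sync(engine)  # walk migrate_repo from INIT_VERSION to latest
    print(migration.db_version(engine))  # current schema version after sync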