Apply black formatter to dccommon

This commit applies the Black formatter to the `dccommon` files so that
they adhere to the Black code style guidelines.

Test Plan:
PASS: Success in stx-distcloud-tox-black

Story: 2011149
Task: 50411

Change-Id: I98171db7dcedbda57fd78059d45aec93e5c9cabd
Signed-off-by: Hugo Brito <hugo.brito@windriver.com>

parent 91a2305160
commit 69970aac60
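As an illustration only (not part of the commit), the sketch below shows the
kind of rewrite Black performs throughout this diff. It assumes the `black`
package is installed; the sample source string is lifted from the consts
change further down rather than taken from the real tree.

    # Minimal sketch of the normalization Black applies in this commit:
    # single-quoted strings become double-quoted, and long collection
    # literals are exploded one element per line with a trailing comma.
    import black

    old_src = "ANSIBLE_OVERRIDES_PATH = '/opt/dc-vault/ansible'\n"

    # format_str() reformats a source string; line_length=88 is Black's default.
    new_src = black.format_str(old_src, mode=black.Mode(line_length=88))
    print(new_src)  # ANSIBLE_OVERRIDES_PATH = "/opt/dc-vault/ansible"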
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2020-2021 Wind River Systems, Inc.
+# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
@@ -16,4 +15,4 @@
 import pbr.version


-__version__ = pbr.version.VersionInfo('distributedcloud').version_string()
+__version__ = pbr.version.VersionInfo("distributedcloud").version_string()
@@ -33,12 +33,12 @@ CLOUD_0 = "RegionOne"
 VIRTUAL_MASTER_CLOUD = "SystemController"

 SW_UPDATE_DEFAULT_TITLE = "all clouds default"
-ANSIBLE_OVERRIDES_PATH = '/opt/dc-vault/ansible'
-LOAD_VAULT_DIR = '/opt/dc-vault/loads'
-DEPLOY_DIR = '/opt/platform/deploy'
+ANSIBLE_OVERRIDES_PATH = "/opt/dc-vault/ansible"
+LOAD_VAULT_DIR = "/opt/dc-vault/loads"
+DEPLOY_DIR = "/opt/platform/deploy"

 USER_HEADER_VALUE = "distcloud"
-USER_HEADER = {'User-Header': USER_HEADER_VALUE}
+USER_HEADER = {"User-Header": USER_HEADER_VALUE}

 ADMIN_USER_NAME = "admin"
 ADMIN_PROJECT_NAME = "admin"
@@ -46,42 +46,47 @@ SYSINV_USER_NAME = "sysinv"
 DCMANAGER_USER_NAME = "dcmanager"
 SERVICES_USER_NAME = "services"

-NOVA_QUOTA_FIELDS = ("metadata_items",
-                     "cores",
-                     "instances",
-                     "ram",
-                     "key_pairs",
-                     "injected_files",
-                     "injected_file_path_bytes",
-                     "injected_file_content_bytes",
-                     "server_group_members",
-                     "server_groups",)
+NOVA_QUOTA_FIELDS = (
+    "metadata_items",
+    "cores",
+    "instances",
+    "ram",
+    "key_pairs",
+    "injected_files",
+    "injected_file_path_bytes",
+    "injected_file_content_bytes",
+    "server_group_members",
+    "server_groups",
+)

-CINDER_QUOTA_FIELDS = ("volumes",
-                       "volumes_iscsi",
-                       "volumes_ceph",
-                       "per_volume_gigabytes",
-                       "groups",
-                       "snapshots",
-                       "snapshots_iscsi",
-                       "snapshots_ceph",
-                       "gigabytes",
-                       "gigabytes_iscsi",
-                       "gigabytes_ceph",
-                       "backups",
-                       "backup_gigabytes")
+CINDER_QUOTA_FIELDS = (
+    "volumes",
+    "volumes_iscsi",
+    "volumes_ceph",
+    "per_volume_gigabytes",
+    "groups",
+    "snapshots",
+    "snapshots_iscsi",
+    "snapshots_ceph",
+    "gigabytes",
+    "gigabytes_iscsi",
+    "gigabytes_ceph",
+    "backups",
+    "backup_gigabytes",
+)

-NEUTRON_QUOTA_FIELDS = ("network",
-                        "subnet",
-                        "subnetpool",
-                        "rbac_policy",
-                        "trunk",
-                        "port",
-                        "router",
-                        "floatingip",
-                        "security_group",
-                        "security_group_rule",
-                        )
+NEUTRON_QUOTA_FIELDS = (
+    "network",
+    "subnet",
+    "subnetpool",
+    "rbac_policy",
+    "trunk",
+    "port",
+    "router",
+    "floatingip",
+    "security_group",
+    "security_group_rule",
+)

 ENDPOINT_TYPE_PLATFORM = "platform"
 ENDPOINT_TYPE_PATCHING = "patching"
@@ -90,33 +95,35 @@ ENDPOINT_TYPE_FM = "faultmanagement"
 ENDPOINT_TYPE_NFV = "nfv"
 ENDPOINT_TYPE_SOFTWARE = "usm"
 ENDPOINT_TYPE_LOAD = "load"
-ENDPOINT_TYPE_DC_CERT = 'dc-cert'
-ENDPOINT_TYPE_FIRMWARE = 'firmware'
-ENDPOINT_TYPE_KUBERNETES = 'kubernetes'
-ENDPOINT_TYPE_KUBE_ROOTCA = 'kube-rootca'
+ENDPOINT_TYPE_DC_CERT = "dc-cert"
+ENDPOINT_TYPE_FIRMWARE = "firmware"
+ENDPOINT_TYPE_KUBERNETES = "kubernetes"
+ENDPOINT_TYPE_KUBE_ROOTCA = "kube-rootca"

 # All endpoint types
-ENDPOINT_TYPES_LIST = [ENDPOINT_TYPE_PLATFORM,
-                       ENDPOINT_TYPE_PATCHING,
-                       ENDPOINT_TYPE_IDENTITY,
-                       ENDPOINT_TYPE_LOAD,
-                       ENDPOINT_TYPE_DC_CERT,
-                       ENDPOINT_TYPE_FIRMWARE,
-                       ENDPOINT_TYPE_KUBERNETES,
-                       ENDPOINT_TYPE_KUBE_ROOTCA,
-                       ENDPOINT_TYPE_SOFTWARE]
+ENDPOINT_TYPES_LIST = [
+    ENDPOINT_TYPE_PLATFORM,
+    ENDPOINT_TYPE_PATCHING,
+    ENDPOINT_TYPE_IDENTITY,
+    ENDPOINT_TYPE_LOAD,
+    ENDPOINT_TYPE_DC_CERT,
+    ENDPOINT_TYPE_FIRMWARE,
+    ENDPOINT_TYPE_KUBERNETES,
+    ENDPOINT_TYPE_KUBE_ROOTCA,
+    ENDPOINT_TYPE_SOFTWARE,
+]

 # All endpoint audit requests
 # TODO(nicodemos): The ENDPOINT_TYPE_SOFTWARE will use the 'spare_audit_requested'
 # temporarily until the USM feature is fully complete. Afterward, the software audit
 # will replace the patch audit.
 ENDPOINT_AUDIT_REQUESTS = {
-    ENDPOINT_TYPE_FIRMWARE: 'firmware_audit_requested',
-    ENDPOINT_TYPE_KUBERNETES: 'kubernetes_audit_requested',
-    ENDPOINT_TYPE_KUBE_ROOTCA: 'kube_rootca_update_audit_requested',
-    ENDPOINT_TYPE_LOAD: 'load_audit_requested',
-    ENDPOINT_TYPE_PATCHING: 'patch_audit_requested',
-    ENDPOINT_TYPE_SOFTWARE: 'spare_audit_requested',
+    ENDPOINT_TYPE_FIRMWARE: "firmware_audit_requested",
+    ENDPOINT_TYPE_KUBERNETES: "kubernetes_audit_requested",
+    ENDPOINT_TYPE_KUBE_ROOTCA: "kube_rootca_update_audit_requested",
+    ENDPOINT_TYPE_LOAD: "load_audit_requested",
+    ENDPOINT_TYPE_PATCHING: "patch_audit_requested",
+    ENDPOINT_TYPE_SOFTWARE: "spare_audit_requested",
 }

 # Well known region names
@@ -137,14 +144,16 @@ SYNC_STATUS_IN_SYNC = "in-sync"
 SYNC_STATUS_OUT_OF_SYNC = "out-of-sync"

 # Subcloud deploy configuration status
-DEPLOY_CONFIG_UP_TO_DATE = 'Deployment: configurations up-to-date'
-DEPLOY_CONFIG_OUT_OF_DATE = 'Deployment: configurations out-of-date'
-MONITORED_ALARM_ENTITIES = ['host.starlingx.windriver.com', ]
+DEPLOY_CONFIG_UP_TO_DATE = "Deployment: configurations up-to-date"
+DEPLOY_CONFIG_OUT_OF_DATE = "Deployment: configurations out-of-date"
+MONITORED_ALARM_ENTITIES = [
+    "host.starlingx.windriver.com",
+]

 # OS type
-OS_RELEASE_FILE = '/etc/os-release'
-OS_CENTOS = 'centos'
-OS_DEBIAN = 'debian'
+OS_RELEASE_FILE = "/etc/os-release"
+OS_CENTOS = "centos"
+OS_DEBIAN = "debian"
 SUPPORTED_OS_TYPES = [OS_CENTOS, OS_DEBIAN]

 # SSL cert
@@ -153,69 +162,71 @@ CERT_CA_FILE_DEBIAN = "ca-cert.crt"
 SSL_CERT_CA_DIR = "/etc/pki/ca-trust/source/anchors/"

 # RVMC
-RVMC_NAME_PREFIX = 'rvmc'
-RVMC_CONFIG_FILE_NAME = 'rvmc-config.yaml'
+RVMC_NAME_PREFIX = "rvmc"
+RVMC_CONFIG_FILE_NAME = "rvmc-config.yaml"

 # Required for GEO-redundancy
 # User-Agent check for subcloud by region_name request.
-DCMANAGER_V1_HTTP_AGENT = 'dcmanager/1.0'
+DCMANAGER_V1_HTTP_AGENT = "dcmanager/1.0"

 # Subcloud installation values
 BMC_INSTALL_VALUES = [
-    'bmc_username',
-    'bmc_address',
-    'bmc_password',
+    "bmc_username",
+    "bmc_address",
+    "bmc_password",
 ]

 MANDATORY_INSTALL_VALUES = [
-    'bootstrap_interface',
-    'bootstrap_address',
-    'bootstrap_address_prefix',
-    'install_type',
+    "bootstrap_interface",
+    "bootstrap_address",
+    "bootstrap_address_prefix",
+    "install_type",
 ] + BMC_INSTALL_VALUES

 OPTIONAL_INSTALL_VALUES = [
-    'nexthop_gateway',
-    'network_address',
-    'network_mask',
-    'console_type',
-    'bootstrap_vlan',
-    'rootfs_device',
-    'boot_device',
-    'rd.net.timeout.ipv6dad',
-    'no_check_certificate',
-    'persistent_size',
-    'hw_settle',
-    'extra_boot_params',
+    "nexthop_gateway",
+    "network_address",
+    "network_mask",
+    "console_type",
+    "bootstrap_vlan",
+    "rootfs_device",
+    "boot_device",
+    "rd.net.timeout.ipv6dad",
+    "no_check_certificate",
+    "persistent_size",
+    "hw_settle",
+    "extra_boot_params",
 ]

 GEN_ISO_OPTIONS = {
-    'bootstrap_interface': '--boot-interface',
-    'bootstrap_address': '--boot-ip',
-    'bootstrap_address_prefix': '--boot-netmask',
-    'install_type': '--default-boot',
-    'nexthop_gateway': "--boot-gateway",
-    'rootfs_device': '--param',
-    'boot_device': '--param',
-    'rd.net.timeout.ipv6dad': '--param',
-    'bootstrap_vlan': '--param',
-    'no_check_certificate': '--param',
-    'persistent_size': '--param',
-    'hw_settle': '--param',
-    'extra_boot_params': '--param',
+    "bootstrap_interface": "--boot-interface",
+    "bootstrap_address": "--boot-ip",
+    "bootstrap_address_prefix": "--boot-netmask",
+    "install_type": "--default-boot",
+    "nexthop_gateway": "--boot-gateway",
+    "rootfs_device": "--param",
+    "boot_device": "--param",
+    "rd.net.timeout.ipv6dad": "--param",
+    "bootstrap_vlan": "--param",
+    "no_check_certificate": "--param",
+    "persistent_size": "--param",
+    "hw_settle": "--param",
+    "extra_boot_params": "--param",
 }

 SUPPORTED_INSTALL_TYPES = 6
-ANSIBLE_SUBCLOUD_INSTALL_PLAYBOOK = \
-    '/usr/share/ansible/stx-ansible/playbooks/install.yml'
+ANSIBLE_SUBCLOUD_INSTALL_PLAYBOOK = (
+    "/usr/share/ansible/stx-ansible/playbooks/install.yml"
+)

-ENROLL_INIT_SEED_ISO_NAME = 'seed.iso'
+ENROLL_INIT_SEED_ISO_NAME = "seed.iso"

-ANSIBLE_SUBCLOUD_ENROLL_PLAYBOOK = \
+ANSIBLE_SUBCLOUD_ENROLL_PLAYBOOK = (
     "/usr/share/ansible/stx-ansible/playbooks/enroll_subcloud.yml"
+)

 # Sysinv client default timeout
 SYSINV_CLIENT_REST_DEFAULT_TIMEOUT = 600

-SUBCLOUD_ISO_PATH = '/opt/platform/iso'
-SUBCLOUD_FEED_PATH = '/var/www/pages/feed'
+SUBCLOUD_ISO_PATH = "/opt/platform/iso"
+SUBCLOUD_FEED_PATH = "/var/www/pages/feed"
@@ -24,7 +24,7 @@ from dccommon import exceptions


 LOG = log.getLogger(__name__)
-API_VERSION = 'v1'
+API_VERSION = "v1"


 class BarbicanClient(base.DriverBase):
@@ -35,14 +35,15 @@ class BarbicanClient(base.DriverBase):
     """

     def __init__(
-        self, region, session,
-        endpoint_type=dccommon_consts.KS_ENDPOINT_DEFAULT):
+        self, region, session, endpoint_type=dccommon_consts.KS_ENDPOINT_DEFAULT
+    ):
         try:
             self.barbican_client = client.Client(
                 API_VERSION,
                 session=session,
                 region_name=region,
-                interface=endpoint_type)
+                interface=endpoint_type,
+            )

             self.region_name = region
         except exceptions.ServiceUnavailable:
@@ -20,15 +20,18 @@ DCMANAGER_CLIENT_REST_DEFAULT_TIMEOUT = 600
 class DcmanagerClient(base.DriverBase):
     """Dcmanager V1 driver."""

-    def __init__(self, region, session,
-                 timeout=DCMANAGER_CLIENT_REST_DEFAULT_TIMEOUT,
-                 endpoint_type=consts.KS_ENDPOINT_PUBLIC,
-                 endpoint=None):
+    def __init__(
+        self,
+        region,
+        session,
+        timeout=DCMANAGER_CLIENT_REST_DEFAULT_TIMEOUT,
+        endpoint_type=consts.KS_ENDPOINT_PUBLIC,
+        endpoint=None,
+    ):
         if endpoint is None:
             endpoint = session.get_endpoint(
-                service_type='dcmanager',
-                region_name=region,
-                interface=endpoint_type)
+                service_type="dcmanager", region_name=region, interface=endpoint_type
+            )
         self.endpoint = endpoint
         self.token = session.get_token()
         self.timeout = timeout
@@ -45,12 +48,12 @@ class DcmanagerClient(base.DriverBase):
         if response.status_code == 200:
             return response.json()
         else:
-            if response.status_code == 404 and \
-                    'System Peer not found' in response.text:
-                raise exceptions.SystemPeerNotFound(
-                    system_peer=system_peer_uuid)
-            message = "Get SystemPeer: system_peer_uuid %s failed with RC: %d" \
-                      % (system_peer_uuid, response.status_code)
+            if response.status_code == 404 and "System Peer not found" in response.text:
+                raise exceptions.SystemPeerNotFound(system_peer=system_peer_uuid)
+            message = "Get SystemPeer: system_peer_uuid %s failed with RC: %d" % (
+                system_peer_uuid,
+                response.status_code,
+            )
             LOG.error(message)
             raise Exception(message)

@@ -68,11 +71,12 @@ class DcmanagerClient(base.DriverBase):
         if response.status_code == 200:
             return response.json()
         else:
-            if response.status_code == 404 and \
-                    'Subcloud not found' in response.text:
+            if response.status_code == 404 and "Subcloud not found" in response.text:
                 raise exceptions.SubcloudNotFound(subcloud_ref=subcloud_ref)
-            message = "Get Subcloud: subcloud_ref %s failed with RC: %d" % \
-                      (subcloud_ref, response.status_code)
+            message = "Get Subcloud: subcloud_ref %s failed with RC: %d" % (
+                subcloud_ref,
+                response.status_code,
+            )
             LOG.error(message)
             raise Exception(message)

@@ -85,10 +89,9 @@ class DcmanagerClient(base.DriverBase):

         if response.status_code == 200:
             data = response.json()
-            return data.get('subclouds', [])
+            return data.get("subclouds", [])
         else:
-            message = "Get Subcloud list failed with RC: %d" % \
-                      response.status_code
+            message = "Get Subcloud list failed with RC: %d" % response.status_code
             LOG.error(message)
             raise Exception(message)

@@ -101,10 +104,11 @@ class DcmanagerClient(base.DriverBase):

         if response.status_code == 200:
             data = response.json()
-            return data.get('subcloud_groups', [])
+            return data.get("subcloud_groups", [])
         else:
-            message = "Get Subcloud Group list: failed with RC: %d" % \
-                      response.status_code
+            message = (
+                "Get Subcloud Group list: failed with RC: %d" % response.status_code
+            )
             LOG.error(message)
             raise Exception(message)

@@ -117,10 +121,12 @@ class DcmanagerClient(base.DriverBase):

         if response.status_code == 200:
             data = response.json()
-            return data.get('subcloud_peer_groups', [])
+            return data.get("subcloud_peer_groups", [])
         else:
-            message = "Get Subcloud Peer Group list: failed with RC: %d" % \
-                      response.status_code
+            message = (
+                "Get Subcloud Peer Group list: failed with RC: %d"
+                % response.status_code
+            )
             LOG.error(message)
             raise Exception(message)

@@ -136,12 +142,17 @@ class DcmanagerClient(base.DriverBase):
         if response.status_code == 200:
             return response.json()
         else:
-            if response.status_code == 404 and \
-                    'Subcloud Peer Group not found' in response.text:
+            if (
+                response.status_code == 404
+                and "Subcloud Peer Group not found" in response.text
+            ):
                 raise exceptions.SubcloudPeerGroupNotFound(
-                    peer_group_ref=peer_group_ref)
-            message = "Get Subcloud Peer Group: peer_group_ref %s " \
-                      "failed with RC: %d" % (peer_group_ref, response.status_code)
+                    peer_group_ref=peer_group_ref
+                )
+            message = (
+                "Get Subcloud Peer Group: peer_group_ref %s "
+                "failed with RC: %d" % (peer_group_ref, response.status_code)
+            )
             LOG.error(message)
             raise Exception(message)

@@ -149,34 +160,37 @@ class DcmanagerClient(base.DriverBase):
         """Get subclouds in the specified subcloud peer group."""
         if peer_group_ref is None:
             raise ValueError("peer_group_ref is required.")
-        url = f"{self.endpoint}/subcloud-peer-groups/{peer_group_ref}/" \
-              "subclouds"
+        url = f"{self.endpoint}/subcloud-peer-groups/{peer_group_ref}/subclouds"

         headers = {"X-Auth-Token": self.token}
         response = requests.get(url, headers=headers, timeout=self.timeout)

         if response.status_code == 200:
             data = response.json()
-            return data.get('subclouds', [])
+            return data.get("subclouds", [])
         else:
-            if response.status_code == 404 and \
-                    'Subcloud Peer Group not found' in response.text:
+            if (
+                response.status_code == 404
+                and "Subcloud Peer Group not found" in response.text
+            ):
                 raise exceptions.SubcloudPeerGroupNotFound(
-                    peer_group_ref=peer_group_ref)
-            message = "Get Subcloud list by Peer Group: peer_group_ref %s " \
-                      "failed with RC: %d" % (peer_group_ref, response.status_code)
+                    peer_group_ref=peer_group_ref
+                )
+            message = (
+                "Get Subcloud list by Peer Group: peer_group_ref %s "
+                "failed with RC: %d" % (peer_group_ref, response.status_code)
+            )
             LOG.error(message)
             raise Exception(message)

-    def get_peer_group_association_with_peer_id_and_pg_id(self, peer_id,
-                                                          pg_id):
+    def get_peer_group_association_with_peer_id_and_pg_id(self, peer_id, pg_id):
         """Get peer group association with peer id and PG id."""
         for association in self.get_peer_group_association_list():
-            if peer_id == association.get('system-peer-id') and \
-                    pg_id == association.get('peer-group-id'):
+            if peer_id == association.get(
+                "system-peer-id"
+            ) and pg_id == association.get("peer-group-id"):
                 return association
-        raise exceptions.PeerGroupAssociationNotFound(
-            association_id=None)
+        raise exceptions.PeerGroupAssociationNotFound(association_id=None)

     def get_peer_group_association_list(self):
         """Get peer group association list."""
@@ -187,10 +201,12 @@ class DcmanagerClient(base.DriverBase):

         if response.status_code == 200:
             data = response.json()
-            return data.get('peer_group_associations', [])
+            return data.get("peer_group_associations", [])
         else:
-            message = "Get Peer Group Association list failed with RC: %d" % \
-                      response.status_code
+            message = (
+                "Get Peer Group Association list failed with RC: %d"
+                % response.status_code
+            )
             LOG.error(message)
             raise Exception(message)

@@ -198,16 +214,18 @@ class DcmanagerClient(base.DriverBase):
         """Add a subcloud peer group."""
         url = f"{self.endpoint}/subcloud-peer-groups"

-        headers = {"X-Auth-Token": self.token,
-                   "Content-Type": "application/json"}
-        response = requests.post(url, json=kwargs, headers=headers,
-                                 timeout=self.timeout)
+        headers = {"X-Auth-Token": self.token, "Content-Type": "application/json"}
+        response = requests.post(
+            url, json=kwargs, headers=headers, timeout=self.timeout
+        )

         if response.status_code == 200:
             return response.json()
         else:
-            message = "Add Subcloud Peer Group: %s, failed with RC: %d" % \
-                      (kwargs, response.status_code)
+            message = "Add Subcloud Peer Group: %s, failed with RC: %d" % (
+                kwargs,
+                response.status_code,
+            )
             LOG.error(message)
             raise Exception(message)

@@ -218,30 +236,39 @@ class DcmanagerClient(base.DriverBase):
         # If not explicitly specified, set 'secondary' to true by default.
         # This action adds a secondary subcloud with rehoming data in the
         # peer site without creating an actual subcloud.
-        if 'secondary' in data and data['secondary'] != "true":
+        if "secondary" in data and data["secondary"] != "true":
             raise ValueError("secondary in data must true.")
-        data['secondary'] = "true"
+        data["secondary"] = "true"

         fields = dict()
         if files is not None:
             # If files are specified, add them to the fields.
             for k, v in files.items():
-                fields.update({k: (v, open(v, 'rb'),)})
+                fields.update(
+                    {
+                        k: (
+                            v,
+                            open(v, "rb"),
+                        )
+                    }
+                )

         fields.update(data)
         enc = MultipartEncoder(fields=fields)
-        headers = {"X-Auth-Token": self.token,
-                   "Content-Type": enc.content_type,
-                   "User-Agent": consts.DCMANAGER_V1_HTTP_AGENT}
-        response = requests.post(url, headers=headers, data=enc,
-                                 timeout=self.timeout)
+        headers = {
+            "X-Auth-Token": self.token,
+            "Content-Type": enc.content_type,
+            "User-Agent": consts.DCMANAGER_V1_HTTP_AGENT,
+        }
+        response = requests.post(url, headers=headers, data=enc, timeout=self.timeout)

         if response.status_code == 200:
             return response.json()
         else:
-            message = "Add Subcloud with secondary status: files: %s, " \
-                      "data: %s, failed with RC: %d" % (files, data,
-                                                        response.status_code)
+            message = (
+                "Add Subcloud with secondary status: files: %s, "
+                "data: %s, failed with RC: %d" % (files, data, response.status_code)
+            )
             LOG.error(message)
             raise Exception(message)

@@ -249,42 +276,48 @@ class DcmanagerClient(base.DriverBase):
         """Add a peer group association."""
         url = f"{self.endpoint}/peer-group-associations"

-        headers = {"X-Auth-Token": self.token,
-                   "Content-Type": "application/json"}
-        response = requests.post(url, json=kwargs, headers=headers,
-                                 timeout=self.timeout)
+        headers = {"X-Auth-Token": self.token, "Content-Type": "application/json"}
+        response = requests.post(
+            url, json=kwargs, headers=headers, timeout=self.timeout
+        )

         if response.status_code == 200:
             return response.json()
         else:
-            message = "Add Peer Group Association: %s, failed with RC: %d" % \
-                      (kwargs, response.status_code)
+            message = "Add Peer Group Association: %s, failed with RC: %d" % (
+                kwargs,
+                response.status_code,
+            )
             LOG.error(message)
             raise Exception(message)

-    def update_peer_group_association_sync_status(self, association_id,
-                                                  sync_status):
+    def update_peer_group_association_sync_status(self, association_id, sync_status):
         """Update the peer group association sync_status."""
         if association_id is None:
             raise ValueError("association_id is required.")
         url = f"{self.endpoint}/peer-group-associations/{association_id}"
         update_kwargs = {"sync_status": sync_status}

-        headers = {"X-Auth-Token": self.token,
-                   "Content-Type": "application/json"}
-        response = requests.patch(url, json=update_kwargs, headers=headers,
-                                  timeout=self.timeout)
+        headers = {"X-Auth-Token": self.token, "Content-Type": "application/json"}
+        response = requests.patch(
+            url, json=update_kwargs, headers=headers, timeout=self.timeout
+        )

         if response.status_code == 200:
             return response.json()
         else:
-            if response.status_code == 404 and \
-                    'Peer Group Association not found' in response.text:
+            if (
+                response.status_code == 404
+                and "Peer Group Association not found" in response.text
+            ):
                 raise exceptions.PeerGroupAssociationNotFound(
-                    association_id=association_id)
-            message = "Update Peer Group Association: association_id %s, " \
-                      "sync_status %s, failed with RC: %d" % (
-                          association_id, sync_status, response.status_code)
+                    association_id=association_id
+                )
+            message = (
+                "Update Peer Group Association: association_id %s, "
+                "sync_status %s, failed with RC: %d"
+                % (association_id, sync_status, response.status_code)
+            )
             LOG.error(message)
             raise Exception(message)

@@ -294,22 +327,29 @@ class DcmanagerClient(base.DriverBase):
             raise ValueError("peer_group_ref is required.")
         url = f"{self.endpoint}/subcloud-peer-groups/{peer_group_ref}"

-        headers = {"X-Auth-Token": self.token,
-                   "Content-Type": "application/json",
-                   "User-Agent": consts.DCMANAGER_V1_HTTP_AGENT}
-        response = requests.patch(url, json=kwargs, headers=headers,
-                                  timeout=self.timeout)
+        headers = {
+            "X-Auth-Token": self.token,
+            "Content-Type": "application/json",
+            "User-Agent": consts.DCMANAGER_V1_HTTP_AGENT,
+        }
+        response = requests.patch(
+            url, json=kwargs, headers=headers, timeout=self.timeout
+        )

         if response.status_code == 200:
             return response.json()
         else:
-            if response.status_code == 404 and \
-                    'Subcloud Peer Group not found' in response.text:
+            if (
+                response.status_code == 404
+                and "Subcloud Peer Group not found" in response.text
+            ):
                 raise exceptions.SubcloudPeerGroupNotFound(
-                    peer_group_ref=peer_group_ref)
-            message = "Update Subcloud Peer Group: peer_group_ref %s, %s, " \
-                      "failed with RC: %d" % (peer_group_ref, kwargs,
-                                              response.status_code)
+                    peer_group_ref=peer_group_ref
+                )
+            message = (
+                "Update Subcloud Peer Group: peer_group_ref %s, %s, "
+                "failed with RC: %d" % (peer_group_ref, kwargs, response.status_code)
+            )
             LOG.error(message)
             raise Exception(message)

@@ -319,21 +359,25 @@ class DcmanagerClient(base.DriverBase):
             raise ValueError("peer_group_ref is required.")
         url = f"{self.endpoint}/subcloud-peer-groups/{peer_group_ref}/audit"

-        headers = {"X-Auth-Token": self.token,
-                   "Content-Type": "application/json"}
-        response = requests.patch(url, json=kwargs, headers=headers,
-                                  timeout=self.timeout)
+        headers = {"X-Auth-Token": self.token, "Content-Type": "application/json"}
+        response = requests.patch(
+            url, json=kwargs, headers=headers, timeout=self.timeout
+        )

         if response.status_code == 200:
             return response.json()
         else:
-            if response.status_code == 404 and \
-                    'Subcloud Peer Group not found' in response.text:
+            if (
+                response.status_code == 404
+                and "Subcloud Peer Group not found" in response.text
+            ):
                 raise exceptions.SubcloudPeerGroupNotFound(
-                    peer_group_ref=peer_group_ref)
-            message = "Audit Subcloud Peer Group: peer_group_ref %s, %s, " \
-                      "failed with RC: %d" % (peer_group_ref, kwargs,
-                                              response.status_code)
+                    peer_group_ref=peer_group_ref
+                )
+            message = (
+                "Audit Subcloud Peer Group: peer_group_ref %s, %s, "
+                "failed with RC: %d" % (peer_group_ref, kwargs, response.status_code)
+            )
             LOG.error(message)
             raise Exception(message)

@@ -347,28 +391,34 @@ class DcmanagerClient(base.DriverBase):
         if files is not None:
             # If files are specified, add them to the fields.
             for k, v in files.items():
-                fields.update({k: (v, open(v, 'rb'),)})
+                fields.update(
+                    {
+                        k: (
+                            v,
+                            open(v, "rb"),
+                        )
+                    }
+                )

         fields.update(data)
         enc = MultipartEncoder(fields=fields)
-        headers = {"X-Auth-Token": self.token,
-                   "Content-Type": enc.content_type}
+        headers = {"X-Auth-Token": self.token, "Content-Type": enc.content_type}
         # Add header to flag the request is from another DC,
         # server will treat subcloud_ref as a region_name
         if is_region_name:
             headers["User-Agent"] = consts.DCMANAGER_V1_HTTP_AGENT
-        response = requests.patch(url, headers=headers, data=enc,
-                                  timeout=self.timeout)
+        response = requests.patch(url, headers=headers, data=enc, timeout=self.timeout)

         if response.status_code == 200:
             return response.json()
         else:
-            if response.status_code == 404 and \
-                    'Subcloud not found' in response.text:
+            if response.status_code == 404 and "Subcloud not found" in response.text:
                 raise exceptions.SubcloudNotFound(subcloud_ref=subcloud_ref)
-            message = "Update Subcloud: subcloud_ref: %s files: %s, " \
-                      "data: %s, failed with RC: %d" % (subcloud_ref, files, data,
-                                                        response.status_code)
+            message = (
+                "Update Subcloud: subcloud_ref: %s files: %s, "
+                "data: %s, failed with RC: %d"
+                % (subcloud_ref, files, data, response.status_code)
+            )
             LOG.error(message)
             raise Exception(message)

@@ -379,18 +429,22 @@ class DcmanagerClient(base.DriverBase):
         url = f"{self.endpoint}/peer-group-associations/{association_id}"

         headers = {"X-Auth-Token": self.token}
-        response = requests.delete(url, headers=headers,
-                                   timeout=self.timeout)
+        response = requests.delete(url, headers=headers, timeout=self.timeout)

         if response.status_code == 200:
             return response.json()
         else:
-            if response.status_code == 404 and \
-                    'Peer Group Association not found' in response.text:
+            if (
+                response.status_code == 404
+                and "Peer Group Association not found" in response.text
+            ):
                 raise exceptions.PeerGroupAssociationNotFound(
-                    association_id=association_id)
-            message = "Delete Peer Group Association: association_id %s " \
-                      "failed with RC: %d" % (association_id, response.status_code)
+                    association_id=association_id
+                )
+            message = (
+                "Delete Peer Group Association: association_id %s "
+                "failed with RC: %d" % (association_id, response.status_code)
+            )
             LOG.error(message)
             raise Exception(message)

@@ -401,24 +455,30 @@ class DcmanagerClient(base.DriverBase):
         url = f"{self.endpoint}/subcloud-peer-groups/{peer_group_ref}"

         headers = {"X-Auth-Token": self.token}
-        response = requests.delete(url, headers=headers,
-                                   timeout=self.timeout)
+        response = requests.delete(url, headers=headers, timeout=self.timeout)

         if response.status_code == 200:
             return response.json()
         else:
-            if response.status_code == 404 and \
-                    'Subcloud Peer Group not found' in response.text:
+            if (
+                response.status_code == 404
+                and "Subcloud Peer Group not found" in response.text
+            ):
                 raise exceptions.SubcloudPeerGroupNotFound(
-                    peer_group_ref=peer_group_ref)
-            elif response.status_code == 400 and \
-                    'a peer group which is associated with a system peer' in \
-                    response.text:
+                    peer_group_ref=peer_group_ref
+                )
+            elif (
+                response.status_code == 400
+                and "a peer group which is associated with a system peer"
+                in response.text
+            ):
                 raise exceptions.SubcloudPeerGroupDeleteFailedAssociated(
                     peer_group_ref=peer_group_ref
                 )
-            message = "Delete Subcloud Peer Group: peer_group_ref %s " \
-                      "failed with RC: %d" % (peer_group_ref, response.status_code)
+            message = (
+                "Delete Subcloud Peer Group: peer_group_ref %s "
+                "failed with RC: %d" % (peer_group_ref, response.status_code)
+            )
             LOG.error(message)
             raise Exception(message)

@@ -428,18 +488,20 @@ class DcmanagerClient(base.DriverBase):
             raise ValueError("subcloud_ref is required.")
         url = f"{self.endpoint}/subclouds/{subcloud_ref}"

-        headers = {"X-Auth-Token": self.token,
-                   "User-Agent": consts.DCMANAGER_V1_HTTP_AGENT}
-        response = requests.delete(url, headers=headers,
-                                   timeout=self.timeout)
+        headers = {
+            "X-Auth-Token": self.token,
+            "User-Agent": consts.DCMANAGER_V1_HTTP_AGENT,
+        }
+        response = requests.delete(url, headers=headers, timeout=self.timeout)

         if response.status_code == 200:
             return response.json()
         else:
-            if response.status_code == 404 and \
-                    'Subcloud not found' in response.text:
+            if response.status_code == 404 and "Subcloud not found" in response.text:
                 raise exceptions.SubcloudNotFound(subcloud_ref=subcloud_ref)
-            message = "Delete Subcloud: subcloud_ref %s failed with RC: %d" % \
-                      (subcloud_ref, response.status_code)
+            message = "Delete Subcloud: subcloud_ref %s failed with RC: %d" % (
+                subcloud_ref,
+                response.status_code,
+            )
             LOG.error(message)
             raise Exception(message)
@@ -13,40 +13,41 @@
 #
 #

-from oslo_log import log
-
 import fmclient
+from oslo_log import log

 from dccommon import consts as dccommon_consts
 from dccommon.drivers import base
 from dccommon import exceptions


 LOG = log.getLogger(__name__)
-API_VERSION = '1'
+API_VERSION = "1"


 class FmClient(base.DriverBase):
     """Fault Management driver."""

     def __init__(
-        self, region, session, endpoint_type=dccommon_consts.KS_ENDPOINT_DEFAULT,
-        endpoint=None
+        self,
+        region,
+        session,
+        endpoint_type=dccommon_consts.KS_ENDPOINT_DEFAULT,
+        endpoint=None,
     ):
         self.region_name = region
         try:
-            self.fm = fmclient.Client(API_VERSION,
-                                      session=session,
-                                      region_name=region,
-                                      endpoint_type=endpoint_type,
-                                      endpoint=endpoint)
+            self.fm = fmclient.Client(
+                API_VERSION,
+                session=session,
+                region_name=region,
+                endpoint_type=endpoint_type,
+                endpoint=endpoint,
+            )
         except exceptions.ServiceUnavailable:
             raise

     def get_alarm_summary(self):
-        """Get this region alarm summary
-
-        """
+        """Get this region alarm summary"""
         try:
             LOG.debug("get_alarm_summary region %s" % self.region_name)
             alarms = self.fm.alarm.summary()
@@ -58,11 +59,11 @@ class FmClient(base.DriverBase):
     def get_alarms_by_id(self, alarm_id):
         """Get list of this region alarms for a particular alarm_id"""
         try:
-            LOG.debug("get_alarms_by_id %s, region %s" % (alarm_id,
-                                                          self.region_name))
+            LOG.debug("get_alarms_by_id %s, region %s" % (alarm_id, self.region_name))
             alarms = self.fm.alarm.list(
-                q=fmclient.common.options.cli_to_array('alarm_id=' + alarm_id),
-                include_suppress=True)
+                q=fmclient.common.options.cli_to_array("alarm_id=" + alarm_id),
+                include_suppress=True,
+            )
         except Exception as e:
             LOG.error("get_alarms_by_id exception={}".format(e))
             raise e
@@ -71,16 +72,18 @@ class FmClient(base.DriverBase):
     def get_alarms_by_ids(self, alarm_id_list):
         """Get list of this region alarms for a list of alarm_ids"""
         try:
-            LOG.debug("get_alarms_by_ids %s, region %s" % (alarm_id_list,
-                                                           self.region_name))
+            LOG.debug(
+                "get_alarms_by_ids %s, region %s" % (alarm_id_list, self.region_name)
+            )
             # fm api does not support querying two alarm IDs at once so make
             # multiple calls and join the list
             alarms = []
             for alarm_id in alarm_id_list:
-                alarms.extend(self.fm.alarm.list(
-                    q=fmclient.common.options.cli_to_array(
-                        'alarm_id=' + alarm_id),
-                    include_suppress=True)
+                alarms.extend(
+                    self.fm.alarm.list(
+                        q=fmclient.common.options.cli_to_array("alarm_id=" + alarm_id),
+                        include_suppress=True,
+                    )
+                )
         except Exception as e:
             LOG.error("get_alarms_by_ids exception={}".format(e))
@@ -24,7 +24,7 @@ from dccommon.endpoint_cache import OptimizedEndpointCache
 from dccommon import exceptions

 # Ensure keystonemiddleware options are imported
-importutils.import_module('keystonemiddleware.auth_token')
+importutils.import_module("keystonemiddleware.auth_token")


 class KeystoneClient(base.DriverBase):
@@ -47,11 +47,17 @@ class KeystoneClient(base.DriverBase):
         try:
             project_list = self.keystone_client.projects.list()
             if id_only:
-                return [current_project.id for current_project in
-                        project_list if current_project.enabled]
+                return [
+                    current_project.id
+                    for current_project in project_list
+                    if current_project.enabled
+                ]
             else:
-                return [current_project for current_project in
-                        project_list if current_project.enabled]
+                return [
+                    current_project
+                    for current_project in project_list
+                    if current_project.enabled
+                ]
         except exceptions.InternalError:
             raise

@@ -78,11 +84,15 @@ class KeystoneClient(base.DriverBase):
         try:
             user_list = self.keystone_client.users.list()
             if id_only:
-                return [current_user.id for current_user in
-                        user_list if current_user.enabled]
+                return [
+                    current_user.id
+                    for current_user in user_list
+                    if current_user.enabled
+                ]
             else:
-                return [current_user for current_user in
-                        user_list if current_user.enabled]
+                return [
+                    current_user for current_user in user_list if current_user.enabled
+                ]
         except exceptions.InternalError:
             raise

@@ -119,9 +129,9 @@ class KeystoneClient(base.DriverBase):
         try:
             region_list = []
             endpoint_manager = endpoint_filter.EndpointFilterManager(
-                self.keystone_client)
-            endpoint_lists = endpoint_manager.list_endpoints_for_project(
-                project_id)
+                self.keystone_client
+            )
+            endpoint_lists = endpoint_manager.list_endpoints_for_project(project_id)
             for endpoint in endpoint_lists:
                 region_list.append(endpoint.region)
             return region_list
@@ -199,9 +209,9 @@ class OptimizedKeystoneClient(base.DriverBase):
                 current_user.id for current_user in user_list if current_user.enabled
             ]
         else:
-            return [
-                current_user for current_user in user_list if current_user.enabled
-            ]
+            return [current_user for current_user in user_list if current_user.enabled]

     def get_user_by_id(self, userid):
         if not userid:
@@ -1,5 +1,5 @@
 # Copyright 2016 Ericsson AB
-# Copyright (c) 2017-2021 Wind River Systems, Inc.
+# Copyright (c) 2017-2021, 2024 Wind River Systems, Inc.
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # a copy of the License at
@@ -23,12 +23,12 @@ from dccommon.drivers import base
 LOG = log.getLogger(__name__)

 # Patch states
-PATCH_STATE_AVAILABLE = 'Available'
-PATCH_STATE_APPLIED = 'Applied'
-PATCH_STATE_PARTIAL_APPLY = 'Partial-Apply'
-PATCH_STATE_PARTIAL_REMOVE = 'Partial-Remove'
-PATCH_STATE_COMMITTED = 'Committed'
-PATCH_STATE_UNKNOWN = 'n/a'
+PATCH_STATE_AVAILABLE = "Available"
+PATCH_STATE_APPLIED = "Applied"
+PATCH_STATE_PARTIAL_APPLY = "Partial-Apply"
+PATCH_STATE_PARTIAL_REMOVE = "Partial-Remove"
+PATCH_STATE_COMMITTED = "Committed"
+PATCH_STATE_UNKNOWN = "n/a"
 PATCH_REST_DEFAULT_TIMEOUT = 900


@@ -39,9 +39,10 @@ class PatchingClient(base.DriverBase):
         # Get an endpoint and token.
         if endpoint is None:
             self.endpoint = session.get_endpoint(
-                service_type='patching',
+                service_type="patching",
                 region_name=region,
-                interface=consts.KS_ENDPOINT_ADMIN)
+                interface=consts.KS_ENDPOINT_ADMIN,
+            )
         else:
             self.endpoint = endpoint

@@ -49,7 +50,7 @@ class PatchingClient(base.DriverBase):

     def query(self, state=None, release=None, timeout=PATCH_REST_DEFAULT_TIMEOUT):
         """Query patches"""
-        url = self.endpoint + '/v1/query'
+        url = self.endpoint + "/v1/query"
         if state is not None:
             url += "?show=%s" % state.lower()
         if release is not None:
@@ -59,12 +60,12 @@ class PatchingClient(base.DriverBase):

         if response.status_code == 200:
             data = response.json()
-            if 'error' in data and data["error"] != "":
+            if "error" in data and data["error"] != "":
                 message = "query failed with error: %s" % data["error"]
                 LOG.error(message)
                 raise Exception(message)
             else:
-                return data.get('pd', [])
+                return data.get("pd", [])
         else:
             message = "query failed with RC: %d" % response.status_code
             LOG.error(message)
@@ -72,18 +73,18 @@ class PatchingClient(base.DriverBase):

     def query_hosts(self, timeout=PATCH_REST_DEFAULT_TIMEOUT):
         """Query hosts"""
-        url = self.endpoint + '/v1/query_hosts'
+        url = self.endpoint + "/v1/query_hosts"
         headers = {"X-Auth-Token": self.token}
         response = requests.get(url, headers=headers, timeout=timeout)

         if response.status_code == 200:
             data = response.json()
-            if 'error' in data and data["error"] != "":
+            if "error" in data and data["error"] != "":
                 message = "query_hosts failed with error: %s" % data["error"]
                 LOG.error(message)
                 raise Exception(message)
             else:
-                return data.get('data', [])
+                return data.get("data", [])
         else:
             message = "query_hosts failed with RC: %d" % response.status_code
             LOG.error(message)
@@ -92,18 +93,18 @@ class PatchingClient(base.DriverBase):
     def apply(self, patches, timeout=PATCH_REST_DEFAULT_TIMEOUT):
         """Apply patches"""
         patch_str = "/".join(patches)
-        url = self.endpoint + '/v1/apply/%s' % patch_str
+        url = self.endpoint + "/v1/apply/%s" % patch_str
         headers = {"X-Auth-Token": self.token}
         response = requests.post(url, headers=headers, timeout=timeout)

         if response.status_code == 200:
             data = response.json()
-            if 'error' in data and data["error"] != "":
+            if "error" in data and data["error"] != "":
                 message = "apply failed with error: %s" % data["error"]
                 LOG.error(message)
                 raise Exception(message)
             else:
-                return data.get('pd', [])
+                return data.get("pd", [])
         else:
             message = "apply failed with RC: %d" % response.status_code
             LOG.error(message)
@@ -112,18 +113,18 @@ class PatchingClient(base.DriverBase):
     def remove(self, patches, timeout=PATCH_REST_DEFAULT_TIMEOUT):
         """Remove patches"""
         patch_str = "/".join(patches)
-        url = self.endpoint + '/v1/remove/%s' % patch_str
+        url = self.endpoint + "/v1/remove/%s" % patch_str
         headers = {"X-Auth-Token": self.token}
         response = requests.post(url, headers=headers, timeout=timeout)

         if response.status_code == 200:
             data = response.json()
-            if 'error' in data and data["error"] != "":
+            if "error" in data and data["error"] != "":
                 message = "remove failed with error: %s" % data["error"]
                 LOG.error(message)
                 raise Exception(message)
             else:
-                return data.get('pd', [])
+                return data.get("pd", [])
         else:
             message = "remove failed with RC: %d" % response.status_code
             LOG.error(message)
@@ -132,18 +133,18 @@ class PatchingClient(base.DriverBase):
     def delete(self, patches, timeout=PATCH_REST_DEFAULT_TIMEOUT):
         """Delete patches"""
         patch_str = "/".join(patches)
-        url = self.endpoint + '/v1/delete/%s' % patch_str
+        url = self.endpoint + "/v1/delete/%s" % patch_str
         headers = {"X-Auth-Token": self.token}
         response = requests.post(url, headers=headers, timeout=timeout)

         if response.status_code == 200:
             data = response.json()
-            if 'error' in data and data["error"] != "":
+            if "error" in data and data["error"] != "":
                 message = "delete failed with error: %s" % data["error"]
                 LOG.error(message)
                 raise Exception(message)
             else:
-                return data.get('pd', [])
+                return data.get("pd", [])
         else:
             message = "delete failed with RC: %d" % response.status_code
             LOG.error(message)
@@ -152,18 +153,18 @@ class PatchingClient(base.DriverBase):
     def commit(self, patches, timeout=PATCH_REST_DEFAULT_TIMEOUT):
         """Commit patches"""
         patch_str = "/".join(patches)
-        url = self.endpoint + '/v1/commit/%s' % patch_str
+        url = self.endpoint + "/v1/commit/%s" % patch_str
         headers = {"X-Auth-Token": self.token}
         response = requests.post(url, headers=headers, timeout=timeout)

         if response.status_code == 200:
             data = response.json()
-            if 'error' in data and data["error"] != "":
+            if "error" in data and data["error"] != "":
                 message = "commit failed with error: %s" % data["error"]
                 LOG.error(message)
                 raise Exception(message)
             else:
-                return data.get('pd', [])
+                return data.get("pd", [])
         else:
             message = "commit failed with RC: %d" % response.status_code
             LOG.error(message)
@@ -173,25 +174,26 @@ class PatchingClient(base.DriverBase):
         """Upload patches"""

         for file in sorted(list(set(files))):
-            enc = MultipartEncoder(fields={'file': (file,
-                                                    open(file, 'rb'),
-                                                    )})
-            url = self.endpoint + '/v1/upload'
-            headers = {"X-Auth-Token": self.token,
-                       'Content-Type': enc.content_type}
-            response = requests.post(url,
-                                     data=enc,
-                                     headers=headers,
-                                     timeout=timeout)
+            enc = MultipartEncoder(
+                fields={
+                    "file": (
+                        file,
+                        open(file, "rb"),
+                    )
+                }
+            )
+            url = self.endpoint + "/v1/upload"
+            headers = {"X-Auth-Token": self.token, "Content-Type": enc.content_type}
+            response = requests.post(url, data=enc, headers=headers, timeout=timeout)

             if response.status_code == 200:
                 data = response.json()
-                if 'error' in data and data["error"] != "":
+                if "error" in data and data["error"] != "":
                     message = "upload failed with error: %s" % data["error"]
                     LOG.error(message)
                     raise Exception(message)
                 else:
-                    return data.get('pd', [])
+                    return data.get("pd", [])
             else:
                 message = "upload failed with RC: %d" % response.status_code
                 LOG.error(message)
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 Wind River Systems, Inc.
+# Copyright (c) 2023-2024 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -21,9 +21,9 @@ from dccommon.utils import is_token_expiring_soon

 LOG = log.getLogger(__name__)

-LOCK_NAME = 'dc-openstackdriver-peer'
-KEYSTONE_CLIENT_NAME = 'keystone'
-AUTH_PLUGIN_PASSWORD = 'password'
+LOCK_NAME = "dc-openstackdriver-peer"
+KEYSTONE_CLIENT_NAME = "keystone"
+AUTH_PLUGIN_PASSWORD = "password"
 HTTP_CONNECT_TIMEOUT = 10


@@ -32,9 +32,15 @@ class PeerSiteDriver(object):
     os_clients_dict = collections.defaultdict(dict)
     _identity_tokens = {}

-    def __init__(self, site_uuid, auth_url, username, password,
-                 region_name=consts.CLOUD_0,
-                 endpoint_type=consts.KS_ENDPOINT_PUBLIC):
+    def __init__(
+        self,
+        site_uuid,
+        auth_url,
+        username,
+        password,
+        region_name=consts.CLOUD_0,
+        endpoint_type=consts.KS_ENDPOINT_PUBLIC,
+    ):
         if not (site_uuid and auth_url and username and password):
             raise exceptions.InvalidInputError

@@ -50,31 +56,39 @@ class PeerSiteDriver(object):
         self.keystone_client = self.get_cached_keystone_client(site_uuid)

         if self.keystone_client is None:
-            LOG.debug("No cached keystone client found. Creating new keystone "
-                      "client for peer site %s", site_uuid)
+            LOG.debug(
+                "No cached keystone client found. Creating new keystone "
+                "client for peer site %s",
+                site_uuid,
+            )
             try:
                 # Create the keystone client for this site with the provided
                 # username and password and auth_url.
                 self.keystone_client = PeerKeystoneClient(
-                    auth_url, username, password,
+                    auth_url,
+                    username,
+                    password,
                     region_name=region_name,
-                    auth_type=endpoint_type)
+                    auth_type=endpoint_type,
+                )
             except Exception as exception:
-                LOG.error('peer site %s keystone_client error: %s' %
-                          (site_uuid, str(exception)))
+                LOG.error(
+                    "peer site %s keystone_client error: %s"
+                    % (site_uuid, str(exception))
+                )
                 raise exception

             # Cache the client object
-            PeerSiteDriver.update_site_clients(site_uuid,
-                                               KEYSTONE_CLIENT_NAME,
-                                               self.keystone_client)
+            PeerSiteDriver.update_site_clients(
+                site_uuid, KEYSTONE_CLIENT_NAME, self.keystone_client
+            )

     @lockutils.synchronized(LOCK_NAME)
     def get_cached_keystone_client(self, site_uuid):
-        if ((site_uuid in PeerSiteDriver.os_clients_dict) and
-                self._is_token_valid(site_uuid)):
-            return (PeerSiteDriver.os_clients_dict[site_uuid][
-                KEYSTONE_CLIENT_NAME])
+        if (site_uuid in PeerSiteDriver.os_clients_dict) and self._is_token_valid(
+            site_uuid
+        ):
+            return PeerSiteDriver.os_clients_dict[site_uuid][KEYSTONE_CLIENT_NAME]

     @classmethod
     @lockutils.synchronized(LOCK_NAME)
@@ -84,8 +98,7 @@ class PeerSiteDriver(object):
     @classmethod
     @lockutils.synchronized(LOCK_NAME)
     def delete_site_clients(cls, site_uuid, clear_token=False):
-        LOG.warn("delete_site_clients=%s, clear_token=%s" %
-                 (site_uuid, clear_token))
+        LOG.warn("delete_site_clients=%s, clear_token=%s" % (site_uuid, clear_token))
         if site_uuid in cls.os_clients_dict:
             del cls.os_clients_dict[site_uuid]
         if clear_token:
@@ -94,40 +107,46 @@ class PeerSiteDriver(object):
     def _is_token_valid(self, site_uuid):
         try:
             keystone = PeerSiteDriver.os_clients_dict[site_uuid][
-                KEYSTONE_CLIENT_NAME].keystone_client
-            if (not PeerSiteDriver._identity_tokens
-                    or site_uuid not in PeerSiteDriver._identity_tokens
-                    or not PeerSiteDriver._identity_tokens[site_uuid]):
-                PeerSiteDriver._identity_tokens[site_uuid] = \
-                    keystone.tokens.validate(keystone.session.get_token(),
-                                             include_catalog=False)
-                LOG.info("Token for peer site %s expires_at=%s" %
-                         (site_uuid,
-                          PeerSiteDriver._identity_tokens[site_uuid]
-                          ['expires_at']))
+                KEYSTONE_CLIENT_NAME
+            ].keystone_client
+            if (
+                not PeerSiteDriver._identity_tokens
+                or site_uuid not in PeerSiteDriver._identity_tokens
+                or not PeerSiteDriver._identity_tokens[site_uuid]
+            ):
+                PeerSiteDriver._identity_tokens[site_uuid] = keystone.tokens.validate(
+                    keystone.session.get_token(), include_catalog=False
+                )
+                LOG.info(
+                    "Token for peer site %s expires_at=%s"
+                    % (
+                        site_uuid,
+                        PeerSiteDriver._identity_tokens[site_uuid]["expires_at"],
+                    )
+                )
         except Exception as exception:
-            LOG.warn('_is_token_valid handle: site: %s error: %s' %
-                     (site_uuid, str(exception)))
+            LOG.warn(
+                "_is_token_valid handle: site: %s error: %s"
+                % (site_uuid, str(exception))
+            )
             # Reset the cached dictionary
-            PeerSiteDriver.os_clients_dict[site_uuid] = \
-                collections.defaultdict(dict)
+            PeerSiteDriver.os_clients_dict[site_uuid] = collections.defaultdict(dict)
            PeerSiteDriver._identity_tokens[site_uuid] = None
            return False

         token_expiring_soon = is_token_expiring_soon(
-            token=self._identity_tokens[site_uuid])
+            token=self._identity_tokens[site_uuid]
+        )

         # If token is expiring soon, reset cached dictionaries and return False.
         # Else return true.
         if token_expiring_soon:
-            LOG.info("The cached keystone token for peer site %s "
-                     "will expire soon %s" %
-                     (site_uuid,
-                      PeerSiteDriver._identity_tokens[site_uuid]
-                      ['expires_at']))
+            LOG.info(
+                "The cached keystone token for peer site %s will expire soon %s"
+                % (site_uuid, PeerSiteDriver._identity_tokens[site_uuid]["expires_at"])
+            )
             # Reset the cached dictionary
-            PeerSiteDriver.os_clients_dict[site_uuid] = \
-                collections.defaultdict(dict)
+            PeerSiteDriver.os_clients_dict[site_uuid] = collections.defaultdict(dict)
             PeerSiteDriver._identity_tokens[site_uuid] = None
             return False
         else:
@@ -140,12 +159,17 @@ class PeerKeystoneClient(base.DriverBase):
     plugin_loader = None
     plugin_lock = threading.Lock()

-    def __init__(self, auth_url, username, password,
-                 region_name=consts.CLOUD_0,
-                 project_name=consts.KS_ENDPOINT_PROJECT_DEFAULT,
-                 project_domain_name=consts.KS_ENDPOINT_PROJECT_DOMAIN_DEFAULT,
-                 user_domain_name=consts.KS_ENDPOINT_USER_DOMAIN_DEFAULT,
-                 auth_type=consts.KS_ENDPOINT_PUBLIC):
+    def __init__(
+        self,
+        auth_url,
+        username,
+        password,
+        region_name=consts.CLOUD_0,
+        project_name=consts.KS_ENDPOINT_PROJECT_DEFAULT,
+        project_domain_name=consts.KS_ENDPOINT_PROJECT_DOMAIN_DEFAULT,
+        user_domain_name=consts.KS_ENDPOINT_USER_DOMAIN_DEFAULT,
+        auth_type=consts.KS_ENDPOINT_PUBLIC,
+    ):
         if not (auth_url and username and password):
             raise exceptions.InvalidInputError
         self.auth_url = auth_url
@@ -164,17 +188,26 @@ class PeerKeystoneClient(base.DriverBase):
             self.user_domain_name,
             self.password,
             self.project_name,
-            self.project_domain_name)
+            self.project_domain_name,
+        )
         self.keystone_client = self._create_keystone_client()

     @classmethod
-    def get_admin_session(cls, auth_url, user_name, user_domain_name,
-                          user_password, user_project, user_project_domain,
-                          timeout=None):
+    def get_admin_session(
+        cls,
+        auth_url,
+        user_name,
+        user_domain_name,
+        user_password,
+        user_project,
+        user_project_domain,
+        timeout=None,
+    ):
         with PeerKeystoneClient.plugin_lock:
             if PeerKeystoneClient.plugin_loader is None:
                 PeerKeystoneClient.plugin_loader = loading.get_plugin_loader(
-                    AUTH_PLUGIN_PASSWORD)
+                    AUTH_PLUGIN_PASSWORD
+                )

         user_auth = PeerKeystoneClient.plugin_loader.load_from_options(
             auth_url=auth_url,
@@ -184,15 +217,15 @@ class PeerKeystoneClient(base.DriverBase):
             project_name=user_project,
             project_domain_name=user_project_domain,
         )
-        timeout = (HTTP_CONNECT_TIMEOUT if timeout is None else timeout)
+        timeout = HTTP_CONNECT_TIMEOUT if timeout is None else timeout
         return session.Session(
-            auth=user_auth, additional_headers=consts.USER_HEADER,
-            timeout=timeout)
+            auth=user_auth, additional_headers=consts.USER_HEADER, timeout=timeout
+        )

     def _create_keystone_client(self):
         client_kwargs = {
-            'session': self.session,
-            'region_name': self.region_name,
-            'interface': self.auth_type
+            "session": self.session,
+            "region_name": self.region_name,
+            "interface": self.auth_type,
         }
         return ks_client.Client(**client_kwargs)
@ -34,15 +34,15 @@ from dccommon.utils import is_token_expiring_soon

from dcdbsync.dbsyncclient.client import Client as dbsyncclient

KEYSTONE_CLIENT_NAME = 'keystone'
SYSINV_CLIENT_NAME = 'sysinv'
FM_CLIENT_NAME = 'fm'
BARBICAN_CLIENT_NAME = 'barbican'
DBSYNC_CLIENT_NAME = 'dbsync'
KEYSTONE_CLIENT_NAME = "keystone"
SYSINV_CLIENT_NAME = "sysinv"
FM_CLIENT_NAME = "fm"
BARBICAN_CLIENT_NAME = "barbican"
DBSYNC_CLIENT_NAME = "dbsync"

LOG = log.getLogger(__name__)

LOCK_NAME = 'dc-openstackdriver-platform'
LOCK_NAME = "dc-openstackdriver-platform"

SUPPORTED_REGION_CLIENTS = (
SYSINV_CLIENT_NAME,
@ -65,9 +65,14 @@ class OpenStackDriver(object):
os_clients_dict = collections.defaultdict(dict)
_identity_tokens = {}

def __init__(self, region_name=consts.CLOUD_0, thread_name='dcorch',
auth_url=None, region_clients=SUPPORTED_REGION_CLIENTS,
endpoint_type=consts.KS_ENDPOINT_DEFAULT):
def __init__(
self,
region_name=consts.CLOUD_0,
thread_name="dcorch",
auth_url=None,
region_clients=SUPPORTED_REGION_CLIENTS,
endpoint_type=consts.KS_ENDPOINT_DEFAULT,
):
# Check if objects are cached and try to use those
self.region_name = region_name
self.keystone_client = None
@ -80,8 +85,9 @@ class OpenStackDriver(object):
# check if the requested clients are in the supported client list
result = all(c in SUPPORTED_REGION_CLIENTS for c in region_clients)
if not result:
message = ("Requested clients are not supported: %s" %
' '.join(region_clients))
message = "Requested clients are not supported: %s" % " ".join(
region_clients
)
LOG.error(message)
raise exceptions.InvalidInputError

@ -90,38 +96,43 @@ class OpenStackDriver(object):
LOG.debug("get new keystone client for subcloud %s", region_name)
try:
self.keystone_client = KeystoneClient(region_name, auth_url)
except keystone_exceptions.ConnectFailure as exception:
LOG.error('keystone_client region %s error: %s' %
(region_name, str(exception)))
raise exception
except keystone_exceptions.ConnectTimeout as exception:
LOG.debug('keystone_client region %s error: %s' %
(region_name, str(exception)))
raise exception
except keystone_exceptions.NotFound as exception:
LOG.debug('keystone_client region %s error: %s' %
(region_name, str(exception)))
raise exception
except keystone_exceptions.ConnectFailure as exc:
LOG.error(
"keystone_client region %s error: %s" % (region_name, str(exc))
)
raise exc
except keystone_exceptions.ConnectTimeout as exc:
LOG.debug(
"keystone_client region %s error: %s" % (region_name, str(exc))
)
raise exc
except keystone_exceptions.NotFound as exc:
LOG.debug(
"keystone_client region %s error: %s" % (region_name, str(exc))
)
raise exc

except Exception as exception:
LOG.error('keystone_client region %s error: %s' %
(region_name, str(exception)))
raise exception
except Exception as exc:
LOG.error(
"keystone_client region %s error: %s" % (region_name, str(exc))
)
raise exc

OpenStackDriver.update_region_clients(region_name,
KEYSTONE_CLIENT_NAME,
self.keystone_client)
OpenStackDriver.update_region_clients(
region_name, KEYSTONE_CLIENT_NAME, self.keystone_client
)
# Clear client object cache
if region_name != consts.CLOUD_0:
OpenStackDriver.os_clients_dict[region_name] = \
collections.defaultdict(dict)
OpenStackDriver.os_clients_dict[region_name] = collections.defaultdict(
dict
)

if region_clients:
self.get_cached_region_clients_for_thread(region_name,
thread_name,
region_clients)
self.get_cached_region_clients_for_thread(
region_name, thread_name, region_clients
)
for client_name in region_clients:
client_obj_name = client_name + '_client'
client_obj_name = client_name + "_client"
if getattr(self, client_obj_name) is None:
# Create new client object and cache it
try:
@ -131,69 +142,79 @@ class OpenStackDriver(object):
if client_name == "sysinv":
sysinv_endpoint = (
self.keystone_client.endpoint_cache.get_endpoint(
'sysinv'))
"sysinv"
)
)
client_object = region_client_class_map[client_name](
region=region_name,
session=self.keystone_client.session,
endpoint_type=endpoint_type,
endpoint=sysinv_endpoint)
endpoint=sysinv_endpoint,
)
else:
client_object = region_client_class_map[client_name](
region=region_name,
session=self.keystone_client.session,
endpoint_type=endpoint_type)
endpoint_type=endpoint_type,
)
setattr(self, client_obj_name, client_object)
OpenStackDriver.update_region_clients(region_name,
client_name,
client_object,
thread_name)
OpenStackDriver.update_region_clients(
region_name, client_name, client_object, thread_name
)
except Exception as exception:
LOG.error('Region %s client %s thread %s error: %s' %
(region_name, client_name, thread_name,
str(exception)))
LOG.error(
"Region %s client %s thread %s error: %s"
% (region_name, client_name, thread_name, str(exception))
)
raise exception

@lockutils.synchronized(LOCK_NAME)
def get_cached_keystone_client(self, region_name):
if ((region_name in OpenStackDriver.os_clients_dict) and
(KEYSTONE_CLIENT_NAME in
OpenStackDriver.os_clients_dict[region_name]) and
self._is_token_valid(region_name)):
self.keystone_client = (OpenStackDriver.os_clients_dict
[region_name][KEYSTONE_CLIENT_NAME])
if (
(region_name in OpenStackDriver.os_clients_dict)
and (KEYSTONE_CLIENT_NAME in OpenStackDriver.os_clients_dict[region_name])
and self._is_token_valid(region_name)
):
self.keystone_client = OpenStackDriver.os_clients_dict[region_name][
KEYSTONE_CLIENT_NAME
]

@lockutils.synchronized(LOCK_NAME)
def get_cached_region_clients_for_thread(self, region_name, thread_name,
clients):
if ((region_name in OpenStackDriver.os_clients_dict) and
(thread_name in OpenStackDriver.os_clients_dict[
region_name])):
def get_cached_region_clients_for_thread(self, region_name, thread_name, clients):
if (region_name in OpenStackDriver.os_clients_dict) and (
thread_name in OpenStackDriver.os_clients_dict[region_name]
):
for client in clients:
if client in (OpenStackDriver.os_clients_dict[region_name]
[thread_name]):
LOG.debug('Using cached OS %s client objects %s %s' %
(client, region_name, thread_name))
client_obj = (OpenStackDriver.os_clients_dict[region_name]
[thread_name][client])
setattr(self, client + '_client', client_obj)
if client in (
OpenStackDriver.os_clients_dict[region_name][thread_name]
):
LOG.debug(
"Using cached OS %s client objects %s %s"
% (client, region_name, thread_name)
)
client_obj = OpenStackDriver.os_clients_dict[region_name][
thread_name
][client]
setattr(self, client + "_client", client_obj)
else:
OpenStackDriver.os_clients_dict[region_name][thread_name] = {}

@classmethod
@lockutils.synchronized(LOCK_NAME)
def update_region_clients(cls, region_name, client_name, client_object,
thread_name=None):
def update_region_clients(
cls, region_name, client_name, client_object, thread_name=None
):
if thread_name is not None:
cls.os_clients_dict[region_name][thread_name][client_name] = \
client_object
cls.os_clients_dict[region_name][thread_name][client_name] = client_object
else:
cls.os_clients_dict[region_name][client_name] = client_object

@classmethod
@lockutils.synchronized(LOCK_NAME)
def delete_region_clients(cls, region_name, clear_token=False):
LOG.warn("delete_region_clients=%s, clear_token=%s" %
(region_name, clear_token))
LOG.warn(
"delete_region_clients=%s, clear_token=%s" % (region_name, clear_token)
)
if region_name in cls.os_clients_dict:
del cls.os_clients_dict[region_name]
if clear_token:
@ -202,50 +223,63 @@ class OpenStackDriver(object):
@classmethod
@lockutils.synchronized(LOCK_NAME)
def delete_region_clients_for_thread(cls, region_name, thread_name):
LOG.debug("delete_region_clients=%s, thread_name=%s" %
(region_name, thread_name))
if (region_name in cls.os_clients_dict and
thread_name in cls.os_clients_dict[region_name]):
LOG.debug(
"delete_region_clients=%s, thread_name=%s" % (region_name, thread_name)
)
if (
region_name in cls.os_clients_dict
and thread_name in cls.os_clients_dict[region_name]
):
del cls.os_clients_dict[region_name][thread_name]

def _is_token_valid(self, region_name):
try:
keystone = \
OpenStackDriver.os_clients_dict[region_name]['keystone'].\
keystone_client
if (not OpenStackDriver._identity_tokens
or region_name not in OpenStackDriver._identity_tokens
or not OpenStackDriver._identity_tokens[region_name]):
OpenStackDriver._identity_tokens[region_name] = \
keystone.tokens.validate(keystone.session.get_token(),
include_catalog=False)
LOG.info("Token for subcloud %s expires_at=%s" %
(region_name,
OpenStackDriver._identity_tokens[region_name]
['expires_at']))
keystone = OpenStackDriver.os_clients_dict[region_name][
"keystone"
].keystone_client
if (
not OpenStackDriver._identity_tokens
or region_name not in OpenStackDriver._identity_tokens
or not OpenStackDriver._identity_tokens[region_name]
):
OpenStackDriver._identity_tokens[region_name] = (
keystone.tokens.validate(
keystone.session.get_token(), include_catalog=False
)
)
LOG.info(
"Token for subcloud %s expires_at=%s"
% (
region_name,
OpenStackDriver._identity_tokens[region_name]["expires_at"],
)
)
except Exception as exception:
LOG.info('_is_token_valid handle: region: %s error: %s' %
(region_name, str(exception)))
LOG.info(
"_is_token_valid handle: region: %s error: %s"
% (region_name, str(exception))
)
# Reset the cached dictionary
OpenStackDriver.os_clients_dict[region_name] = \
collections.defaultdict(dict)
OpenStackDriver.os_clients_dict[region_name] = collections.defaultdict(dict)
OpenStackDriver._identity_tokens[region_name] = None
return False

token_expiring_soon = is_token_expiring_soon(
token=self._identity_tokens[region_name])
token=self._identity_tokens[region_name]
)

# If token is expiring soon, reset cached dictionaries and return False.
# Else return true.
if token_expiring_soon:
LOG.info("The cached keystone token for subcloud %s "
"will expire soon %s" %
(region_name,
OpenStackDriver._identity_tokens[region_name]
['expires_at']))
LOG.info(
"The cached keystone token for subcloud %s will expire soon %s"
% (
region_name,
OpenStackDriver._identity_tokens[region_name]["expires_at"],
)
)
# Reset the cached dictionary
OpenStackDriver.os_clients_dict[region_name] = \
collections.defaultdict(dict)
OpenStackDriver.os_clients_dict[region_name] = collections.defaultdict(dict)
OpenStackDriver._identity_tokens[region_name] = None
return False
else:
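The caching logic above is easier to follow in isolation. A reduced sketch of the class-level layout, with illustrative keys only: each region maps to a keystone entry plus per-thread buckets of service clients, and invalidation resets the whole region bucket exactly as _is_token_valid does on a bad or expiring token:

import collections

os_clients_dict = collections.defaultdict(dict)

os_clients_dict["subcloud1"]["keystone"] = object()          # region-wide entry
os_clients_dict["subcloud1"].setdefault("dcorch", {})        # per-thread bucket
os_clients_dict["subcloud1"]["dcorch"]["sysinv"] = object()  # per-thread client

# Reset on token failure, as the driver does:
os_clients_dict["subcloud1"] = collections.defaultdict(dict)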
@ -305,9 +339,7 @@ class OptimizedOpenStackDriver(object):
)

if region_clients:
self.initialize_region_clients(
region_clients, thread_name, endpoint_type
)
self.initialize_region_clients(region_clients, thread_name, endpoint_type)

def initialize_region_clients(
self, region_clients: List[str], thread_name: str, endpoint_type: str
@ -350,8 +382,8 @@ class OptimizedOpenStackDriver(object):
# also pass the cached endpoint so it does not need to
# retrieve it from keystone.
if client_name == "sysinv":
args["endpoint"] = (
self.keystone_client.endpoint_cache.get_endpoint("sysinv")
args["endpoint"] = self.keystone_client.endpoint_cache.get_endpoint(
"sysinv"
)

client_object = client_class(**args)
@ -393,7 +425,7 @@ class OptimizedOpenStackDriver(object):
raise exception
except (
keystone_exceptions.NotFound,
keystone_exceptions.ConnectTimeout
keystone_exceptions.ConnectTimeout,
) as exception:
LOG.debug(
f"keystone_client region {self.region_name} error: {str(exception)}"
@ -419,9 +451,7 @@ class OptimizedOpenStackDriver(object):
:type fetch_subcloud_ips: Callable
"""
os_clients_dict = OptimizedOpenStackDriver.os_clients_dict
keystone_client = os_clients_dict.get(region_name, {}).get(
KEYSTONE_CLIENT_NAME
)
keystone_client = os_clients_dict.get(region_name, {}).get(KEYSTONE_CLIENT_NAME)

# If there's a cached keystone client and the token is valid, use it
if keystone_client and self._is_token_valid(region_name):
@ -489,9 +519,7 @@ class OptimizedOpenStackDriver(object):

@classmethod
@lockutils.synchronized(LOCK_NAME)
def delete_region_clients(
cls, region_name: str, clear_token: bool = False
) -> None:
def delete_region_clients(cls, region_name: str, clear_token: bool = False) -> None:
"""Delete region clients from cache.

:param region_name: The name of the region.
@ -528,8 +556,8 @@ class OptimizedOpenStackDriver(object):

@staticmethod
def _reset_cached_clients_and_token(region_name: str) -> None:
OptimizedOpenStackDriver.os_clients_dict[region_name] = (
collections.defaultdict(dict)
OptimizedOpenStackDriver.os_clients_dict[region_name] = collections.defaultdict(
dict
)
OptimizedOpenStackDriver._identity_tokens[region_name] = None

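The cache mutators in both driver classes run under the same named lock via oslo.concurrency. A minimal standalone sketch of that decorator pattern (the function body and its use are illustrative):

from oslo_concurrency import lockutils

@lockutils.synchronized("dc-openstackdriver-platform")
def update_cache(region_name, client_name, client_object):
    # Runs serialized against every other function decorated with the
    # same lock name, which is what keeps the shared dicts consistent.
    pass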
@ -14,12 +14,12 @@ from dccommon import exceptions
LOG = log.getLogger(__name__)

# Proposed States
ABORTING = 'aborting'
AVAILABLE = 'available'
COMMITTED = 'committed'
DEPLOYED = 'deployed'
REMOVING = 'removing'
UNAVAILABLE = 'unavailable'
ABORTING = "aborting"
AVAILABLE = "available"
COMMITTED = "committed"
DEPLOYED = "deployed"
REMOVING = "removing"
UNAVAILABLE = "unavailable"

REST_DEFAULT_TIMEOUT = 900

@ -31,22 +31,23 @@ class SoftwareClient(base.DriverBase):
# Get an endpoint and token.
if not endpoint:
self.endpoint = session.get_endpoint(
service_type='usm',
service_type="usm",
region_name=region,
interface=consts.KS_ENDPOINT_ADMIN)
interface=consts.KS_ENDPOINT_ADMIN,
)
else:
self.endpoint = endpoint

# The usm systemcontroller endpoint ends with a slash but the regionone
# and the subcloud endpoint don't. The slash is removed to standardize
# with the other endpoints.
self.endpoint = self.endpoint.rstrip('/') + '/v1'
self.endpoint = self.endpoint.rstrip("/") + "/v1"
self.token = session.get_token()
self.headers = {"X-Auth-Token": self.token}

def list(self, timeout=REST_DEFAULT_TIMEOUT):
"""List releases"""
url = self.endpoint + '/release'
url = self.endpoint + "/release"
response = requests.get(url, headers=self.headers, timeout=timeout)
return self._handle_response(response, operation="List")

@ -59,7 +60,7 @@ class SoftwareClient(base.DriverBase):
def delete(self, releases, timeout=REST_DEFAULT_TIMEOUT):
"""Delete release"""
release_str = "/".join(releases)
url = self.endpoint + f'/release/{release_str}'
url = self.endpoint + f"/release/{release_str}"
response = requests.delete(url, headers=self.headers, timeout=timeout)
return self._handle_response(response, operation="Delete")

@ -72,18 +73,17 @@ class SoftwareClient(base.DriverBase):
def commit_patch(self, releases, timeout=REST_DEFAULT_TIMEOUT):
"""Commit patch"""
release_str = "/".join(releases)
url = self.endpoint + f'/commit_patch/{release_str}'
url = self.endpoint + f"/commit_patch/{release_str}"
response = requests.post(url, headers=self.headers, timeout=timeout)
return self._handle_response(response, operation="Commit patch")

def _handle_response(self, response, operation):
if response.status_code != 200:
LOG.error(f"{operation} failed with RC: {response.status_code}")
raise exceptions.ApiException(endpoint=operation,
rc=response.status_code)
raise exceptions.ApiException(endpoint=operation, rc=response.status_code)
data = response.json()
# Data response could be a dict with an error key or a list
if isinstance(data, dict) and data.get('error'):
if isinstance(data, dict) and data.get("error"):
message = f"{operation} failed with error: {data.get('error')}"
LOG.error(message)
raise Exception(message)

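SoftwareClient is a thin wrapper over the USM REST API that uses requests directly rather than a generated client. A hedged sketch of the same call shape, with a placeholder endpoint and token:

import requests

endpoint = "https://controller.example:5497/v1"  # hypothetical USM endpoint
headers = {"X-Auth-Token": "<token>"}

response = requests.get(endpoint + "/release", headers=headers, timeout=900)
if response.status_code != 200:
    raise RuntimeError("List failed with RC: %s" % response.status_code)
# The payload is either a list of releases or a dict carrying an "error" key
data = response.json()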
@ -29,16 +29,16 @@ from dccommon import utils

LOG = log.getLogger(__name__)
API_VERSION = '1'
API_VERSION = "1"

CERT_MODE_DOCKER_REGISTRY = 'docker_registry'
CERT_MODE_SSL = 'ssl'
CERT_MODE_SSL_CA = 'ssl_ca'
CERT_MODE_DOCKER_REGISTRY = "docker_registry"
CERT_MODE_SSL = "ssl"
CERT_MODE_SSL_CA = "ssl_ca"

CONTROLLER = 'controller'
CONTROLLER = "controller"

NETWORK_TYPE_MGMT = 'mgmt'
NETWORK_TYPE_ADMIN = 'admin'
NETWORK_TYPE_MGMT = "mgmt"
NETWORK_TYPE_ADMIN = "admin"

SSL_CERT_DIR = "/etc/ssl/private/"
SSL_CERT_FILE = "server-cert.pem"
@ -49,66 +49,66 @@ DOCKER_REGISTRY_KEY_FILE = os.path.join(SSL_CERT_DIR, "registry-cert.key")

# The following constants are declared in sysinv/common/kubernetes.py
# Kubernetes upgrade states
KUBE_UPGRADE_STARTED = 'upgrade-started'
KUBE_UPGRADE_DOWNLOADING_IMAGES = 'downloading-images'
KUBE_UPGRADE_DOWNLOADING_IMAGES_FAILED = 'downloading-images-failed'
KUBE_UPGRADE_DOWNLOADED_IMAGES = 'downloaded-images'
KUBE_UPGRADING_FIRST_MASTER = 'upgrading-first-master'
KUBE_UPGRADING_FIRST_MASTER_FAILED = 'upgrading-first-master-failed'
KUBE_UPGRADED_FIRST_MASTER = 'upgraded-first-master'
KUBE_UPGRADING_NETWORKING = 'upgrading-networking'
KUBE_UPGRADING_NETWORKING_FAILED = 'upgrading-networking-failed'
KUBE_UPGRADED_NETWORKING = 'upgraded-networking'
KUBE_UPGRADING_SECOND_MASTER = 'upgrading-second-master'
KUBE_UPGRADING_SECOND_MASTER_FAILED = 'upgrading-second-master-failed'
KUBE_UPGRADED_SECOND_MASTER = 'upgraded-second-master'
KUBE_UPGRADING_KUBELETS = 'upgrading-kubelets'
KUBE_UPGRADE_COMPLETE = 'upgrade-complete'
KUBE_UPGRADE_STARTED = "upgrade-started"
KUBE_UPGRADE_DOWNLOADING_IMAGES = "downloading-images"
KUBE_UPGRADE_DOWNLOADING_IMAGES_FAILED = "downloading-images-failed"
KUBE_UPGRADE_DOWNLOADED_IMAGES = "downloaded-images"
KUBE_UPGRADING_FIRST_MASTER = "upgrading-first-master"
KUBE_UPGRADING_FIRST_MASTER_FAILED = "upgrading-first-master-failed"
KUBE_UPGRADED_FIRST_MASTER = "upgraded-first-master"
KUBE_UPGRADING_NETWORKING = "upgrading-networking"
KUBE_UPGRADING_NETWORKING_FAILED = "upgrading-networking-failed"
KUBE_UPGRADED_NETWORKING = "upgraded-networking"
KUBE_UPGRADING_SECOND_MASTER = "upgrading-second-master"
KUBE_UPGRADING_SECOND_MASTER_FAILED = "upgrading-second-master-failed"
KUBE_UPGRADED_SECOND_MASTER = "upgraded-second-master"
KUBE_UPGRADING_KUBELETS = "upgrading-kubelets"
KUBE_UPGRADE_COMPLETE = "upgrade-complete"

# Kubernetes host upgrade statuses
KUBE_HOST_UPGRADING_CONTROL_PLANE = 'upgrading-control-plane'
KUBE_HOST_UPGRADING_CONTROL_PLANE_FAILED = 'upgrading-control-plane-failed'
KUBE_HOST_UPGRADING_KUBELET = 'upgrading-kubelet'
KUBE_HOST_UPGRADING_KUBELET_FAILED = 'upgrading-kubelet-failed'
KUBE_HOST_UPGRADING_CONTROL_PLANE = "upgrading-control-plane"
KUBE_HOST_UPGRADING_CONTROL_PLANE_FAILED = "upgrading-control-plane-failed"
KUBE_HOST_UPGRADING_KUBELET = "upgrading-kubelet"
KUBE_HOST_UPGRADING_KUBELET_FAILED = "upgrading-kubelet-failed"

# Kubernetes rootca update states

KUBE_ROOTCA_UPDATE_STARTED = 'update-started'
KUBE_ROOTCA_UPDATE_CERT_UPLOADED = 'update-new-rootca-cert-uploaded'
KUBE_ROOTCA_UPDATE_CERT_GENERATED = 'update-new-rootca-cert-generated'
KUBE_ROOTCA_UPDATING_PODS_TRUSTBOTHCAS = 'updating-pods-trust-both-cas'
KUBE_ROOTCA_UPDATED_PODS_TRUSTBOTHCAS = 'updated-pods-trust-both-cas'
KUBE_ROOTCA_UPDATING_PODS_TRUSTBOTHCAS_FAILED = 'updating-pods-trust-both-cas-failed'
KUBE_ROOTCA_UPDATING_PODS_TRUSTNEWCA = 'updating-pods-trust-new-ca'
KUBE_ROOTCA_UPDATED_PODS_TRUSTNEWCA = 'updated-pods-trust-new-ca'
KUBE_ROOTCA_UPDATING_PODS_TRUSTNEWCA_FAILED = 'updating-pods-trust-new-ca-failed'
KUBE_ROOTCA_UPDATE_COMPLETED = 'update-completed'
KUBE_ROOTCA_UPDATE_ABORTED = 'update-aborted'
KUBE_ROOTCA_UPDATE_STARTED = "update-started"
KUBE_ROOTCA_UPDATE_CERT_UPLOADED = "update-new-rootca-cert-uploaded"
KUBE_ROOTCA_UPDATE_CERT_GENERATED = "update-new-rootca-cert-generated"
KUBE_ROOTCA_UPDATING_PODS_TRUSTBOTHCAS = "updating-pods-trust-both-cas"
KUBE_ROOTCA_UPDATED_PODS_TRUSTBOTHCAS = "updated-pods-trust-both-cas"
KUBE_ROOTCA_UPDATING_PODS_TRUSTBOTHCAS_FAILED = "updating-pods-trust-both-cas-failed"
KUBE_ROOTCA_UPDATING_PODS_TRUSTNEWCA = "updating-pods-trust-new-ca"
KUBE_ROOTCA_UPDATED_PODS_TRUSTNEWCA = "updated-pods-trust-new-ca"
KUBE_ROOTCA_UPDATING_PODS_TRUSTNEWCA_FAILED = "updating-pods-trust-new-ca-failed"
KUBE_ROOTCA_UPDATE_COMPLETED = "update-completed"
KUBE_ROOTCA_UPDATE_ABORTED = "update-aborted"

# Kubernetes rootca host update states
KUBE_ROOTCA_UPDATING_HOST_TRUSTBOTHCAS = 'updating-host-trust-both-cas'
KUBE_ROOTCA_UPDATED_HOST_TRUSTBOTHCAS = 'updated-host-trust-both-cas'
KUBE_ROOTCA_UPDATING_HOST_TRUSTBOTHCAS_FAILED = 'updating-host-trust-both-cas-failed'
KUBE_ROOTCA_UPDATING_HOST_UPDATECERTS = 'updating-host-update-certs'
KUBE_ROOTCA_UPDATED_HOST_UPDATECERTS = 'updated-host-update-certs'
KUBE_ROOTCA_UPDATING_HOST_UPDATECERTS_FAILED = 'updating-host-update-certs-failed'
KUBE_ROOTCA_UPDATING_HOST_TRUSTNEWCA = 'updating-host-trust-new-ca'
KUBE_ROOTCA_UPDATED_HOST_TRUSTNEWCA = 'updated-host-trust-new-ca'
KUBE_ROOTCA_UPDATING_HOST_TRUSTNEWCA_FAILED = 'updating-host-trust-new-ca-failed'
KUBE_ROOTCA_UPDATING_HOST_TRUSTBOTHCAS = "updating-host-trust-both-cas"
KUBE_ROOTCA_UPDATED_HOST_TRUSTBOTHCAS = "updated-host-trust-both-cas"
KUBE_ROOTCA_UPDATING_HOST_TRUSTBOTHCAS_FAILED = "updating-host-trust-both-cas-failed"
KUBE_ROOTCA_UPDATING_HOST_UPDATECERTS = "updating-host-update-certs"
KUBE_ROOTCA_UPDATED_HOST_UPDATECERTS = "updated-host-update-certs"
KUBE_ROOTCA_UPDATING_HOST_UPDATECERTS_FAILED = "updating-host-update-certs-failed"
KUBE_ROOTCA_UPDATING_HOST_TRUSTNEWCA = "updating-host-trust-new-ca"
KUBE_ROOTCA_UPDATED_HOST_TRUSTNEWCA = "updated-host-trust-new-ca"
KUBE_ROOTCA_UPDATING_HOST_TRUSTNEWCA_FAILED = "updating-host-trust-new-ca-failed"

# The following is the name of the host filesystem 'scratch' which is used
# by dcmanager upgrade orchestration for the load import operations.
HOST_FS_NAME_SCRATCH = 'scratch'
HOST_FS_NAME_SCRATCH = "scratch"


def make_sysinv_patch(update_dict):
patch = []
for k, v in update_dict.items():
key = k
if not k.startswith('/'):
key = '/' + key
if not k.startswith("/"):
key = "/" + key

p = {'path': key, 'value': v, 'op': 'replace'}
p = {"path": key, "value": v, "op": "replace"}
patch.append(dict(p))

LOG.debug("make_sysinv_patch patch={}".format(patch))
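A worked example of make_sysinv_patch as defined above: a plain dict becomes a JSON-patch-style list of "replace" operations with "/"-prefixed paths (dict order is preserved on Python 3.7+):

update = {"https_enabled": "true", "/description": "lab system"}
patch = make_sysinv_patch(update)
# patch == [
#     {"path": "/https_enabled", "value": "true", "op": "replace"},
#     {"path": "/description", "value": "lab system", "op": "replace"},
# ]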
@ -119,10 +119,14 @@ def make_sysinv_patch(update_dict):
class SysinvClient(base.DriverBase):
"""Sysinv V1 driver."""

def __init__(self, region, session,
timeout=consts.SYSINV_CLIENT_REST_DEFAULT_TIMEOUT,
endpoint_type=consts.KS_ENDPOINT_ADMIN,
endpoint=None):
def __init__(
self,
region,
session,
timeout=consts.SYSINV_CLIENT_REST_DEFAULT_TIMEOUT,
endpoint_type=consts.KS_ENDPOINT_ADMIN,
endpoint=None,
):
try:
# TOX cannot import cgts_client and all the dependencies therefore
# the client is being lazy loaded since TOX doesn't actually
@ -133,15 +137,13 @@ class SysinvClient(base.DriverBase):
# get an endpoint and token.
if endpoint is None:
endpoint = session.get_endpoint(
service_type='platform',
region_name=region,
interface=endpoint_type)
service_type="platform", region_name=region, interface=endpoint_type
)

token = session.get_token()
self.sysinv_client = client.Client(API_VERSION,
endpoint=endpoint,
token=token,
timeout=timeout)
self.sysinv_client = client.Client(
API_VERSION, endpoint=endpoint, token=token, timeout=timeout
)
self.region_name = region
except exceptions.ServiceUnavailable:
raise
@ -152,60 +154,48 @@ class SysinvClient(base.DriverBase):

def get_controller_hosts(self):
"""Get a list of controller hosts."""
return self.sysinv_client.ihost.list_personality(
CONTROLLER)
return self.sysinv_client.ihost.list_personality(CONTROLLER)

def _do_host_action(self, host_id, action_value):
"""Protected method to invoke an action on a host."""
patch = [{'op': 'replace',
'path': '/action',
'value': action_value}, ]
patch = [
{"op": "replace", "path": "/action", "value": action_value},
]
return self.sysinv_client.ihost.update(host_id, patch)

def lock_host(self, host_id, force=False):
"""Lock a host"""
if force:
action_value = 'force-lock'
action_value = "force-lock"
else:
action_value = 'lock'
action_value = "lock"
return self._do_host_action(host_id, action_value)

def unlock_host(self, host_id, force=False):
"""Unlock a host"""
if force:
action_value = 'force-unlock'
action_value = "force-unlock"
else:
action_value = 'unlock'
action_value = "unlock"
return self._do_host_action(host_id, action_value)

def swact_host(self, host_id, force=False):
"""Perform host swact"""
if force:
action_value = 'force-swact'
action_value = "force-swact"
else:
action_value = 'swact'
action_value = "swact"
return self._do_host_action(host_id, action_value)

def configure_bmc_host(self,
host_id,
bm_username,
bm_ip,
bm_password,
bm_type='ipmi'):
def configure_bmc_host(
self, host_id, bm_username, bm_ip, bm_password, bm_type="ipmi"
):
"""Configure bmc of a host"""
patch = [
{'op': 'replace',
'path': '/bm_username',
'value': bm_username},
{'op': 'replace',
'path': '/bm_ip',
'value': bm_ip},
{'op': 'replace',
'path': '/bm_password',
'value': bm_password},
{'op': 'replace',
'path': '/bm_type',
'value': bm_type},
{"op": "replace", "path": "/bm_username", "value": bm_username},
{"op": "replace", "path": "/bm_ip", "value": bm_ip},
{"op": "replace", "path": "/bm_password", "value": bm_password},
{"op": "replace", "path": "/bm_type", "value": bm_type},
]
return self.sysinv_client.ihost.update(host_id, patch)

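Usage sketch for the host helpers above. Every action funnels through _do_host_action, which issues a single JSON-patch "replace" on /action; the constructor arguments, session, and host id below are placeholders:

client = SysinvClient(region="subcloud1", session=admin_session)  # hypothetical args
client.lock_host(host_id)    # sends {"op": "replace", "path": "/action", "value": "lock"}
client.unlock_host(host_id)  # value becomes "unlock"
client.swact_host(host_id, force=True)  # value becomes "force-swact"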
@ -215,20 +205,21 @@ class SysinvClient(base.DriverBase):

def power_on_host(self, host_id):
"""Power on a host"""
action_value = 'power-on'
action_value = "power-on"
return self._do_host_action(host_id, action_value)

def power_off_host(self, host_id):
"""Power off a host"""
action_value = 'power-off'
action_value = "power-off"
return self._do_host_action(host_id, action_value)

def get_management_interface(self, hostname):
"""Get the management interface for a host."""
interfaces = self.sysinv_client.iinterface.list(hostname)
for interface in interfaces:
interface_networks = self.sysinv_client.interface_network.\
list_by_interface(interface.uuid)
interface_networks = self.sysinv_client.interface_network.list_by_interface(
interface.uuid
)
for if_net in interface_networks:
if if_net.network_type == NETWORK_TYPE_MGMT:
return interface
@ -255,8 +246,9 @@ class SysinvClient(base.DriverBase):
"""Get the admin interface for a host."""
interfaces = self.sysinv_client.iinterface.list(hostname)
for interface in interfaces:
interface_networks = self.sysinv_client.interface_network.\
list_by_interface(interface.uuid)
interface_networks = self.sysinv_client.interface_network.list_by_interface(
interface.uuid
)
for if_net in interface_networks:
if if_net.network_type == NETWORK_TYPE_ADMIN:
return interface
@ -291,25 +283,31 @@ class SysinvClient(base.DriverBase):
def create_route(self, interface_uuid, network, prefix, gateway, metric):
"""Create a static route on an interface."""

LOG.info("Creating route: interface: %s dest: %s/%s "
"gateway: %s metric: %s" % (interface_uuid, network,
prefix, gateway, metric))
LOG.info(
"Creating route: interface: %s dest: %s/%s gateway: %s metric: %s"
% (interface_uuid, network, prefix, gateway, metric)
)
try:
self.sysinv_client.route.create(interface_uuid=interface_uuid,
network=network,
prefix=prefix,
gateway=gateway,
metric=metric)
self.sysinv_client.route.create(
interface_uuid=interface_uuid,
network=network,
prefix=prefix,
gateway=gateway,
metric=metric,
)
except HTTPConflict:
# The route already exists
LOG.warning("Failed to create route, route: interface: %s dest: "
"%s/%s gateway: %s metric: %s already exists" %
(interface_uuid, network, prefix, gateway, metric))
LOG.warning(
"Failed to create route, route: interface: %s dest: %s/%s "
"gateway: %s metric: %s already exists"
% (interface_uuid, network, prefix, gateway, metric)
)
except Exception as e:
LOG.error("Failed to create route: route: interface: %s dest: "
"%s/%s gateway: %s metric: %s" % (interface_uuid,
network, prefix,
gateway, metric))
LOG.error(
"Failed to create route: route: interface: %s dest: %s/%s "
"gateway: %s metric: %s"
% (interface_uuid, network, prefix, gateway, metric)
)
raise e

def delete_route(self, interface_uuid, network, prefix, gateway, metric):
@ -318,17 +316,23 @@ class SysinvClient(base.DriverBase):
# Get the routes for this interface
routes = self.sysinv_client.route.list_by_interface(interface_uuid)
for route in routes:
if (route.network == network and route.prefix == prefix and
route.gateway == gateway and route.metric == metric):
LOG.info("Deleting route: interface: %s dest: %s/%s "
"gateway: %s metric: %s" % (interface_uuid, network,
prefix, gateway, metric))
if (
route.network == network
and route.prefix == prefix
and route.gateway == gateway
and route.metric == metric
):
LOG.info(
"Deleting route: interface: %s dest: %s/%s gateway: %s metric: %s"
% (interface_uuid, network, prefix, gateway, metric)
)
self.sysinv_client.route.delete(route.uuid)
return

LOG.warning("Route not found: interface: %s dest: %s/%s gateway: %s "
"metric %s" % (interface_uuid, network, prefix, gateway,
metric))
LOG.warning(
"Route not found: interface: %s dest: %s/%s gateway: %s "
"metric %s" % (interface_uuid, network, prefix, gateway, metric)
)

def get_service_groups(self):
"""Get a list of service groups."""
@ -353,17 +357,20 @@ class SysinvClient(base.DriverBase):
def delete_load(self, load_id):
"""Delete a load with the given id

:param: load id
:param: load id
"""
try:
LOG.info("delete_load region {} load_id: {}".format(
self.region_name, load_id))
LOG.info(
"delete_load region {} load_id: {}".format(self.region_name, load_id)
)
self.sysinv_client.load.delete(load_id)
except HTTPNotFound:
LOG.info("delete_load NotFound {} for region: {}".format(
load_id, self.region_name))
raise exceptions.LoadNotFound(region_name=self.region_name,
load_id=load_id)
LOG.info(
"delete_load NotFound {} for region: {}".format(
load_id, self.region_name
)
)
raise exceptions.LoadNotFound(region_name=self.region_name, load_id=load_id)
except Exception as e:
LOG.error("delete_load exception={}".format(e))
raise e
@ -371,8 +378,9 @@ class SysinvClient(base.DriverBase):
def import_load(self, path_to_iso, path_to_sig):
"""Import the particular software load."""
try:
return self.sysinv_client.load.import_load(path_to_iso=path_to_iso,
path_to_sig=path_to_sig)
return self.sysinv_client.load.import_load(
path_to_iso=path_to_iso, path_to_sig=path_to_sig
)
except HTTPBadRequest as e:
if "Max number of loads" in str(e):
raise exceptions.LoadMaxReached(region_name=self.region_name)
@ -407,10 +415,10 @@ class SysinvClient(base.DriverBase):
return self.sysinv_client.upgrade.get_upgrade_msg()

def upgrade_activate(self):
"""Invoke the API for 'system upgrade-activate', which is an update """
patch = [{'op': 'replace',
'path': '/state',
'value': 'activation-requested'}, ]
"""Invoke the API for 'system upgrade-activate', which is an update"""
patch = [
{"op": "replace", "path": "/state", "value": "activation-requested"},
]
return self.sysinv_client.upgrade.update(patch)

def upgrade_complete(self):
@ -436,10 +444,10 @@ class SysinvClient(base.DriverBase):
"""Get service parameters for a given name."""
opts = []
opt = dict()
opt['field'] = name
opt['value'] = value
opt['op'] = 'eq'
opt['type'] = ''
opt["field"] = name
opt["value"] = value
opt["op"] = "eq"
opt["type"] = ""
opts.append(opt)
parameters = self.sysinv_client.service_parameter.list(q=opts)
return parameters
@ -452,110 +460,121 @@ class SysinvClient(base.DriverBase):
def get_certificates(self):
"""Get the certificates for this region

:return: certificates
:return: certificates
"""

try:
certificates = self.sysinv_client.certificate.list()
except Exception as e:
LOG.error("get_certificates region={} "
"exception={}".format(self.region_name, e))
LOG.error(
"get_certificates region={} exception={}".format(self.region_name, e)
)
raise e

if not certificates:
LOG.info("No certificates in region: {}".format(
self.region_name))
LOG.info("No certificates in region: {}".format(self.region_name))

return certificates

def _validate_certificate(self, signature, certificate):
# JKUNG need to look at the crypto public serial id
certificate_sig = hashlib.md5(
encodeutils.safe_encode(certificate), usedforsecurity=False).hexdigest()
encodeutils.safe_encode(certificate), usedforsecurity=False
).hexdigest()

if certificate_sig == signature:
return True

LOG.info("_validate_certificate region={} sig={} mismatch "
"reference signature={}".format(
self.region_name, certificate_sig, signature))
LOG.info(
"_validate_certificate region={} sig={} mismatch reference "
"signature={}".format(self.region_name, certificate_sig, signature)
)
return False

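The signature check above relies on hashlib.md5(..., usedforsecurity=False), a Python 3.9+ keyword that keeps FIPS-enabled builds from rejecting the call, since the digest is only used as a fingerprint here. A self-contained sketch with placeholder PEM text:

import hashlib

from oslo_utils import encodeutils

certificate = "-----BEGIN CERTIFICATE-----..."  # placeholder PEM text
signature = hashlib.md5(
    encodeutils.safe_encode(certificate), usedforsecurity=False
).hexdigest()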
def update_certificate(self,
signature,
certificate=None,
data=None):
def update_certificate(self, signature, certificate=None, data=None):
"""Update the certificate for this region

:param: signature of the public certificate
:param: certificate
:param: data
:return: icertificate
:param: signature of the public certificate
:param: certificate
:param: data
:return: icertificate
"""

LOG.info("update_certificate signature {} data {}".format(
signature, data))
LOG.info("update_certificate signature {} data {}".format(signature, data))
if not certificate:
ssl_cert_ca_file = utils.get_ssl_cert_ca_file()
if data:
data['passphrase'] = None
mode = data.get('mode', CERT_MODE_SSL)
data["passphrase"] = None
mode = data.get("mode", CERT_MODE_SSL)
if mode == CERT_MODE_SSL_CA:
certificate_files = [ssl_cert_ca_file]
elif mode == CERT_MODE_SSL:
certificate_files = [SSL_PEM_FILE]
elif mode == CERT_MODE_DOCKER_REGISTRY:
certificate_files = \
[DOCKER_REGISTRY_KEY_FILE,
DOCKER_REGISTRY_CERT_FILE]
certificate_files = [
DOCKER_REGISTRY_KEY_FILE,
DOCKER_REGISTRY_CERT_FILE,
]
else:
LOG.warn("update_certificate mode {} not supported".format(
mode))
LOG.warn("update_certificate mode {} not supported".format(mode))
return
elif signature and signature.startswith(CERT_MODE_SSL_CA):
data['mode'] = CERT_MODE_SSL_CA
data["mode"] = CERT_MODE_SSL_CA
certificate_files = [ssl_cert_ca_file]
elif signature and signature.startswith(CERT_MODE_SSL):
data['mode'] = CERT_MODE_SSL
data["mode"] = CERT_MODE_SSL
certificate_files = [SSL_PEM_FILE]
elif signature and signature.startswith(CERT_MODE_DOCKER_REGISTRY):
data['mode'] = CERT_MODE_DOCKER_REGISTRY
certificate_files = \
[DOCKER_REGISTRY_KEY_FILE,
DOCKER_REGISTRY_CERT_FILE]
data["mode"] = CERT_MODE_DOCKER_REGISTRY
certificate_files = [
DOCKER_REGISTRY_KEY_FILE,
DOCKER_REGISTRY_CERT_FILE,
]
else:
LOG.warn("update_certificate signature {} "
"not supported".format(signature))
LOG.warn(
"update_certificate signature {} not supported".format(signature)
)
return

certificate = ""
for certificate_file in certificate_files:
with open(certificate_file, 'r') as content_file:
with open(certificate_file, "r") as content_file:
certificate += content_file.read()

LOG.info("update_certificate from shared file {} {}".format(
signature, certificate_files))
LOG.info(
"update_certificate from shared file {} {}".format(
signature, certificate_files
)
)

if (signature and signature.startswith(CERT_MODE_SSL) and
not signature.startswith(CERT_MODE_SSL_CA)):
if (
signature
and signature.startswith(CERT_MODE_SSL)
and not signature.startswith(CERT_MODE_SSL_CA)
):
# ensure https is enabled
isystem = self.sysinv_client.isystem.list()[0]
https_enabled = isystem.capabilities.get('https_enabled', False)
https_enabled = isystem.capabilities.get("https_enabled", False)
if not https_enabled:
isystem = self.sysinv_client.isystem.update(
isystem.uuid,
[{"path": "/https_enabled",
"value": "true",
"op": "replace"}])
LOG.info("region={} enabled https system={}".format(
self.region_name, isystem.uuid))
[{"path": "/https_enabled", "value": "true", "op": "replace"}],
)
LOG.info(
"region={} enabled https system={}".format(
self.region_name, isystem.uuid
)
)

try:
icertificate = self.sysinv_client.certificate.certificate_install(
certificate, data)
LOG.info("update_certificate region={} signature={}".format(
self.region_name,
signature))
certificate, data
)
LOG.info(
"update_certificate region={} signature={}".format(
self.region_name, signature
)
)
except Exception as e:
LOG.error("update_certificate exception={}".format(e))
raise e
@ -565,23 +584,29 @@ class SysinvClient(base.DriverBase):
def delete_certificate(self, certificate):
"""Delete the certificate for this region

:param: a CA certificate to delete
:param: a CA certificate to delete
"""
try:
LOG.info(" delete_certificate region {} certificate: {}".format(
self.region_name, certificate.signature))
self.sysinv_client.certificate.certificate_uninstall(
certificate.uuid)
LOG.info(
" delete_certificate region {} certificate: {}".format(
self.region_name, certificate.signature
)
)
self.sysinv_client.certificate.certificate_uninstall(certificate.uuid)
except HTTPNotFound:
LOG.info("delete_certificate NotFound {} for region: {}".format(
certificate.signature, self.region_name))
LOG.info(
"delete_certificate NotFound {} for region: {}".format(
certificate.signature, self.region_name
)
)
raise exceptions.CertificateNotFound(
region_name=self.region_name, signature=certificate.signature)
region_name=self.region_name, signature=certificate.signature
)

def get_user(self):
"""Get the user password info for this region

:return: iuser
:return: iuser
"""
iusers = self.sysinv_client.iuser.list()
if not iusers:
@ -589,16 +614,15 @@ class SysinvClient(base.DriverBase):
return None
iuser = iusers[0]

LOG.debug("get_user uuid=%s passwd_hash=%s" %
(iuser.uuid, iuser.passwd_hash))
LOG.debug("get_user uuid=%s passwd_hash=%s" % (iuser.uuid, iuser.passwd_hash))

return iuser

def update_user(self, passwd_hash, root_sig, passwd_expiry_days):
"""Update the user passwd for this region

:param: passwd_hash
:return: iuser
:param: passwd_hash
:return: iuser
"""
try:
iuser = self.get_user()
@ -606,21 +630,29 @@ class SysinvClient(base.DriverBase):
LOG.warn("iuser not found %s" % self.region_name)
return iuser

if (iuser.passwd_hash != passwd_hash or
iuser.passwd_expiry_days != passwd_expiry_days):
if (
iuser.passwd_hash != passwd_hash
or iuser.passwd_expiry_days != passwd_expiry_days
):
patch = make_sysinv_patch(
{'passwd_hash': passwd_hash,
'passwd_expiry_days': passwd_expiry_days,
'root_sig': root_sig,
'action': 'apply',
})
LOG.info("region={} user update uuid={} patch={}".format(
self.region_name, iuser.uuid, patch))
{
"passwd_hash": passwd_hash,
"passwd_expiry_days": passwd_expiry_days,
"root_sig": root_sig,
"action": "apply",
}
)
LOG.info(
"region={} user update uuid={} patch={}".format(
self.region_name, iuser.uuid, patch
)
)
iuser = self.sysinv_client.iuser.update(iuser.uuid, patch)
else:
LOG.info("update_user no changes, skip user region={} "
"update uuid={} passwd_hash={}".format(
self.region_name, iuser.uuid, passwd_hash))
LOG.info(
"update_user no changes, skip user region={} update uuid={} "
"passwd_hash={}".format(self.region_name, iuser.uuid, passwd_hash)
)
except Exception as e:
LOG.error("update_user exception={}".format(e))
raise e
@ -630,16 +662,19 @@ class SysinvClient(base.DriverBase):
def post_fernet_repo(self, key_list=None):
"""Add the fernet keys for this region

:param: key list payload
:return: Nothing
:param: key list payload
:return: Nothing
"""

# Example key_list:
# [{"id": 0, "key": "GgDAOfmyr19u0hXdm5r_zMgaMLjglVFpp5qn_N4GBJQ="},
# {"id": 1, "key": "7WfL_z54p67gWAkOmQhLA9P0ZygsbbJcKgff0uh28O8="},
# {"id": 2, "key": ""5gsUQeOZ2FzZP58DN32u8pRKRgAludrjmrZFJSOHOw0="}]
LOG.info("post_fernet_repo driver region={} "
"fernet_repo_list={}".format(self.region_name, key_list))
LOG.info(
"post_fernet_repo driver region={} fernet_repo_list={}".format(
self.region_name, key_list
)
)
try:
self.sysinv_client.fernet.create(key_list)
except Exception as e:
@ -649,11 +684,14 @@ class SysinvClient(base.DriverBase):
def put_fernet_repo(self, key_list):
"""Update the fernet keys for this region

:param: key list payload
:return: Nothing
:param: key list payload
:return: Nothing
"""
LOG.info("put_fernet_repo driver region={} "
"fernet_repo_list={}".format(self.region_name, key_list))
LOG.info(
"put_fernet_repo driver region={} fernet_repo_list={}".format(
self.region_name, key_list
)
)
try:
self.sysinv_client.fernet.put(key_list)
except Exception as e:
@ -663,7 +701,7 @@ class SysinvClient(base.DriverBase):
def get_fernet_keys(self):
"""Retrieve the fernet keys for this region

:return: a list of fernet keys
:return: a list of fernet keys
"""

try:
@ -682,7 +720,7 @@ class SysinvClient(base.DriverBase):
def get_host_filesystem(self, host_uuid, name):
"""Get the named filesystem for a host

:return: host_fs or None
:return: host_fs or None
"""

host_fs = None
@ -785,28 +823,25 @@ class SysinvClient(base.DriverBase):
def apply_device_image(self, device_image_id, labels=None):
"""Apply a device image.

:param: device_image_id the image to apply
:param: labels the labels to pass as part of the apply
:param: device_image_id the image to apply
:param: labels the labels to pass as part of the apply
"""
return self.sysinv_client.device_image.apply(device_image_id,
labels=labels)
return self.sysinv_client.device_image.apply(device_image_id, labels=labels)

def remove_device_image(self, device_image_id, labels=None):
"""Remove a device image.

:param: device_image_id the image to remove
:param: labels the labels to pass as part of the remove
:param: device_image_id the image to remove
:param: labels the labels to pass as part of the remove
"""
return self.sysinv_client.device_image.remove(device_image_id,
labels=labels)
return self.sysinv_client.device_image.remove(device_image_id, labels=labels)

def upload_device_image(self, device_image_file, fields):
"""Upload a device image.

:param: device_image_file the file to upload
:param: fields can be: 'bitstream_type', 'pci_vendor', 'pci_device',
'bitstream_id', 'key_signature', 'revoke_key_id', 'name',
'description', 'image_version', 'bmc', 'retimer_included', 'uuid'
:param: device_image_file the file to upload
:param: fields can be: 'bitstream_type', 'pci_vendor', 'pci_device',
'bitstream_id', 'key_signature', 'revoke_key_id', 'name',
'description', 'image_version', 'bmc', 'retimer_included', 'uuid'
"""
return self.sysinv_client.device_image.upload(device_image_file,
**fields)
return self.sysinv_client.device_image.upload(device_image_file, **fields)

@ -1,5 +1,5 @@
# Copyright 2016 Ericsson AB
# Copyright (c) 2017-2022,2024 Wind River Systems, Inc.
# Copyright (c) 2017-2022, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -24,24 +24,24 @@ from dccommon import exceptions

LOG = log.getLogger(__name__)

STRATEGY_NAME_FW_UPDATE = 'fw-update'
STRATEGY_NAME_KUBE_ROOTCA_UPDATE = 'kube-rootca-update'
STRATEGY_NAME_KUBE_UPGRADE = 'kube-upgrade'
STRATEGY_NAME_SW_PATCH = 'sw-patch'
STRATEGY_NAME_SW_UPGRADE = 'sw-upgrade'
STRATEGY_NAME_FW_UPDATE = "fw-update"
STRATEGY_NAME_KUBE_ROOTCA_UPDATE = "kube-rootca-update"
STRATEGY_NAME_KUBE_UPGRADE = "kube-upgrade"
STRATEGY_NAME_SW_PATCH = "sw-patch"
STRATEGY_NAME_SW_UPGRADE = "sw-upgrade"
# TODO(nicodemos): Change this to 'sw-deploy' once the new strategy is created
STRATEGY_NAME_SW_USM = "sw-upgrade"
STRATEGY_NAME_SYS_CONFIG_UPDATE = "system-config-update"

APPLY_TYPE_SERIAL = 'serial'
APPLY_TYPE_PARALLEL = 'parallel'
APPLY_TYPE_IGNORE = 'ignore'
APPLY_TYPE_SERIAL = "serial"
APPLY_TYPE_PARALLEL = "parallel"
APPLY_TYPE_IGNORE = "ignore"

INSTANCE_ACTION_MIGRATE = 'migrate'
INSTANCE_ACTION_STOP_START = 'stop-start'
INSTANCE_ACTION_MIGRATE = "migrate"
INSTANCE_ACTION_STOP_START = "stop-start"

ALARM_RESTRICTIONS_STRICT = 'strict'
ALARM_RESTRICTIONS_RELAXED = 'relaxed'
ALARM_RESTRICTIONS_STRICT = "strict"
ALARM_RESTRICTIONS_RELAXED = "relaxed"

SW_UPDATE_OPTS_CONST_DEFAULT = {
"name": consts.SW_UPDATE_DEFAULT_TITLE,
@ -51,27 +51,30 @@ SW_UPDATE_OPTS_CONST_DEFAULT = {
"default-instance-action": INSTANCE_ACTION_MIGRATE,
"alarm-restriction-type": ALARM_RESTRICTIONS_RELAXED,
"created-at": None,
"updated-at": None}
"updated-at": None,
}

STATE_INITIAL = 'initial'
STATE_BUILDING = 'building'
STATE_BUILD_FAILED = 'build-failed'
STATE_BUILD_TIMEOUT = 'build-timeout'
STATE_READY_TO_APPLY = 'ready-to-apply'
STATE_APPLYING = 'applying'
STATE_APPLY_FAILED = 'apply-failed'
STATE_APPLY_TIMEOUT = 'apply-timeout'
STATE_APPLIED = 'applied'
STATE_ABORTING = 'aborting'
STATE_ABORT_FAILED = 'abort-failed'
STATE_ABORT_TIMEOUT = 'abort-timeout'
STATE_ABORTED = 'aborted'
STATE_INITIAL = "initial"
STATE_BUILDING = "building"
STATE_BUILD_FAILED = "build-failed"
STATE_BUILD_TIMEOUT = "build-timeout"
STATE_READY_TO_APPLY = "ready-to-apply"
STATE_APPLYING = "applying"
STATE_APPLY_FAILED = "apply-failed"
STATE_APPLY_TIMEOUT = "apply-timeout"
STATE_APPLIED = "applied"
STATE_ABORTING = "aborting"
STATE_ABORT_FAILED = "abort-failed"
STATE_ABORT_TIMEOUT = "abort-timeout"
STATE_ABORTED = "aborted"

TRANSITORY_STATES = [STATE_INITIAL,
STATE_BUILDING,
STATE_READY_TO_APPLY,
STATE_APPLYING,
STATE_ABORTING]
TRANSITORY_STATES = [
STATE_INITIAL,
STATE_BUILDING,
STATE_READY_TO_APPLY,
STATE_APPLYING,
STATE_ABORTING,
]

# The exception message when vim authorization fails
VIM_AUTHORIZATION_FAILED = "Authorization failed"
@ -86,9 +89,10 @@ class VimClient(base.DriverBase):
# get an endpoint and token.
if endpoint is None:
self.endpoint = session.get_endpoint(
service_type='nfv',
service_type="nfv",
region_name=region,
interface=consts.KS_ENDPOINT_ADMIN)
interface=consts.KS_ENDPOINT_ADMIN,
)
else:
self.endpoint = endpoint

@ -98,7 +102,7 @@ class VimClient(base.DriverBase):
self.username = consts.DCMANAGER_USER_NAME
# session object does not provide a domain query
# The only domain used for dcmanager is 'default'
self.user_domain_name = 'default'
self.user_domain_name = "default"
# session.get_project_id() returns a UUID
# that always corresponds to 'services'
self.tenant = consts.SERVICES_USER_NAME
@ -106,19 +110,22 @@ class VimClient(base.DriverBase):
except exceptions.ServiceUnavailable:
raise

def create_strategy(self,
strategy_name,
storage_apply_type,
worker_apply_type,
max_parallel_worker_hosts,
default_instance_action,
alarm_restrictions,
**kwargs):
def create_strategy(
self,
strategy_name,
storage_apply_type,
worker_apply_type,
max_parallel_worker_hosts,
default_instance_action,
alarm_restrictions,
**kwargs
):
"""Create orchestration strategy"""

url = self.endpoint
strategy = sw_update.create_strategy(
self.token, url,
self.token,
url,
strategy_name=strategy_name,
controller_apply_type=APPLY_TYPE_SERIAL,
storage_apply_type=storage_apply_type,
@ -130,7 +137,8 @@ class VimClient(base.DriverBase):
username=self.username,
user_domain_name=self.user_domain_name,
tenant=self.tenant,
**kwargs)
**kwargs
)
if not strategy:
raise Exception("Strategy:(%s) creation failed" % strategy_name)

@ -142,11 +150,13 @@ class VimClient(base.DriverBase):

url = self.endpoint
strategy = sw_update.get_strategies(
self.token, url,
self.token,
url,
strategy_name=strategy_name,
username=self.username,
user_domain_name=self.user_domain_name,
tenant=self.tenant)
tenant=self.tenant,
)
if not strategy:
if raise_error_if_missing:
raise Exception("Get strategy failed")
@ -158,9 +168,7 @@ class VimClient(base.DriverBase):
"""Get the current active strategy type and state"""

url = self.endpoint
strategy = sw_update.get_current_strategy(
self.token, url
)
strategy = sw_update.get_current_strategy(self.token, url)

LOG.debug("Strategy: %s" % strategy)
return strategy
@ -170,11 +178,13 @@ class VimClient(base.DriverBase):

url = self.endpoint
success = sw_update.delete_strategy(
self.token, url,
self.token,
url,
strategy_name=strategy_name,
username=self.username,
user_domain_name=self.user_domain_name,
tenant=self.tenant)
tenant=self.tenant,
)
if not success:
raise Exception("Delete strategy failed")

@ -185,11 +195,13 @@ class VimClient(base.DriverBase):

url = self.endpoint
strategy = sw_update.apply_strategy(
self.token, url,
self.token,
url,
strategy_name=strategy_name,
username=self.username,
user_domain_name=self.user_domain_name,
tenant=self.tenant)
tenant=self.tenant,
)
if not strategy:
raise Exception("Strategy apply failed")

@ -201,12 +213,14 @@ class VimClient(base.DriverBase):

url = self.endpoint
strategy = sw_update.abort_strategy(
self.token, url,
self.token,
url,
strategy_name=strategy_name,
stage_id=None,
username=self.username,
user_domain_name=self.user_domain_name,
tenant=self.tenant)
tenant=self.tenant,
)
if not strategy:
raise Exception("Strategy abort failed")

@@ -82,26 +82,31 @@ class EndpointCache(object):
CONF.endpoint_cache.user_domain_name,
CONF.endpoint_cache.password,
CONF.endpoint_cache.project_name,
CONF.endpoint_cache.project_domain_name)
CONF.endpoint_cache.project_domain_name,
)

self.keystone_client, self.service_endpoint_map = \
self.get_cached_master_keystone_client_and_region_endpoint_map(
region_name)
self.keystone_client, self.service_endpoint_map = (
self.get_cached_master_keystone_client_and_region_endpoint_map(region_name)
)

# if Endpoint cache is intended for a subcloud then
# we need to retrieve the subcloud token and session.
# Skip this if auth_url was provided as its assumed that the
# auth_url would correspond to a subcloud so session was
# set up above
if (not auth_url and region_name and
region_name not in
[consts.CLOUD_0, consts.VIRTUAL_MASTER_CLOUD]):
if (
not auth_url
and region_name
and region_name not in [consts.CLOUD_0, consts.VIRTUAL_MASTER_CLOUD]
):
try:
sc_auth_url = self.service_endpoint_map['keystone']
sc_auth_url = self.service_endpoint_map["keystone"]
except KeyError:
# Should not be here...
LOG.exception("Endpoint not found for region_name = %s. "
"Refreshing cached data..." % region_name)
LOG.exception(
"Endpoint not found for region_name=%s. Refreshing cached data..."
% region_name
)
self.re_initialize_master_keystone_client()
raise

@@ -113,23 +118,31 @@ class EndpointCache(object):
CONF.endpoint_cache.user_domain_name,
CONF.endpoint_cache.password,
CONF.endpoint_cache.project_name,
CONF.endpoint_cache.project_domain_name)
CONF.endpoint_cache.project_domain_name,
)

try:
self.keystone_client = ks_client.Client(
session=self.admin_session,
region_name=region_name)
session=self.admin_session, region_name=region_name
)
except Exception:
LOG.error("Retrying keystone client creation for %s" % region_name)
self.keystone_client = ks_client.Client(
session=self.admin_session,
region_name=region_name)
session=self.admin_session, region_name=region_name
)
self.external_auth_url = sc_auth_url

@classmethod
def get_admin_session(cls, auth_url, user_name, user_domain_name,
user_password, user_project, user_project_domain,
timeout=None):
def get_admin_session(
cls,
auth_url,
user_name,
user_domain_name,
user_password,
user_project,
user_project_domain,
timeout=None,
):

user_auth = v3.Password(
auth_url=auth_url,
@@ -140,11 +153,12 @@ class EndpointCache(object):
project_domain_name=user_project_domain,
include_catalog=True,
)
timeout = (CONF.endpoint_cache.http_connect_timeout if timeout is None
else timeout)
timeout = (
CONF.endpoint_cache.http_connect_timeout if timeout is None else timeout
)
return session.Session(
auth=user_auth, additional_headers=consts.USER_HEADER,
timeout=timeout)
auth=user_auth, additional_headers=consts.USER_HEADER, timeout=timeout
)

@classmethod
def get_master_services_list(cls):
@@ -163,22 +177,22 @@ class EndpointCache(object):
# pylint: disable-next=not-an-iterable
for service in EndpointCache.master_services_list:
service_dict = service.to_dict()
service_id_name_map[service_dict['id']] = service_dict['name']
service_id_name_map[service_dict["id"]] = service_dict["name"]

service_endpoint_map = {}
for endpoint in master_endpoints_list:
endpoint_dict = endpoint.to_dict()
region_id = endpoint_dict['region']
region_id = endpoint_dict["region"]
# within central cloud, use internal endpoints
if EndpointCache._is_central_cloud(region_id):
if endpoint_dict['interface'] != consts.KS_ENDPOINT_INTERNAL:
if endpoint_dict["interface"] != consts.KS_ENDPOINT_INTERNAL:
continue
# Otherwise should always use admin endpoints
elif endpoint_dict['interface'] != consts.KS_ENDPOINT_ADMIN:
elif endpoint_dict["interface"] != consts.KS_ENDPOINT_ADMIN:
continue

service_id = endpoint_dict['service_id']
url = endpoint_dict['url']
service_id = endpoint_dict["service_id"]
url = endpoint_dict["url"]
service_name = service_id_name_map[service_id]
if region_id not in service_endpoint_map:
service_endpoint_map[region_id] = {}
@@ -215,9 +229,10 @@ class EndpointCache(object):

:return: session object.
"""
loader = loading.get_plugin_loader('token')
auth = loader.load_from_options(auth_url=self.external_auth_url,
token=token, project_id=project_id)
loader = loading.get_plugin_loader("token")
auth = loader.load_from_options(
auth_url=self.external_auth_url, token=token, project_id=project_id
)
sess = session.Session(auth=auth)
return sess

@@ -229,39 +244,46 @@ class EndpointCache(object):
def get_cached_master_keystone_client_and_region_endpoint_map(self, region_name):
if EndpointCache.master_keystone_client is None:
self._create_master_cached_data()
LOG.info("Generated Master keystone client and master token the "
"very first time")
LOG.info(
"Generated Master keystone client and master token the very first time"
)
else:
token_expiring_soon = is_token_expiring_soon(
token=EndpointCache.master_token)
token=EndpointCache.master_token
)

# If token is expiring soon, initialize a new master keystone
# client
if token_expiring_soon:
LOG.info("The cached keystone token for %s "
"will expire soon %s" %
(consts.CLOUD_0, EndpointCache.master_token['expires_at']))
LOG.info(
"The cached keystone token for %s will expire soon %s"
% (consts.CLOUD_0, EndpointCache.master_token["expires_at"])
)
self._create_master_cached_data()
LOG.info("Generated Master keystone client and master token as they "
"are expiring soon")
LOG.info(
"Generated Master keystone client and master token as they "
"are expiring soon"
)
else:
# Check if the cached master service endpoint map needs to be
# refreshed
if region_name not in self.master_service_endpoint_map:
previous_size = len(EndpointCache.master_service_endpoint_map)
EndpointCache.master_service_endpoint_map = (
self._generate_master_service_endpoint_map(self))
self._generate_master_service_endpoint_map(self)
)
current_size = len(EndpointCache.master_service_endpoint_map)
LOG.info(
"Master endpoints list refreshed to include region %s: "
"prev_size=%d, current_size=%d" % (
region_name, previous_size, current_size)
"prev_size=%d, current_size=%d"
% (region_name, previous_size, current_size)
)

# TODO(clientsession)
if region_name is not None:
region_service_endpoint_map = EndpointCache.master_service_endpoint_map[
region_name]
region_name
]
else:
region_service_endpoint_map = collections.defaultdict(dict)

@@ -274,19 +296,21 @@ class EndpointCache(object):

def _create_master_cached_data(self):
EndpointCache.master_keystone_client = ks_client.Client(
session=self.admin_session,
region_name=consts.CLOUD_0)
session=self.admin_session, region_name=consts.CLOUD_0
)
EndpointCache.master_token = (
EndpointCache.master_keystone_client.tokens.validate(
EndpointCache.master_keystone_client.session.get_token(),
include_catalog=False
include_catalog=False,
)
)
if EndpointCache.master_services_list is None:
EndpointCache.master_services_list = (
EndpointCache.master_keystone_client.services.list())
EndpointCache.master_keystone_client.services.list()
)
EndpointCache.master_service_endpoint_map = (
self._generate_master_service_endpoint_map(self))
self._generate_master_service_endpoint_map(self)
)


def build_subcloud_endpoint_map(ip: str) -> dict:
@@ -421,9 +445,7 @@ class OptimizedEndpointCache(object):
)

self.keystone_client, self.service_endpoint_map = (
self.get_cached_master_keystone_client_and_region_endpoint_map(
region_name
)
self.get_cached_master_keystone_client_and_region_endpoint_map(region_name)
)

# If endpoint cache is intended for a subcloud then we need to
@@ -536,9 +558,7 @@ class OptimizedEndpointCache(object):
service_id_name_map[service.id] = service.name

service_endpoint_map = collections.defaultdict(dict)
for (
endpoint
) in OptimizedEndpointCache.master_keystone_client.endpoints.list():
for endpoint in OptimizedEndpointCache.master_keystone_client.endpoints.list():
# Within central cloud, use only internal endpoints
if OptimizedEndpointCache._is_central_cloud(endpoint.region):
if endpoint.interface != consts.KS_ENDPOINT_INTERNAL:
@@ -702,8 +722,7 @@ class OptimizedEndpointCache(object):
)
current_size = len(OptimizedEndpointCache.master_service_endpoint_map)
LOG.info(
"Master endpoints list refreshed to include "
f"region {region_name}: "
f"Master endpoints list refreshed to include region {region_name}: "
f"prev_size={previous_size}, current_size={current_size}"
)

@@ -82,8 +82,9 @@ class OAMAddressesNotFound(NotFound):


class CertificateNotFound(NotFound):
message = _("Certificate in region=%(region_name)s with signature "
"%(signature)s not found")
message = _(
"Certificate in region=%(region_name)s with signature %(signature)s not found"
)


class LoadNotFound(NotFound):
@@ -103,14 +104,17 @@ class PlaybookExecutionFailed(DCCommonException):


class PlaybookExecutionTimeout(PlaybookExecutionFailed):
message = _("Playbook execution failed [TIMEOUT (%(timeout)s)], "
"command=%(playbook_cmd)s")
message = _(
"Playbook execution failed [TIMEOUT (%(timeout)s)], command=%(playbook_cmd)s"
)


class ImageNotInLocalRegistry(NotFound):
message = _("Image %(image_name)s:%(image_tag)s not found in the local "
"registry. Please check with command: system registry-image-list or "
"system registry-image-tags %(image_name)s")
message = _(
"Image %(image_name)s:%(image_tag)s not found in the local registry. "
"Please check with command: system registry-image-list or "
"system registry-image-tags %(image_name)s"
)


class ApiException(DCCommonException):
@@ -134,8 +138,10 @@ class PeerGroupAssociationNotFound(NotFound):


class SubcloudPeerGroupDeleteFailedAssociated(DCCommonException):
message = _("Subcloud Peer Group %(peer_group_ref)s delete failed "
"cause it is associated with a system peer.")
message = _(
"Subcloud Peer Group %(peer_group_ref)s delete failed "
"cause it is associated with a system peer."
)


class RvmcException(Exception):

@@ -14,12 +14,12 @@ from oslo_log import log as logging

LOG = logging.getLogger(__name__)

K8S_MODULE_MAJOR_VERSION = int(K8S_MODULE_VERSION.split('.', maxsplit=1)[0])
KUBE_CONFIG_PATH = '/etc/kubernetes/admin.conf'
K8S_MODULE_MAJOR_VERSION = int(K8S_MODULE_VERSION.split(".", maxsplit=1)[0])
KUBE_CONFIG_PATH = "/etc/kubernetes/admin.conf"

CERT_MANAGER_GROUP = 'cert-manager.io'
CERT_MANAGER_VERSION = 'v1'
CERT_MANAGER_CERTIFICATE = 'certificates'
CERT_MANAGER_GROUP = "cert-manager.io"
CERT_MANAGER_VERSION = "v1"
CERT_MANAGER_CERTIFICATE = "certificates"


class KubeOperator(object):
@@ -65,8 +65,10 @@ class KubeOperator(object):
if e.status == httplib.NOT_FOUND:
return None
else:
LOG.error("Failed to get Secret %s under "
"Namespace %s: %s" % (name, namespace, e.body))
LOG.error(
"Failed to get Secret %s under Namespace %s: %s"
% (name, namespace, e.body)
)
raise
except Exception as e:
LOG.error("Kubernetes exception in kube_get_secret: %s" % e)
@@ -83,11 +85,12 @@ class KubeOperator(object):
c.delete_namespaced_secret(name, namespace, body=body)
except ApiException as e:
if e.status == httplib.NOT_FOUND:
LOG.warn("Secret %s under Namespace %s "
"not found." % (name, namespace))
LOG.warn("Secret %s under Namespace %s not found." % (name, namespace))
else:
LOG.error("Failed to clean up Secret %s under "
"Namespace %s: %s" % (name, namespace, e.body))
LOG.error(
"Failed to clean up Secret %s under Namespace %s: %s"
% (name, namespace, e.body)
)
raise
except Exception as e:
LOG.error("Kubernetes exception in kube_delete_secret: %s" % e)
@@ -102,7 +105,8 @@ class KubeOperator(object):
CERT_MANAGER_VERSION,
namespace,
CERT_MANAGER_CERTIFICATE,
name)
name,
)
except ApiException as e:
if e.status == httplib.NOT_FOUND:
return None
@@ -123,7 +127,7 @@ class KubeOperator(object):
namespace,
CERT_MANAGER_CERTIFICATE,
name,
body
body,
)
else:
custom_object_api.create_namespaced_custom_object(
@@ -131,7 +135,8 @@ class KubeOperator(object):
CERT_MANAGER_VERSION,
namespace,
CERT_MANAGER_CERTIFICATE,
body)
body,
)

def delete_cert_manager_certificate(self, namespace, name):
custom_object_api = self._get_kubernetesclient_custom_objects()
@@ -143,7 +148,7 @@ class KubeOperator(object):
namespace,
CERT_MANAGER_CERTIFICATE,
name,
body={}
body={},
)
except ApiException as e:
if e.status != httplib.NOT_FOUND:
@@ -158,8 +163,7 @@ class KubeOperator(object):
except ApiException as e:
if e.status == httplib.NOT_FOUND:
return []
LOG.error("Failed to get pod name under "
"Namespace %s." % (namespace))
LOG.error("Failed to get pod name under Namespace %s." % (namespace))
raise
except Exception as e:
LOG.error("Kubernetes exception in get_pods_by_namespace: %s" % e)
@@ -181,8 +185,10 @@ class KubeOperator(object):
b.delete_namespaced_job(name, namespace, body=body)
except ApiException as e:
if e.status != httplib.NOT_FOUND:
LOG.error("Failed to delete job %s under "
"Namespace %s: %s" % (name, namespace, e.body))
LOG.error(
"Failed to delete job %s under Namespace %s: %s"
% (name, namespace, e.body)
)
raise
except Exception as e:
LOG.error("Kubernetes exception in kube_delete_job: %s" % e)
@@ -198,8 +204,10 @@ class KubeOperator(object):
c.delete_namespaced_pod(name, namespace, body=body)
except ApiException as e:
if e.status != httplib.NOT_FOUND:
LOG.error("Failed to delete pod %s under "
"Namespace %s: %s" % (name, namespace, e.body))
LOG.error(
"Failed to delete pod %s under "
"Namespace %s: %s" % (name, namespace, e.body)
)
raise
except Exception as e:
LOG.error("Kubernetes exception in kube_delete_pod: %s" % e)
@@ -217,12 +225,13 @@ class KubeOperator(object):
LOG.error("Failed to get Namespace %s: %s" % (namespace, e.body))
raise
except Exception as e:
LOG.error("Kubernetes exception in "
"kube_get_namespace %s: %s" % (namespace, e))
LOG.error(
"Kubernetes exception in kube_get_namespace %s: %s" % (namespace, e)
)
raise

def kube_create_namespace(self, namespace):
body = {'metadata': {'name': namespace}}
body = {"metadata": {"name": namespace}}

c = self._get_kubernetesclient_core()
try:
@@ -235,8 +244,9 @@ class KubeOperator(object):
LOG.error("Failed to create Namespace %s: %s" % (namespace, e.body))
raise
except Exception as e:
LOG.error("Kubernetes exception in "
"_kube_create_namespace %s: %s" % (namespace, e))
LOG.error(
"Kubernetes exception in _kube_create_namespace %s: %s" % (namespace, e)
)
raise

def kube_copy_secret(self, name, src_namespace, dst_namespace):
@@ -247,6 +257,8 @@ class KubeOperator(object):
body.metadata.namespace = dst_namespace
c.create_namespaced_secret(dst_namespace, body)
except Exception as e:
LOG.error("Failed to copy Secret %s from Namespace %s to Namespace "
"%s: %s" % (name, src_namespace, dst_namespace, e))
LOG.error(
"Failed to copy Secret %s from Namespace %s to Namespace %s: %s"
% (name, src_namespace, dst_namespace, e)
)
raise
File diff suppressed because it is too large
@@ -22,7 +22,7 @@ LOG = logging.getLogger(__name__)

CONF = cfg.CONF

SUBCLOUD_ISO_PATH = '/opt/platform/iso'
SUBCLOUD_ISO_PATH = "/opt/platform/iso"


class SubcloudEnrollmentInit(object):
@@ -54,83 +54,82 @@ class SubcloudEnrollmentInit(object):
def get_https_enabled(self):
if self.https_enabled is None:
system = self.sysinv_client.get_system()
self.https_enabled = system.capabilities.get('https_enabled',
False)
self.https_enabled = system.capabilities.get("https_enabled", False)
return self.https_enabled

def _build_seed_meta_data(self, path, iso_values):
if not os.path.isdir(path):
msg = f'No directory exists: {path}'
msg = f"No directory exists: {path}"
raise exceptions.EnrollInitExecutionFailed(reason=msg)

meta_data = {
'instance-id': self.name,
'local-hostname': 'controller-0'
}
meta_data = {"instance-id": self.name, "local-hostname": "controller-0"}

meta_data_file = os.path.join(path, 'meta-data')
with open(meta_data_file, 'w') as f_out_meta_data_file:
f_out_meta_data_file.write(yaml.dump(meta_data,
default_flow_style=False,
sort_keys=False))
meta_data_file = os.path.join(path, "meta-data")
with open(meta_data_file, "w") as f_out_meta_data_file:
f_out_meta_data_file.write(
yaml.dump(meta_data, default_flow_style=False, sort_keys=False)
)

return True

def create_enroll_override_file(self, override_path, payload):
enroll_override_file = os.path.join(override_path,
'enroll_overrides.yml')
enroll_override_file = os.path.join(override_path, "enroll_overrides.yml")

with open(enroll_override_file, 'w') as f_out_override_file:
with open(enroll_override_file, "w") as f_out_override_file:
f_out_override_file.write(
'---'
'\nenroll_reconfigured_oam: ' +
payload['external_oam_floating_address'] + '\n'
"---"
"\nenroll_reconfigured_oam: "
+ payload["external_oam_floating_address"]
+ "\n"
)

enroll_overrides = payload['install_values'].get('enroll_overrides', {})
enroll_overrides = payload["install_values"].get("enroll_overrides", {})

if enroll_overrides:
for k, v in enroll_overrides.items():
f_out_override_file.write(f'{k}: {v}')
f_out_override_file.write(f"{k}: {v}")

def _build_seed_user_config(self, path, iso_values):
if not os.path.isdir(path):
msg = f'No directory exists: {path}'
msg = f"No directory exists: {path}"
raise exceptions.EnrollInitExecutionFailed(reason=msg)

hashed_password = crypt.crypt(iso_values['admin_password'],
crypt.mksalt(crypt.METHOD_SHA512))
hashed_password = crypt.crypt(
iso_values["admin_password"], crypt.mksalt(crypt.METHOD_SHA512)
)

enroll_utils = '/usr/local/bin/'
reconfig_script = os.path.join(enroll_utils,
'enroll-init-reconfigure')
cleanup_script = os.path.join(enroll_utils,
'enroll-init-cleanup')
enroll_utils = "/usr/local/bin/"
reconfig_script = os.path.join(enroll_utils, "enroll-init-reconfigure")
cleanup_script = os.path.join(enroll_utils, "enroll-init-cleanup")

runcmd = [
f"{reconfig_script}"
f" --oam_subnet {iso_values['external_oam_subnet']}"
f" --oam_gateway_ip {iso_values['external_oam_gateway_address']}"
f" --oam_ip {iso_values['external_oam_floating_address']}"
f" --new_password \'{hashed_password}\'",
cleanup_script
f" --new_password '{hashed_password}'",
cleanup_script,
]

user_data_file = os.path.join(path, 'user-data')
with open(user_data_file, 'w') as f_out_user_data_file:
contents = {'runcmd': runcmd}
f_out_user_data_file.writelines('#cloud-config\n')
f_out_user_data_file.write(yaml.dump(contents,
default_flow_style=False,
sort_keys=False,
width=float("inf")))
user_data_file = os.path.join(path, "user-data")
with open(user_data_file, "w") as f_out_user_data_file:
contents = {"runcmd": runcmd}
f_out_user_data_file.writelines("#cloud-config\n")
f_out_user_data_file.write(
yaml.dump(
contents,
default_flow_style=False,
sort_keys=False,
width=float("inf"),
)
)

return True

def _generate_seed_iso(self, payload):
LOG.info(f'Preparing seed iso generation for {self.name}')
LOG.info(f"Preparing seed iso generation for {self.name}")

with tempfile.TemporaryDirectory(prefix='seed_') as temp_seed_data_dir:
with tempfile.TemporaryDirectory(prefix="seed_") as temp_seed_data_dir:
# TODO(srana): After integration, extract required bootstrap and install
# into iso_values. For now, pass in payload.
try:
@@ -138,58 +137,66 @@ class SubcloudEnrollmentInit(object):
self._build_seed_meta_data(temp_seed_data_dir, payload)
self._build_seed_user_config(temp_seed_data_dir, payload)
except Exception as e:
LOG.exception(f'Unable to generate seed config files '
f'for {self.name}: {e}')
LOG.exception(
f"Unable to generate seed config files for {self.name}: {e}"
)
return False

gen_seed_iso_command = [
"genisoimage",
"-o", self.seed_iso_path,
"-volid", "CIDATA",
"-o",
self.seed_iso_path,
"-volid",
"CIDATA",
"-untranslated-filenames",
"-joliet",
"-rock",
"-iso-level", "2",
temp_seed_data_dir
"-iso-level",
"2",
temp_seed_data_dir,
]

LOG.info(f'Running gen_seed_iso_command '
f'for {self.name}: {gen_seed_iso_command}')
result = subprocess.run(gen_seed_iso_command,
# capture both streams in stdout:
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
LOG.info(
f"Running gen_seed_iso_command for {self.name}: {gen_seed_iso_command}"
)
result = subprocess.run(
gen_seed_iso_command,
# capture both streams in stdout:
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)

if result.returncode == 0:
msg = (
f'Finished generating seed iso for {self.name}: '
f'{gen_seed_iso_command}'
f"Finished generating seed iso for {self.name}: {gen_seed_iso_command}"
)
LOG.info(
"%s returncode: %s, output: %s",
msg,
result.returncode,
result.stdout.decode("utf-8").replace("\n", ", "),
)
LOG.info("%s returncode: %s, output: %s",
msg,
result.returncode,
result.stdout.decode('utf-8').replace('\n', ', '))
else:
msg = (
f'Failed to generate seed iso for {self.name}: '
f'{gen_seed_iso_command}'
msg = f"Failed to generate seed iso for {self.name}: {gen_seed_iso_command}"
LOG.error(
"%s returncode: %s, output: %s",
msg,
result.returncode,
result.stdout.decode("utf-8").replace("\n", ", "),
)
LOG.error("%s returncode: %s, output: %s",
msg,
result.returncode,
result.stdout.decode('utf-8').replace('\n', ', '))
raise Exception(msg)

return True

def prep(self, override_path, payload):
LOG.info(f'Prepare config for {self.name} enroll init')
LOG.info(f"Prepare config for {self.name} enroll init")

software_version = str(payload['software_version'])
software_version = str(payload["software_version"])
self.www_root = os.path.join(SUBCLOUD_ISO_PATH, software_version)
self.iso_dir_path = os.path.join(self.www_root, 'nodes', self.name)
self.seed_iso_path = os.path.join(self.iso_dir_path,
consts.ENROLL_INIT_SEED_ISO_NAME)
self.iso_dir_path = os.path.join(self.www_root, "nodes", self.name)
self.seed_iso_path = os.path.join(
self.iso_dir_path, consts.ENROLL_INIT_SEED_ISO_NAME
)
override_path = os.path.join(override_path, self.name)

if not os.path.isdir(override_path):
@@ -201,26 +208,32 @@ class SubcloudEnrollmentInit(object):
if not os.path.isdir(self.iso_dir_path):
os.makedirs(self.iso_dir_path, 0o755, exist_ok=True)
elif os.path.exists(self.seed_iso_path):
# Clean up iso file if it already exists
# This may happen if a previous enroll init attempt was abruptly
# terminated
LOG.info(f'Found preexisting seed iso for subcloud {self.name}, '
'cleaning up')
# Clean up iso file if it already exists.
# This may happen if a previous enroll init attempt was abruptly terminated.
LOG.info(
f"Found preexisting seed iso for subcloud {self.name}, cleaning up"
)
os.remove(self.seed_iso_path)

self._generate_seed_iso(payload)

# get the boot image url for bmc
image_base_url = SubcloudInstall.get_image_base_url(self.get_https_enabled(),
self.sysinv_client)
image_base_url = SubcloudInstall.get_image_base_url(
self.get_https_enabled(), self.sysinv_client
)
bmc_values = {
'bmc_username': payload['install_values']['bmc_username'],
'bmc_password': payload['bmc_password'],
'bmc_address': payload['install_values']['bmc_address']
"bmc_username": payload["install_values"]["bmc_username"],
"bmc_password": payload["bmc_password"],
"bmc_address": payload["install_values"]["bmc_address"],
}
bmc_values['image'] = os.path.join(image_base_url, 'iso',
software_version, 'nodes',
self.name, consts.ENROLL_INIT_SEED_ISO_NAME)
bmc_values["image"] = os.path.join(
image_base_url,
"iso",
software_version,
"nodes",
self.name,
consts.ENROLL_INIT_SEED_ISO_NAME,
)

SubcloudInstall.create_rvmc_config_file(override_path, bmc_values)

@@ -229,19 +242,18 @@ class SubcloudEnrollmentInit(object):
return True

def enroll_init(self, log_file_dir, enroll_command):
LOG.info(f'Start enroll init for {self.name}')
LOG.info(f"Start enroll init for {self.name}")
subcloud_log_base_path = os.path.join(log_file_dir, self.name)
playbook_log_file = f'{subcloud_log_base_path}_playbook_output.log'
log_file = f"{subcloud_log_base_path}_playbook_output.log"

try:
ansible = dccommon_utils.AnsiblePlaybook(self.name)
ansible.run_playbook(playbook_log_file, enroll_command)
ansible.run_playbook(log_file, enroll_command)
return True
except exceptions.PlaybookExecutionFailed:
msg = (
f"Failed to enroll init {self.name}, check individual "
f"logs at {playbook_log_file}. "
f"Run {dcmanager_consts.ERROR_DESC_CMD} for details"
f"Failed to enroll init {self.name}, check individual logs at "
f"{log_file}. Run {dcmanager_consts.ERROR_DESC_CMD} for details."
)
raise Exception(msg)

@@ -30,7 +30,7 @@ from oslo_log import log as logging

from dccommon import consts
from dccommon.drivers.openstack.sdk_platform import (
OptimizedOpenStackDriver as OpenStackDriver
OptimizedOpenStackDriver as OpenStackDriver,
)
from dccommon.drivers.openstack.sysinv_v1 import SysinvClient
from dccommon import exceptions
@@ -44,17 +44,17 @@ from dcmanager.common import utils
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
BOOT_MENU_TIMEOUT = '5'
BOOT_MENU_TIMEOUT = "5"

SUBCLOUD_ISO_DOWNLOAD_PATH = '/var/www/pages/iso'
DCVAULT_BOOTIMAGE_PATH = '/opt/dc-vault/loads/'
PACKAGE_LIST_PATH = '/usr/local/share/pkg-list'
GEN_ISO_COMMAND = '/usr/local/bin/gen-bootloader-iso.sh'
GEN_ISO_COMMAND_CENTOS = '/usr/local/bin/gen-bootloader-iso-centos.sh'
NETWORK_SCRIPTS = '/etc/sysconfig/network-scripts'
NETWORK_INTERFACE_PREFIX = 'ifcfg'
NETWORK_ROUTE_PREFIX = 'route'
LOCAL_REGISTRY_PREFIX = 'registry.local:9001/'
SUBCLOUD_ISO_DOWNLOAD_PATH = "/var/www/pages/iso"
DCVAULT_BOOTIMAGE_PATH = "/opt/dc-vault/loads/"
PACKAGE_LIST_PATH = "/usr/local/share/pkg-list"
GEN_ISO_COMMAND = "/usr/local/bin/gen-bootloader-iso.sh"
GEN_ISO_COMMAND_CENTOS = "/usr/local/bin/gen-bootloader-iso-centos.sh"
NETWORK_SCRIPTS = "/etc/sysconfig/network-scripts"
NETWORK_INTERFACE_PREFIX = "ifcfg"
NETWORK_ROUTE_PREFIX = "route"
LOCAL_REGISTRY_PREFIX = "registry.local:9001/"
SERIAL_CONSOLE_INSTALL_TYPES = (0, 2, 4)
RVMC_DEBUG_LEVEL_IPMI_CAPTURE = 1

@@ -72,8 +72,7 @@ class SubcloudInstall(object):

@staticmethod
def config_device(ks_cfg, interface, vlan=False):
device_cfg = "%s/%s-%s" % (NETWORK_SCRIPTS, NETWORK_INTERFACE_PREFIX,
interface)
device_cfg = "%s/%s-%s" % (NETWORK_SCRIPTS, NETWORK_INTERFACE_PREFIX, interface)
ks_cfg.write("\tcat << EOF > " + device_cfg + "\n")
ks_cfg.write("DEVICE=" + interface + "\n")
ks_cfg.write("BOOTPROTO=none\n")
@@ -83,37 +82,36 @@ class SubcloudInstall(object):

@staticmethod
def config_ip_address(ks_cfg, values):
ks_cfg.write("IPADDR=" + values['bootstrap_address'] + "\n")
ks_cfg.write(
"PREFIX=" + str(values['bootstrap_address_prefix']) + "\n")
ks_cfg.write("IPADDR=" + values["bootstrap_address"] + "\n")
ks_cfg.write("PREFIX=" + str(values["bootstrap_address_prefix"]) + "\n")

@staticmethod
def config_default_route(ks_cfg, values, ip_version):
if ip_version == 4:
ks_cfg.write("DEFROUTE=yes\n")
ks_cfg.write("GATEWAY=" + values['nexthop_gateway'] + "\n")
ks_cfg.write("GATEWAY=" + values["nexthop_gateway"] + "\n")
else:
ks_cfg.write("IPV6INIT=yes\n")
ks_cfg.write("IPV6_DEFROUTE=yes\n")
ks_cfg.write("IPV6_DEFAULTGW=" + values['nexthop_gateway'] + "\n")
ks_cfg.write("IPV6_DEFAULTGW=" + values["nexthop_gateway"] + "\n")

@staticmethod
def config_static_route(ks_cfg, interface, values, ip_version):
if ip_version == 4:
route_cfg = "%s/%s-%s" % (NETWORK_SCRIPTS, NETWORK_ROUTE_PREFIX,
interface)
route_cfg = "%s/%s-%s" % (NETWORK_SCRIPTS, NETWORK_ROUTE_PREFIX, interface)
ks_cfg.write("\tcat << EOF > " + route_cfg + "\n")
ks_cfg.write("ADDRESS0=" + values['network_address'] + "\n")
ks_cfg.write("NETMASK0=" + str(values['network_mask']) + "\n")
ks_cfg.write("GATEWAY0=" + values['nexthop_gateway'] + "\n")
ks_cfg.write("ADDRESS0=" + values["network_address"] + "\n")
ks_cfg.write("NETMASK0=" + str(values["network_mask"]) + "\n")
ks_cfg.write("GATEWAY0=" + values["nexthop_gateway"] + "\n")
else:
route_cfg = "%s/%s6-%s" % (NETWORK_SCRIPTS, NETWORK_ROUTE_PREFIX,
interface)
route_cfg = "%s/%s6-%s" % (NETWORK_SCRIPTS, NETWORK_ROUTE_PREFIX, interface)
ks_cfg.write("\tcat << EOF > " + route_cfg + "\n")
route_args = "%s/%s via %s dev %s\n" % (values['network_address'],
values['network_mask'],
values['nexthop_gateway'],
interface)
route_args = "%s/%s via %s dev %s\n" % (
values["network_address"],
values["network_mask"],
values["nexthop_gateway"],
interface,
)
ks_cfg.write(route_args)
ks_cfg.write("EOF\n\n")

@@ -125,9 +123,8 @@ class SubcloudInstall(object):
fetch_subcloud_ips=utils.fetch_subcloud_mgmt_ips,
).keystone_client
session = ks_client.session
endpoint = ks_client.endpoint_cache.get_endpoint('sysinv')
return SysinvClient(consts.CLOUD_0,
session, endpoint=endpoint)
endpoint = ks_client.endpoint_cache.get_endpoint("sysinv")
return SysinvClient(consts.CLOUD_0, session, endpoint=endpoint)

@staticmethod
def format_address(ip_address):
@@ -144,79 +141,79 @@ class SubcloudInstall(object):
def get_https_enabled(self):
if self.https_enabled is None:
system = self.sysinv_client.get_system()
self.https_enabled = system.capabilities.get('https_enabled',
False)
self.https_enabled = system.capabilities.get("https_enabled", False)
return self.https_enabled

@staticmethod
def get_image_base_url(https_enabled, sysinv_client):
# get the protocol and the configured http or https port
protocol, value = ('https', 'https_port') if https_enabled \
else ('http', 'http_port')
protocol, value = (
("https", "https_port") if https_enabled else ("http", "http_port")
)

http_parameters = sysinv_client.get_service_parameters('name', value)
port = getattr(http_parameters[0], 'value')
http_parameters = sysinv_client.get_service_parameters("name", value)
port = getattr(http_parameters[0], "value")

oam_addresses = sysinv_client.get_oam_addresses()
oam_floating_ip = SubcloudInstall.format_address(
oam_addresses.oam_floating_ip)
oam_floating_ip = SubcloudInstall.format_address(oam_addresses.oam_floating_ip)

return f"{protocol}://{oam_floating_ip}:{port}"

@staticmethod
def create_rvmc_config_file(override_path, payload):

LOG.debug("create rvmc config file, path: %s, payload: %s",
override_path, payload)
LOG.debug(
"create rvmc config file, path: %s, payload: %s", override_path, payload
)
rvmc_config_file = os.path.join(override_path, consts.RVMC_CONFIG_FILE_NAME)

with open(rvmc_config_file, 'w') as f_out_rvmc_config_file:
with open(rvmc_config_file, "w") as f_out_rvmc_config_file:
for k, v in payload.items():
if k in consts.BMC_INSTALL_VALUES or k == 'image':
f_out_rvmc_config_file.write(k + ': ' + v + '\n')
if k in consts.BMC_INSTALL_VALUES or k == "image":
f_out_rvmc_config_file.write(k + ": " + v + "\n")

def create_install_override_file(self, override_path, payload):

LOG.debug("create install override file")
install_override_file = os.path.join(override_path,
'install_values.yml')
install_override_file = os.path.join(override_path, "install_values.yml")
host_name = socket.gethostname()

with open(install_override_file, 'w') as f_out_override_file:
with open(install_override_file, "w") as f_out_override_file:
f_out_override_file.write(
'---'
'\npassword_change: true'
'\nhost_name: ' + host_name +
'\nrvmc_config_dir: ' + override_path +
'\n'
"---"
"\npassword_change: true"
"\nhost_name: "
+ host_name
+ "\nrvmc_config_dir: "
+ override_path
+ "\n"
)
for k, v in payload.items():
f_out_override_file.write("%s: %s\n" % (k, v))

def create_ks_conf_file(self, filename, values):
try:
with open(filename, 'w') as f:
with open(filename, "w") as f:
# create ks-addon.cfg
default_route = False
static_route = False
if 'nexthop_gateway' in values:
if 'network_address' in values:
if "nexthop_gateway" in values:
if "network_address" in values:
static_route = True
else:
default_route = True

f.write("OAM_DEV=" + str(values['bootstrap_interface']) + "\n")
f.write("OAM_DEV=" + str(values["bootstrap_interface"]) + "\n")

vlan_id = None
if 'bootstrap_vlan' in values:
vlan_id = values['bootstrap_vlan']
if "bootstrap_vlan" in values:
vlan_id = values["bootstrap_vlan"]
f.write("OAM_VLAN=" + str(vlan_id) + "\n\n")

interface = "$OAM_DEV"
self.config_device(f, interface)

ip_version = netaddr.IPAddress(
values['bootstrap_address']).version
ip_version = netaddr.IPAddress(values["bootstrap_address"]).version
if vlan_id is None:
self.config_ip_address(f, values)
if default_route:
@@ -234,8 +231,7 @@ class SubcloudInstall(object):
route_interface = vlan_interface

if static_route:
self.config_static_route(f, route_interface,
values, ip_version)
self.config_static_route(f, route_interface, values, ip_version)
except IOError as e:
LOG.error("Failed to open file: %s", filename)
LOG.exception(e)
@@ -244,20 +240,23 @@ class SubcloudInstall(object):
def update_iso(self, override_path, values):
if not os.path.isdir(self.www_iso_root):
os.mkdir(self.www_iso_root, 0o755)
LOG.debug("update_iso: www_iso_root: %s, values: %s, override_path: %s",
self.www_iso_root, str(values), override_path)
LOG.debug(
"update_iso: www_iso_root: %s, values: %s, override_path: %s",
self.www_iso_root,
str(values),
override_path,
)
path = None
software_version = str(values['software_version'])
software_version = str(values["software_version"])
try:
if parse.urlparse(values['image']).scheme:
url = values['image']
if parse.urlparse(values["image"]).scheme:
url = values["image"]
else:
path = os.path.abspath(values['image'])
url = parse.urljoin('file:', request.pathname2url(path))
filename = os.path.join(override_path, 'bootimage.iso')
path = os.path.abspath(values["image"])
url = parse.urljoin("file:", request.pathname2url(path))
filename = os.path.join(override_path, "bootimage.iso")

if path and path.startswith(consts.LOAD_VAULT_DIR +
'/' + software_version):
if path and path.startswith(consts.LOAD_VAULT_DIR + "/" + software_version):
if os.path.exists(path):
# Reference known load in vault
LOG.info("Setting input_iso to load vault path %s" % path)
@@ -271,134 +270,166 @@ class SubcloudInstall(object):
LOG.info("Downloaded %s to %s", url, self.input_iso)
except urllib_error.ContentTooShortError as e:
msg = "Error: Downloading file %s may be interrupted: %s" % (
values['image'], e)
values["image"],
e,
)
LOG.error(msg)
raise exceptions.DCCommonException(
resource=self.name,
msg=msg)
raise exceptions.DCCommonException(resource=self.name, msg=msg)
except Exception as e:
msg = "Error: Could not download file %s: %s" % (
values['image'], e)
msg = "Error: Could not download file %s: %s" % (values["image"], e)
LOG.error(msg)
raise exceptions.DCCommonException(
resource=self.name,
msg=msg)
raise exceptions.DCCommonException(resource=self.name, msg=msg)

is_subcloud_debian = dccommon_utils.is_debian(software_version)

if is_subcloud_debian:
update_iso_cmd = [
GEN_ISO_COMMAND,
"--input", self.input_iso,
"--www-root", self.www_iso_root,
"--id", self.name,
"--boot-hostname", self.name,
"--timeout", BOOT_MENU_TIMEOUT,
"--input",
self.input_iso,
"--www-root",
self.www_iso_root,
"--id",
self.name,
"--boot-hostname",
self.name,
"--timeout",
BOOT_MENU_TIMEOUT,
]
else:
update_iso_cmd = [
GEN_ISO_COMMAND_CENTOS,
"--input", self.input_iso,
"--www-root", self.www_iso_root,
"--id", self.name,
"--boot-hostname", self.name,
"--timeout", BOOT_MENU_TIMEOUT,
"--input",
self.input_iso,
"--www-root",
self.www_iso_root,
"--id",
self.name,
"--boot-hostname",
self.name,
"--timeout",
BOOT_MENU_TIMEOUT,
"--patches-from-iso",
]
for key, _ in consts.GEN_ISO_OPTIONS.items():
if key in values:
LOG.debug("Setting option from key=%s, option=%s, value=%s",
key, consts.GEN_ISO_OPTIONS[key], values[key])
if key in ('bootstrap_address', 'nexthop_gateway'):
update_iso_cmd += [consts.GEN_ISO_OPTIONS[key],
self.format_address(values[key])]
elif key == 'no_check_certificate':
if str(values[key]) == 'True' and self.get_https_enabled():
update_iso_cmd += [consts.GEN_ISO_OPTIONS[key],
'inst.noverifyssl=True']
elif key in ('rootfs_device', 'boot_device',
'rd.net.timeout.ipv6dad'):
update_iso_cmd += [consts.GEN_ISO_OPTIONS[key],
(key + '=' + str(values[key]))]
elif key == 'bootstrap_vlan':
vlan_inteface = "%s.%s:%s" % \
(values['bootstrap_interface'],
values['bootstrap_vlan'],
values['bootstrap_interface'])
update_iso_cmd += [consts.GEN_ISO_OPTIONS[key],
('vlan' + '=' + vlan_inteface)]
elif (key == 'bootstrap_interface'
and 'bootstrap_vlan' in values):
boot_interface = "%s.%s" % (values['bootstrap_interface'],
values['bootstrap_vlan'])
LOG.debug(
"Setting option from key=%s, option=%s, value=%s",
key,
consts.GEN_ISO_OPTIONS[key],
values[key],
)
if key in ("bootstrap_address", "nexthop_gateway"):
update_iso_cmd += [
consts.GEN_ISO_OPTIONS[key],
self.format_address(values[key]),
]
elif key == "no_check_certificate":
if str(values[key]) == "True" and self.get_https_enabled():
update_iso_cmd += [
consts.GEN_ISO_OPTIONS[key],
"inst.noverifyssl=True",
]
elif key in ("rootfs_device", "boot_device", "rd.net.timeout.ipv6dad"):
update_iso_cmd += [
consts.GEN_ISO_OPTIONS[key],
(key + "=" + str(values[key])),
]
elif key == "bootstrap_vlan":
vlan_inteface = "%s.%s:%s" % (
values["bootstrap_interface"],
values["bootstrap_vlan"],
values["bootstrap_interface"],
)
update_iso_cmd += [
consts.GEN_ISO_OPTIONS[key],
("vlan" + "=" + vlan_inteface),
]
elif key == "bootstrap_interface" and "bootstrap_vlan" in values:
boot_interface = "%s.%s" % (
values["bootstrap_interface"],
values["bootstrap_vlan"],
)
update_iso_cmd += [consts.GEN_ISO_OPTIONS[key], boot_interface]
elif key == 'persistent_size':
update_iso_cmd += [consts.GEN_ISO_OPTIONS[key],
('persistent_size=%s'
% str(values[key]))]
elif key == 'hw_settle':
elif key == "persistent_size":
update_iso_cmd += [
consts.GEN_ISO_OPTIONS[key],
("persistent_size=%s" % str(values[key])),
]
elif key == "hw_settle":
# translate to 'insthwsettle' boot parameter
update_iso_cmd += [consts.GEN_ISO_OPTIONS[key],
('insthwsettle=%s'
% str(values[key]))]
elif key == 'extra_boot_params':
update_iso_cmd += [consts.GEN_ISO_OPTIONS[key],
('extra_boot_params=%s'
% str(values[key]))]
update_iso_cmd += [
consts.GEN_ISO_OPTIONS[key],
("insthwsettle=%s" % str(values[key])),
]
elif key == "extra_boot_params":
update_iso_cmd += [
consts.GEN_ISO_OPTIONS[key],
("extra_boot_params=%s" % str(values[key])),
]
else:
update_iso_cmd += [consts.GEN_ISO_OPTIONS[key], str(values[key])]

if not is_subcloud_debian:
# create ks-addon.cfg
addon_cfg = os.path.join(override_path, 'ks-addon.cfg')
addon_cfg = os.path.join(override_path, "ks-addon.cfg")
self.create_ks_conf_file(addon_cfg, values)

update_iso_cmd += ['--addon', addon_cfg]
update_iso_cmd += ["--addon", addon_cfg]

image_base_url = self.get_image_base_url(self.get_https_enabled(),
self.sysinv_client)
base_url = os.path.join(image_base_url, 'iso', software_version)
update_iso_cmd += ['--base-url', base_url]
image_base_url = self.get_image_base_url(
self.get_https_enabled(), self.sysinv_client
)
base_url = os.path.join(image_base_url, "iso", software_version)
update_iso_cmd += ["--base-url", base_url]

str_cmd = ' '.join(x for x in update_iso_cmd)
str_cmd = " ".join(x for x in update_iso_cmd)
LOG.info("Running update_iso_cmd: %s", str_cmd)
result = subprocess.run(update_iso_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
result = subprocess.run(
update_iso_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
if result.returncode != 0:
msg = f'Failed to update iso: {str_cmd}'
LOG.error("%s returncode: %s, output: %s",
msg,
result.returncode,
result.stdout.decode('utf-8').replace('\n', ', '))
msg = f"Failed to update iso: {str_cmd}"
LOG.error(
"%s returncode: %s, output: %s",
msg,
result.returncode,
result.stdout.decode("utf-8").replace("\n", ", "),
)
raise Exception(msg)

def cleanup(self, software_version=None):
# Do not remove the input_iso if it is in the Load Vault
if (self.input_iso is not None and
not self.input_iso.startswith(consts.LOAD_VAULT_DIR) and
os.path.exists(self.input_iso)):
if (
self.input_iso is not None
and not self.input_iso.startswith(consts.LOAD_VAULT_DIR)
and os.path.exists(self.input_iso)
):
os.remove(self.input_iso)

if (self.www_iso_root is not None and os.path.isdir(self.www_iso_root)):
if self.www_iso_root is not None and os.path.isdir(self.www_iso_root):
if dccommon_utils.is_debian(software_version):
cleanup_cmd = [
GEN_ISO_COMMAND,
"--id", self.name,
"--www-root", self.www_iso_root,
"--delete"
"--id",
self.name,
"--www-root",
self.www_iso_root,
"--delete",
]
else:
cleanup_cmd = [
GEN_ISO_COMMAND_CENTOS,
"--id", self.name,
"--www-root", self.www_iso_root,
"--delete"
"--id",
self.name,
"--www-root",
self.www_iso_root,
"--delete",
]
LOG.info("Running install cleanup: %s", self.name)
result = subprocess.run(cleanup_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
result = subprocess.run(
cleanup_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
if result.returncode == 0:
# Note: watch for non-exit 0 errors in this output as well
LOG.info(
@ -428,17 +459,25 @@ class SubcloudInstall(object):
|
||||
return
|
||||
|
||||
temp_bootimage_mnt_dir = tempfile.mkdtemp()
|
||||
bootimage_path = os.path.join(DCVAULT_BOOTIMAGE_PATH, software_version,
|
||||
'bootimage.iso')
|
||||
bootimage_path = os.path.join(
|
||||
DCVAULT_BOOTIMAGE_PATH, software_version, "bootimage.iso"
|
||||
)
|
||||
|
||||
with open(os.devnull, "w") as fnull:
|
||||
try:
|
||||
# pylint: disable-next=not-callable
|
||||
subprocess.check_call(['mount', '-r', '-o', 'loop',
|
||||
bootimage_path,
|
||||
temp_bootimage_mnt_dir],
|
||||
stdout=fnull,
|
||||
stderr=fnull)
|
||||
subprocess.check_call(
|
||||
[
|
||||
"mount",
|
||||
"-r",
|
||||
"-o",
|
||||
"loop",
|
||||
bootimage_path,
|
||||
temp_bootimage_mnt_dir,
|
||||
],
|
||||
stdout=fnull,
|
||||
stderr=fnull,
|
||||
)
|
||||
except Exception:
|
||||
os.rmdir(temp_bootimage_mnt_dir)
|
||||
raise Exception("Unable to mount bootimage.iso")
|
||||
@ -446,8 +485,7 @@ class SubcloudInstall(object):
|
||||
# Now that the bootimage.iso has been mounted, copy package_checksums to
|
||||
# pkg_file_src.
|
||||
try:
|
||||
pkg_file = os.path.join(temp_bootimage_mnt_dir,
|
||||
'package_checksums')
|
||||
pkg_file = os.path.join(temp_bootimage_mnt_dir, "package_checksums")
|
||||
LOG.info("Copying %s to %s", pkg_file, pkg_file_src)
|
||||
shutil.copy(pkg_file, pkg_file_src)
|
||||
|
||||
@ -462,8 +500,9 @@ class SubcloudInstall(object):
|
||||
if not os.path.exists(PACKAGE_LIST_PATH):
|
||||
os.mkdir(PACKAGE_LIST_PATH, 0o755)
|
||||
|
||||
package_list_file = os.path.join(PACKAGE_LIST_PATH,
|
||||
software_version + "_packages_list.txt")
|
||||
package_list_file = os.path.join(
|
||||
PACKAGE_LIST_PATH, software_version + "_packages_list.txt"
|
||||
)
|
||||
shutil.copy(pkg_file_src, package_list_file)
|
||||
except IOError:
|
||||
# bootimage.iso in /opt/dc-vault/<release-id>/ does not have the file.
|
||||
@ -473,13 +512,12 @@ class SubcloudInstall(object):
|
||||
raise Exception(msg)
|
||||
finally:
|
||||
# pylint: disable-next=not-callable
|
||||
subprocess.check_call(['umount', '-l', temp_bootimage_mnt_dir])
|
||||
subprocess.check_call(["umount", "-l", temp_bootimage_mnt_dir])
|
||||
os.rmdir(temp_bootimage_mnt_dir)
|
||||
|
||||
@staticmethod
|
||||
def is_serial_console(install_type):
|
||||
return (install_type is not None
|
||||
and install_type in SERIAL_CONSOLE_INSTALL_TYPES)
|
||||
return install_type is not None and install_type in SERIAL_CONSOLE_INSTALL_TYPES
|
||||
|
||||
def prep(self, override_path, payload):
|
||||
"""Update the iso image and create the config files for the subcloud"""
|
||||
@ -501,9 +539,9 @@ class SubcloudInstall(object):
|
||||
if k in payload:
|
||||
iso_values[k] = payload.get(k)
|
||||
|
||||
software_version = str(payload['software_version'])
|
||||
iso_values['software_version'] = payload['software_version']
|
||||
iso_values['image'] = payload['image']
|
||||
software_version = str(payload["software_version"])
|
||||
iso_values["software_version"] = payload["software_version"]
|
||||
iso_values["image"] = payload["image"]
|
||||
|
||||
override_path = os.path.join(override_path, self.name)
|
||||
if not os.path.isdir(override_path):
|
||||
@ -517,10 +555,11 @@ class SubcloudInstall(object):
|
||||
# Clean up iso directory if it already exists
|
||||
# This may happen if a previous installation attempt was abruptly
|
||||
# terminated
|
||||
iso_dir_path = os.path.join(self.www_iso_root, 'nodes', self.name)
|
||||
iso_dir_path = os.path.join(self.www_iso_root, "nodes", self.name)
|
||||
if os.path.isdir(iso_dir_path):
|
||||
LOG.info("Found preexisting iso dir for subcloud %s, cleaning up",
|
||||
self.name)
|
||||
LOG.info(
|
||||
"Found preexisting iso dir for subcloud %s, cleaning up", self.name
|
||||
)
|
||||
self.cleanup(software_version)
|
||||
|
||||
# Update the default iso image based on the install values
|
||||
@ -533,11 +572,12 @@ class SubcloudInstall(object):
|
||||
del payload[k]
|
||||
|
||||
# get the boot image url for bmc
|
||||
image_base_url = self.get_image_base_url(self.get_https_enabled(),
|
||||
self.sysinv_client)
|
||||
payload['image'] = os.path.join(image_base_url, 'iso',
|
||||
software_version, 'nodes',
|
||||
self.name, 'bootimage.iso')
|
||||
image_base_url = self.get_image_base_url(
|
||||
self.get_https_enabled(), self.sysinv_client
|
||||
)
|
||||
payload["image"] = os.path.join(
|
||||
image_base_url, "iso", software_version, "nodes", self.name, "bootimage.iso"
|
||||
)
|
||||
|
||||
# create the rvmc config file
|
||||
self.create_rvmc_config_file(override_path, payload)
|
||||
@ -560,26 +600,27 @@ class SubcloudInstall(object):
|
||||
pkg_file_dest = os.path.join(
|
||||
SUBCLOUD_ISO_DOWNLOAD_PATH,
|
||||
software_version,
|
||||
'nodes',
|
||||
"nodes",
|
||||
self.name,
|
||||
software_version + "_packages_list.txt")
|
||||
software_version + "_packages_list.txt",
|
||||
)
|
||||
|
||||
pkg_file_src = os.path.join(consts.SUBCLOUD_FEED_PATH,
|
||||
"rel-{version}".format(
|
||||
version=software_version),
|
||||
'package_checksums')
|
||||
pkg_file_src = os.path.join(
|
||||
consts.SUBCLOUD_FEED_PATH,
|
||||
"rel-{version}".format(version=software_version),
|
||||
"package_checksums",
|
||||
)
|
||||
|
||||
if not os.path.exists(pkg_file_src):
|
||||
# the file does not exist. copy it from the bootimage.
|
||||
self._copy_packages_list_from_bootimage(software_version,
|
||||
pkg_file_src)
|
||||
self._copy_packages_list_from_bootimage(software_version, pkg_file_src)
|
||||
|
||||
# since we now have package_checksums, copy to destination.
|
||||
shutil.copy(pkg_file_src, pkg_file_dest)
|
||||
|
||||
# remove the boot image url from the payload
|
||||
if 'image' in payload:
|
||||
del payload['image']
|
||||
if "image" in payload:
|
||||
del payload["image"]
|
||||
|
||||
# create the install override file
|
||||
self.create_install_override_file(override_path, payload)
@ -619,8 +660,9 @@ class IpmiLogger(object):
self.name = subcloud_name
self.override_path = os.path.join(override_path, subcloud_name)
# Note: will not exist yet, but is created before ipmicap_start:
self.rvmc_config_file = os.path.join(self.override_path,
consts.RVMC_CONFIG_FILE_NAME)
self.rvmc_config_file = os.path.join(
self.override_path, consts.RVMC_CONFIG_FILE_NAME
)

@staticmethod
def is_enabled(rvmc_debug_level):
@ -647,9 +689,7 @@ class IpmiLogger(object):
try:
return int(rvmc_debug_level) >= RVMC_DEBUG_LEVEL_IPMI_CAPTURE
except ValueError:
LOG.exception(
f"Invalid rvmc_debug_level in payload: '{rvmc_debug_level}'"
)
LOG.exception(f"Invalid rvmc_debug_level in payload: '{rvmc_debug_level}'")
return False

def start_logging(self, log_file):
@ -662,6 +702,7 @@ class IpmiLogger(object):
Run this script in a separate thread so that we can wait for the
process to end while not blocking the caller.
"""

def ipmicap_start(log_file):
"""Thread function: Invoke the IPMI capture script.

@ -670,10 +711,12 @@ class IpmiLogger(object):
try:
ipmi_cmd = [
"/usr/local/bin/ipmicap.sh",
"--force-deactivate", "--redirect",
"--force-deactivate",
"--redirect",
"--rvmc-config",
self.rvmc_config_file,
"--log", log_file,
"--log",
log_file,
]
msg = "IPMI capture"

@ -684,7 +727,10 @@ class IpmiLogger(object):

LOG.info(
"%s start %s: %s, pty:%s",
msg, self.name, " ".join(ipmi_cmd), os.ttyname(slave_fd),
msg,
self.name,
" ".join(ipmi_cmd),
os.ttyname(slave_fd),
)
try:
result = subprocess.run(
@ -699,14 +745,19 @@ class IpmiLogger(object):
if output:
LOG.info(
"%s finished %s, output: %s",
msg, self.name, output,
msg,
self.name,
output,
)
else:
LOG.info("%s finished %s", msg, self.name)
else:
LOG.warn(
"%s failed %s, returncode: %s, output: %s",
msg, self.name, result.returncode, output,
msg,
self.name,
result.returncode,
output,
)
finally:
try:
@ -722,10 +773,7 @@ class IpmiLogger(object):
LOG.exception(f"IPMI capture start failed: {self.name}")

try:
capture_thread = threading.Thread(
target=ipmicap_start,
args=(log_file, )
)
capture_thread = threading.Thread(target=ipmicap_start, args=(log_file,))
capture_thread.start()

except Exception:
@ -751,13 +799,16 @@ class IpmiLogger(object):
if result.returncode == 0:
LOG.info(
"%s %s, output: %s",
msg, self.name,
msg,
self.name,
result.stdout.decode("utf-8").replace("\n", ", "),
)
else:
LOG.warn(
"%s %s failed, returncode: %s, output: %s",
msg, self.name, result.returncode,
msg,
self.name,
result.returncode,
result.stdout.decode("utf-8").replace("\n", ", "),
)
)
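As context for the hunks above: the capture runs as a blocking subprocess inside a worker thread, so the caller can return immediately. A condensed, hedged sketch of that pattern (the argv is simplified; the real code also allocates a pty and logs via LOG rather than print):

import subprocess
import threading

def start_capture(log_file):
    # Thread body: run the blocking capture script and report the outcome.
    def capture():
        cmd = ["/usr/local/bin/ipmicap.sh", "--log", log_file]  # simplified argv
        try:
            result = subprocess.run(cmd, capture_output=True, check=False)
            print("capture finished, returncode:", result.returncode)
        except FileNotFoundError:
            print("capture script not installed")

    # Run in a separate thread so the caller is not blocked while
    # waiting for the capture process to end.
    thread = threading.Thread(target=capture)
    thread.start()
    return thread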
@ -1,5 +1,5 @@
#
# Copyright (c) 2022 Wind River Systems, Inc.
# Copyright (c) 2022, 2024 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
@ -24,7 +24,7 @@ class SubprocessCleanup(object):
to be cleaned up here.
"""

LOCK_NAME = 'subprocess-cleanup'
LOCK_NAME = "subprocess-cleanup"
SUBPROCESS_GROUPS = {}

@staticmethod
@ -37,36 +37,42 @@ class SubprocessCleanup(object):

@staticmethod
@lockutils.synchronized(LOCK_NAME)
def shutdown_cleanup(origin='service'):
def shutdown_cleanup(origin="service"):
SubprocessCleanup._shutdown_subprocess_groups(origin)

@staticmethod
def _shutdown_subprocess_groups(origin):
num_process_groups = len(SubprocessCleanup.SUBPROCESS_GROUPS)
if num_process_groups > 0:
LOG.warn("Shutting down %d process groups via %s",
num_process_groups, origin)
LOG.warn(
"Shutting down %d process groups via %s", num_process_groups, origin
)
start_time = time.time()
for _, subp in SubprocessCleanup.SUBPROCESS_GROUPS.items():
kill_subprocess_group(subp)
LOG.info("Time for %s child processes to exit: %s",
num_process_groups,
time.time() - start_time)
LOG.info(
"Time for %s child processes to exit: %s",
num_process_groups,
time.time() - start_time,
)


def kill_subprocess_group(subp, logmsg=None):
"""Kill the subprocess and any children."""
exitcode = subp.poll()
if exitcode:
LOG.info("kill_subprocess_tree: subprocess has already "
"terminated, pid: %s, exitcode=%s", subp.pid, exitcode)
LOG.info(
"kill_subprocess_tree: subprocess has already "
"terminated, pid: %s, exitcode=%s",
subp.pid,
exitcode,
)
return False

if logmsg:
LOG.warn(logmsg)
else:
LOG.warn("Killing subprocess group for pid: %s, args: %s",
subp.pid, subp.args)
LOG.warn("Killing subprocess group for pid: %s, args: %s", subp.pid, subp.args)
# Send a SIGTERM (normal kill). We do not verify if the processes
# are shut down (best-effort), since we don't want to wait around before
# issuing a SIGKILL (fast shutdown)
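A best-effort sketch of the TERM-then-KILL strategy the comment describes (assuming the subprocess was started in its own process group, e.g. with start_new_session=True; this is an illustration, not the module's exact code):

import os
import signal

def kill_group_best_effort(subp):
    # subp: a subprocess.Popen started in its own process group.
    try:
        pgid = os.getpgid(subp.pid)
        # Best-effort SIGTERM for the whole group, then SIGKILL right
        # away rather than waiting for a graceful shutdown.
        os.killpg(pgid, signal.SIGTERM)
        os.killpg(pgid, signal.SIGKILL)
    except ProcessLookupError:
        pass  # group already gone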
@ -20,21 +20,42 @@ from oslotest import base
from dccommon.tests import utils

KEYSTONE_ENDPOINT_0 = [
"9785cc7f99b6469ba6fe89bd8d5b9072", "NULL", "admin",
"7d48ddb964034eb588e557b976d11cdf", "http://[fd01:1::2]:9292", "{}", True,
"SystemController"
"9785cc7f99b6469ba6fe89bd8d5b9072",
"NULL",
"admin",
"7d48ddb964034eb588e557b976d11cdf",
"http://[fd01:1::2]:9292",
"{}",
True,
"SystemController",
]

ROUTE_0 = [
"2018-04-11 17:01:49.654734", "NULL", "NULL", 1,
"3a07ca95-d6fe-48cb-9393-b949f800b552", 6,
"fd01:2::", 64, "fd01:1::1", 1, 9
"2018-04-11 17:01:49.654734",
"NULL",
"NULL",
1,
"3a07ca95-d6fe-48cb-9393-b949f800b552",
6,
"fd01:2::",
64,
"fd01:1::1",
1,
9,
]

ROUTE_1 = [
"2018-04-11 17:01:49.654734", "NULL", "NULL", 1,
"3a07ca95-d6fe-48cb-9393-b949f800b552", 6,
"fd01:3::", 64, "fd01:1::1", 1, 9
"2018-04-11 17:01:49.654734",
"NULL",
"NULL",
1,
"3a07ca95-d6fe-48cb-9393-b949f800b552",
6,
"fd01:3::",
64,
"fd01:1::1",
1,
9,
]

@ -14,36 +14,38 @@ from dccommon.drivers.openstack import dcmanager_v1
from dccommon import exceptions as dccommon_exceptions
from dccommon.tests import base

FAKE_ID = '1'
SUBCLOUD_NAME = 'Subcloud1'
SUBCLOUD_BOOTSTRAP_ADDRESS = '192.168.0.10'
SUBCLOUD_BOOTSTRAP_VALUE_PATH = '/tmp/test_subcloud_bootstrap_value.yaml'
SUBCLOUD_GROUP_NAME = 'SubcloudGroup1'
FAKE_ID = "1"
SUBCLOUD_NAME = "Subcloud1"
SUBCLOUD_BOOTSTRAP_ADDRESS = "192.168.0.10"
SUBCLOUD_BOOTSTRAP_VALUE_PATH = "/tmp/test_subcloud_bootstrap_value.yaml"
SUBCLOUD_GROUP_NAME = "SubcloudGroup1"
SYSTEM_PEER_UUID = str(uuid.uuid4())
SYSTEM_PEER_NAME = 'SystemPeer1'
SYSTEM_PEER_NAME = "SystemPeer1"
SUBCLOUD_PEER_GROUP_ID = 1
SUBCLOUD_PEER_GROUP_NAME = 'SubcloudPeerGroup1'
SUBCLOUD_PEER_GROUP_NAME = "SubcloudPeerGroup1"

FAKE_ENDPOINT = 'http://128.128.1.1:8119/v1.0'
FAKE_TOKEN = 'token'
FAKE_ENDPOINT = "http://128.128.1.1:8119/v1.0"
FAKE_TOKEN = "token"
FAKE_TIMEOUT = 600

FAKE_SUBCLOUD_DATA = {"id": FAKE_ID,
"name": SUBCLOUD_NAME,
"description": "subcloud1 description",
"location": "subcloud1 location",
"software-version": "22.12",
"management-state": "managed",
"deploy-status": "complete",
"management-subnet": "192.168.101.0/24",
"management-start-ip": "192.168.101.2",
"management-end-ip": "192.168.101.50",
"management-gateway-ip": "192.168.101.1",
"systemcontroller-gateway-ip": "192.168.204.101",
"group-id": 1,
"peer-group-id": SUBCLOUD_PEER_GROUP_ID,
"rehome-data": "null",
"availability-status": "disabled"}
FAKE_SUBCLOUD_DATA = {
"id": FAKE_ID,
"name": SUBCLOUD_NAME,
"description": "subcloud1 description",
"location": "subcloud1 location",
"software-version": "22.12",
"management-state": "managed",
"deploy-status": "complete",
"management-subnet": "192.168.101.0/24",
"management-start-ip": "192.168.101.2",
"management-end-ip": "192.168.101.50",
"management-gateway-ip": "192.168.101.1",
"systemcontroller-gateway-ip": "192.168.204.101",
"group-id": 1,
"peer-group-id": SUBCLOUD_PEER_GROUP_ID,
"rehome-data": "null",
"availability-status": "disabled",
}

FAKE_SUBCLOUD_PEER_GROUP_DATA = {
"id": SUBCLOUD_PEER_GROUP_ID,
@ -52,7 +54,7 @@ FAKE_SUBCLOUD_PEER_GROUP_DATA = {
"system-leader-name": SYSTEM_PEER_NAME,
"max-subcloud-rehoming": 1,
"group-state": "enabled",
"group-priority": 1
"group-priority": 1,
}


@ -60,8 +62,8 @@ class TestDcmanagerClient(base.DCCommonTestCase):
def setUp(self):
super(TestDcmanagerClient, self).setUp()

@mock.patch('requests.get')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
@mock.patch("requests.get")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_get_subcloud(self, mock_client_init, mock_get):
mock_response = mock.MagicMock()
mock_response.status_code = 200
@ -70,16 +72,17 @@ class TestDcmanagerClient(base.DCCommonTestCase):

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

actual_subcloud = client.get_subcloud(SUBCLOUD_NAME)
self.assertEqual(SUBCLOUD_NAME, actual_subcloud.get('name'))
self.assertEqual(SUBCLOUD_NAME, actual_subcloud.get("name"))

@mock.patch('requests.get')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
@mock.patch("requests.get")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_get_subcloud_not_found(self, mock_client_init, mock_get):
mock_response = mock.MagicMock()
mock_response.status_code = 404
@ -88,78 +91,84 @@ class TestDcmanagerClient(base.DCCommonTestCase):

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

self.assertRaises(dccommon_exceptions.SubcloudNotFound,
client.get_subcloud, SUBCLOUD_NAME)
self.assertRaises(
dccommon_exceptions.SubcloudNotFound, client.get_subcloud, SUBCLOUD_NAME
)

@mock.patch('requests.get')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
@mock.patch("requests.get")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_get_subcloud_list(self, mock_client_init, mock_get):
mock_response = mock.MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {
"subclouds": [FAKE_SUBCLOUD_DATA]}
mock_response.json.return_value = {"subclouds": [FAKE_SUBCLOUD_DATA]}
mock_get.return_value = mock_response

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

actual_subclouds = client.get_subcloud_list()
self.assertEqual(1, len(actual_subclouds))
self.assertEqual(SUBCLOUD_NAME, actual_subclouds[0].get('name'))
self.assertEqual(SUBCLOUD_NAME, actual_subclouds[0].get("name"))

@mock.patch('requests.get')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
@mock.patch("requests.get")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_get_subcloud_group_list(self, mock_client_init, mock_get):
mock_response = mock.MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {
"subcloud_groups": [{"name": SUBCLOUD_GROUP_NAME}]}
"subcloud_groups": [{"name": SUBCLOUD_GROUP_NAME}]
}
mock_get.return_value = mock_response

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

actual_subcloud_groups = client.get_subcloud_group_list()
self.assertEqual(1, len(actual_subcloud_groups))
self.assertEqual(SUBCLOUD_GROUP_NAME,
actual_subcloud_groups[0].get('name'))
self.assertEqual(SUBCLOUD_GROUP_NAME, actual_subcloud_groups[0].get("name"))

@mock.patch('requests.get')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
@mock.patch("requests.get")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_get_subcloud_peer_group_list(self, mock_client_init, mock_get):
mock_response = mock.MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {
"subcloud_peer_groups": [FAKE_SUBCLOUD_PEER_GROUP_DATA]}
"subcloud_peer_groups": [FAKE_SUBCLOUD_PEER_GROUP_DATA]
}
mock_get.return_value = mock_response

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

actual_peer_group = client.get_subcloud_peer_group_list()
self.assertEqual(1, len(actual_peer_group))
self.assertEqual(SUBCLOUD_PEER_GROUP_NAME,
actual_peer_group[0].get('peer-group-name'))
self.assertEqual(
SUBCLOUD_PEER_GROUP_NAME, actual_peer_group[0].get("peer-group-name")
)

@mock.patch('requests.get')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
@mock.patch("requests.get")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_get_subcloud_peer_group(self, mock_client_init, mock_get):
mock_response = mock.MagicMock()
mock_response.status_code = 200
@ -168,21 +177,20 @@ class TestDcmanagerClient(base.DCCommonTestCase):

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

actual_peer_group = client.get_subcloud_peer_group(
SUBCLOUD_PEER_GROUP_NAME)
self.assertEqual(SUBCLOUD_PEER_GROUP_NAME,
actual_peer_group.get('peer-group-name'))
actual_peer_group = client.get_subcloud_peer_group(SUBCLOUD_PEER_GROUP_NAME)
self.assertEqual(
SUBCLOUD_PEER_GROUP_NAME, actual_peer_group.get("peer-group-name")
)

@mock.patch('requests.get')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
def test_get_subcloud_peer_group_not_found(
self, mock_client_init, mock_get
):
@mock.patch("requests.get")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_get_subcloud_peer_group_not_found(self, mock_client_init, mock_get):
mock_response = mock.MagicMock()
mock_response.status_code = 404
mock_response.text = "Subcloud Peer Group not found"
@ -190,38 +198,42 @@ class TestDcmanagerClient(base.DCCommonTestCase):

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

self.assertRaises(dccommon_exceptions.SubcloudPeerGroupNotFound,
client.get_subcloud_peer_group,
SUBCLOUD_PEER_GROUP_NAME)
self.assertRaises(
dccommon_exceptions.SubcloudPeerGroupNotFound,
client.get_subcloud_peer_group,
SUBCLOUD_PEER_GROUP_NAME,
)

@mock.patch('requests.get')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
@mock.patch("requests.get")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_get_subcloud_list_by_peer_group(self, mock_client_init, mock_get):
mock_response = mock.MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {
"subclouds": [FAKE_SUBCLOUD_DATA]}
mock_response.json.return_value = {"subclouds": [FAKE_SUBCLOUD_DATA]}
mock_get.return_value = mock_response

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

actual_subclouds = client.get_subcloud_list_by_peer_group(
SUBCLOUD_PEER_GROUP_NAME)
SUBCLOUD_PEER_GROUP_NAME
)
self.assertEqual(1, len(actual_subclouds))
self.assertEqual(SUBCLOUD_NAME, actual_subclouds[0].get('name'))
self.assertEqual(SUBCLOUD_NAME, actual_subclouds[0].get("name"))

@mock.patch('requests.get')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
@mock.patch("requests.get")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_get_subcloud_list_by_peer_group_not_found(
self, mock_client_init, mock_get
):
@ -232,21 +244,22 @@ class TestDcmanagerClient(base.DCCommonTestCase):

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

self.assertRaises(dccommon_exceptions.SubcloudPeerGroupNotFound,
client.get_subcloud_list_by_peer_group,
SUBCLOUD_PEER_GROUP_NAME)
self.assertRaises(
dccommon_exceptions.SubcloudPeerGroupNotFound,
client.get_subcloud_list_by_peer_group,
SUBCLOUD_PEER_GROUP_NAME,
)

@mock.patch('requests.post')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
@mock.patch("requests.post")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_add_subcloud_peer_group(self, mock_client_init, mock_post):
peer_group_kwargs = {
'peer-group-name': SUBCLOUD_PEER_GROUP_NAME
}
peer_group_kwargs = {"peer-group-name": SUBCLOUD_PEER_GROUP_NAME}
mock_response = mock.MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = FAKE_SUBCLOUD_PEER_GROUP_DATA
@ -254,20 +267,20 @@ class TestDcmanagerClient(base.DCCommonTestCase):

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

actual_peer_group = client.add_subcloud_peer_group(
**peer_group_kwargs)
self.assertEqual(SUBCLOUD_PEER_GROUP_NAME,
actual_peer_group.get('peer-group-name'))
actual_peer_group = client.add_subcloud_peer_group(**peer_group_kwargs)
self.assertEqual(
SUBCLOUD_PEER_GROUP_NAME, actual_peer_group.get("peer-group-name")
)

@mock.patch('requests.post')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
def test_add_subcloud_with_secondary_status(self, mock_client_init,
mock_post):
@mock.patch("requests.post")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_add_subcloud_with_secondary_status(self, mock_client_init, mock_post):
mock_response = mock.MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = FAKE_SUBCLOUD_DATA
@ -275,59 +288,54 @@ class TestDcmanagerClient(base.DCCommonTestCase):

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

# create the cache file for subcloud create
yaml_data = yaml.dump(FAKE_SUBCLOUD_DATA)
with open(SUBCLOUD_BOOTSTRAP_VALUE_PATH, 'w') as file:
with open(SUBCLOUD_BOOTSTRAP_VALUE_PATH, "w") as file:
file.write(yaml_data)

subcloud_kwargs = {
"data": {
"bootstrap-address": SUBCLOUD_BOOTSTRAP_ADDRESS
},
"files": {
"bootstrap_values": SUBCLOUD_BOOTSTRAP_VALUE_PATH
}
"data": {"bootstrap-address": SUBCLOUD_BOOTSTRAP_ADDRESS},
"files": {"bootstrap_values": SUBCLOUD_BOOTSTRAP_VALUE_PATH},
}
actual_subcloud = client.add_subcloud_with_secondary_status(
**subcloud_kwargs)
self.assertEqual(SUBCLOUD_NAME, actual_subcloud.get('name'))
actual_subcloud = client.add_subcloud_with_secondary_status(**subcloud_kwargs)
self.assertEqual(SUBCLOUD_NAME, actual_subcloud.get("name"))

# purge the cache file
os.remove(SUBCLOUD_BOOTSTRAP_VALUE_PATH)

@mock.patch('requests.delete')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
@mock.patch("requests.delete")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_delete_subcloud_peer_group(self, mock_client_init, mock_delete):
mock_response = mock.MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = ''
mock_response.json.return_value = ""
mock_delete.return_value = mock_response

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

result = client.delete_subcloud_peer_group(SUBCLOUD_PEER_GROUP_NAME)
mock_delete.assert_called_once_with(
FAKE_ENDPOINT + '/subcloud-peer-groups/' +
SUBCLOUD_PEER_GROUP_NAME,
FAKE_ENDPOINT + "/subcloud-peer-groups/" + SUBCLOUD_PEER_GROUP_NAME,
headers={"X-Auth-Token": FAKE_TOKEN},
timeout=FAKE_TIMEOUT
timeout=FAKE_TIMEOUT,
)
self.assertEqual(result, '')
self.assertEqual(result, "")

@mock.patch('requests.delete')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
def test_delete_subcloud_peer_group_not_found(self, mock_client_init,
mock_delete):
@mock.patch("requests.delete")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_delete_subcloud_peer_group_not_found(self, mock_client_init, mock_delete):
mock_response = mock.MagicMock()
mock_response.status_code = 404
mock_response.text = "Subcloud Peer Group not found"
@ -335,43 +343,48 @@ class TestDcmanagerClient(base.DCCommonTestCase):

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

self.assertRaises(dccommon_exceptions.SubcloudPeerGroupNotFound,
client.delete_subcloud_peer_group,
SUBCLOUD_PEER_GROUP_NAME)
self.assertRaises(
dccommon_exceptions.SubcloudPeerGroupNotFound,
client.delete_subcloud_peer_group,
SUBCLOUD_PEER_GROUP_NAME,
)

@mock.patch('requests.delete')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
@mock.patch("requests.delete")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_delete_subcloud(self, mock_client_init, mock_delete):
mock_response = mock.MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = ''
mock_response.json.return_value = ""
mock_delete.return_value = mock_response

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

result = client.delete_subcloud(SUBCLOUD_NAME)
mock_delete.assert_called_once_with(
FAKE_ENDPOINT + '/subclouds/' + SUBCLOUD_NAME,
headers={"X-Auth-Token": FAKE_TOKEN,
"User-Agent": dccommon_consts.DCMANAGER_V1_HTTP_AGENT},
timeout=FAKE_TIMEOUT
FAKE_ENDPOINT + "/subclouds/" + SUBCLOUD_NAME,
headers={
"X-Auth-Token": FAKE_TOKEN,
"User-Agent": dccommon_consts.DCMANAGER_V1_HTTP_AGENT,
},
timeout=FAKE_TIMEOUT,
)
self.assertEqual(result, '')
self.assertEqual(result, "")

@mock.patch('requests.delete')
@mock.patch.object(dcmanager_v1.DcmanagerClient, '__init__')
def test_delete_subcloud_not_found(self, mock_client_init,
mock_delete):
@mock.patch("requests.delete")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def test_delete_subcloud_not_found(self, mock_client_init, mock_delete):
mock_response = mock.MagicMock()
mock_response.status_code = 404
mock_response.text = "Subcloud not found"
@ -379,11 +392,12 @@ class TestDcmanagerClient(base.DCCommonTestCase):

mock_client_init.return_value = None
client = dcmanager_v1.DcmanagerClient(
dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
dccommon_consts.SYSTEM_CONTROLLER_NAME, None
)
client.endpoint = FAKE_ENDPOINT
client.token = FAKE_TOKEN
client.timeout = FAKE_TIMEOUT

self.assertRaises(dccommon_exceptions.SubcloudNotFound,
client.delete_subcloud,
SUBCLOUD_NAME)
self.assertRaises(
dccommon_exceptions.SubcloudNotFound, client.delete_subcloud, SUBCLOUD_NAME
)
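Every test in this module follows the same recipe: patch DcmanagerClient.__init__ away so no Keystone session is built, inject endpoint/token/timeout by hand, and stub the requests call. A condensed sketch of the recipe (imports mirror this test module; the endpoint value is illustrative):

from unittest import mock

from dccommon import consts as dccommon_consts
from dccommon.drivers.openstack import dcmanager_v1

@mock.patch("requests.get")
@mock.patch.object(dcmanager_v1.DcmanagerClient, "__init__")
def sketch_get_subcloud_list(mock_client_init, mock_get):
    mock_client_init.return_value = None  # skip real auth/session setup
    client = dcmanager_v1.DcmanagerClient(dccommon_consts.SYSTEM_CONTROLLER_NAME, None)
    client.endpoint = "http://127.0.0.1:8119/v1.0"  # illustrative endpoint
    client.token = "token"
    client.timeout = 600

    # Canned HTTP response; the client only sees status_code and json().
    mock_get.return_value.status_code = 200
    mock_get.return_value.json.return_value = {"subclouds": []}
    assert client.get_subcloud_list() == []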
@ -18,10 +18,7 @@ from dccommon.drivers.openstack import keystone_v3
from dccommon.tests import base
from dccommon.tests import utils

FAKE_SERVICE = [
'endpoint_volume',
'endpoint_network'
]
FAKE_SERVICE = ["endpoint_volume", "endpoint_network"]


class Project(object):
@ -49,72 +46,73 @@ class TestKeystoneClient(base.DCCommonTestCase):
super(TestKeystoneClient, self).setUp()
self.ctx = utils.dummy_context()

@mock.patch.object(keystone_v3, 'KeystoneClient')
@mock.patch.object(keystone_v3, 'EndpointCache')
@mock.patch.object(keystone_v3, "KeystoneClient")
@mock.patch.object(keystone_v3, "EndpointCache")
def test_init(self, mock_endpoint_cache, mock_keystone):
mock_keystone().services_list = FAKE_SERVICE
mock_endpoint_cache().admin_session = 'fake_session'
mock_endpoint_cache().keystone_client = 'fake_key_client'
mock_endpoint_cache().admin_session = "fake_session"
mock_endpoint_cache().keystone_client = "fake_key_client"
key_client = keystone_v3.KeystoneClient()
self.assertIsNotNone(key_client.keystone_client)
self.assertEqual(key_client.services_list,
FAKE_SERVICE)
self.assertEqual(key_client.services_list, FAKE_SERVICE)

@mock.patch.object(keystone_v3, 'KeystoneClient')
@mock.patch.object(keystone_v3, "KeystoneClient")
def test_is_service_enabled(self, mock_keystone):
key_client = keystone_v3.KeystoneClient()
mock_keystone().is_service_enabled.return_value = True
network_enabled = key_client.is_service_enabled('network')
network_enabled = key_client.is_service_enabled("network")
self.assertEqual(network_enabled, True)

@mock.patch.object(keystone_v3, 'EndpointCache')
@mock.patch.object(keystone_v3, "EndpointCache")
def test_get_enabled_projects(self, mock_endpoint_cache):
p1 = Project('proj1', '123')
p2 = Project('proj2', '456', False)
p1 = Project("proj1", "123")
p2 = Project("proj2", "456", False)
key_client = keystone_v3.KeystoneClient()
mock_endpoint_cache().keystone_client.projects.list.return_value =\
[p1, p2]
mock_endpoint_cache().keystone_client.projects.list.return_value = [p1, p2]
project_list = key_client.get_enabled_projects()
self.assertIn(p1.id, project_list)
self.assertNotIn(p2.id, project_list)

@mock.patch.object(keystone_v3, 'EndpointCache')
@mock.patch.object(keystone_v3, "EndpointCache")
def test_get_enabled_users(self, mock_endpoint_cache):
u1 = User('user1', '123')
u2 = User('user2', '456', False)
u1 = User("user1", "123")
u2 = User("user2", "456", False)
key_client = keystone_v3.KeystoneClient()
mock_endpoint_cache().keystone_client.users.list.return_value =\
[u1, u2]
mock_endpoint_cache().keystone_client.users.list.return_value = [u1, u2]
users_list = key_client.get_enabled_users()
self.assertIn(u1.id, users_list)
self.assertNotIn(u2.id, users_list)

@mock.patch.object(keystone_v3.endpoint_filter, 'EndpointFilterManager')
@mock.patch.object(keystone_v3, 'EndpointCache')
def test_get_filtered_region(self, mock_endpoint_cache,
mock_endpoint_filter_manager):
endpoint_1 = FakeEndpoint('endpoint1', 'regionOne')
endpoint_2 = FakeEndpoint('endpoint2', 'regionTwo')
@mock.patch.object(keystone_v3.endpoint_filter, "EndpointFilterManager")
@mock.patch.object(keystone_v3, "EndpointCache")
def test_get_filtered_region(
self, mock_endpoint_cache, mock_endpoint_filter_manager
):
endpoint_1 = FakeEndpoint("endpoint1", "regionOne")
endpoint_2 = FakeEndpoint("endpoint2", "regionTwo")
key_client = keystone_v3.KeystoneClient()
mock_endpoint_filter_manager(). \
list_endpoints_for_project.return_value = [endpoint_1, endpoint_2]
region_list = key_client.get_filtered_region('fake_project')
self.assertIn('regionOne', region_list)
self.assertIn('regionTwo', region_list)
mock_endpoint_filter_manager().list_endpoints_for_project.return_value = [
endpoint_1,
endpoint_2,
]
region_list = key_client.get_filtered_region("fake_project")
self.assertIn("regionOne", region_list)
self.assertIn("regionTwo", region_list)

@mock.patch.object(keystone_v3, 'EndpointCache')
@mock.patch.object(keystone_v3, "EndpointCache")
def test_delete_endpoints(self, mock_endpoint_cache):
endpoint_1 = FakeEndpoint('endpoint1', 'regionOne')
mock_endpoint_cache().keystone_client.endpoints.list.return_value = \
[endpoint_1]
endpoint_1 = FakeEndpoint("endpoint1", "regionOne")
mock_endpoint_cache().keystone_client.endpoints.list.return_value = [endpoint_1]
key_client = keystone_v3.KeystoneClient()
key_client.delete_endpoints('regionOne')
mock_endpoint_cache().keystone_client.endpoints.delete.\
assert_called_with(endpoint_1)
key_client.delete_endpoints("regionOne")
mock_endpoint_cache().keystone_client.endpoints.delete.assert_called_with(
endpoint_1
)

@mock.patch.object(keystone_v3, 'EndpointCache')
@mock.patch.object(keystone_v3, "EndpointCache")
def test_delete_region(self, mock_endpoint_cache):
key_client = keystone_v3.KeystoneClient()
key_client.delete_region('regionOne')
mock_endpoint_cache().keystone_client.regions.delete.\
assert_called_with('regionOne')
key_client.delete_region("regionOne")
mock_endpoint_cache().keystone_client.regions.delete.assert_called_with(
"regionOne"
)

@ -1,4 +1,4 @@
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -21,10 +21,10 @@ from dccommon.tests import base

class TestOpenStackDriver(base.DCCommonTestCase):

@mock.patch.object(sdk, 'KeystoneClient')
@mock.patch.object(sdk.OpenStackDriver, '_is_token_valid')
@mock.patch.object(sdk, "KeystoneClient")
@mock.patch.object(sdk.OpenStackDriver, "_is_token_valid")
def test_init(self, mock_keystone_client, mock_is_token_valid):
region_name = 'subcloud1'
region_name = "subcloud1"

os_client = sdk.OpenStackDriver(region_name, region_clients=None)
self.assertIsNotNone(os_client)
@ -32,9 +32,13 @@ class TestOpenStackDriver(base.DCCommonTestCase):
self.assertIsNotNone(new_keystone_client)
mock_is_token_valid(region_name).return_value = True
cached_keystone_client = sdk.OpenStackDriver(
region_name, region_clients=None).keystone_client
region_name, region_clients=None
).keystone_client
self.assertEqual(new_keystone_client, cached_keystone_client)

self.assertRaises(exceptions.InvalidInputError,
sdk.OpenStackDriver,
region_name, region_clients=['fake_client'])
self.assertRaises(
exceptions.InvalidInputError,
sdk.OpenStackDriver,
region_name,
region_clients=["fake_client"],
)

@ -70,28 +70,21 @@ SHOW_RESPONSE = {
"packages": [],
}

ERROR_RESPONSE = {
"error": "something went wrong"
}
ERROR_RESPONSE = {"error": "something went wrong"}

INFO_RESPONSE = {
"info": "Ok"
}
INFO_RESPONSE = {"info": "Ok"}

URLS = [
"/deploy",
"/commit_patch"
]
URLS = ["/deploy", "/commit_patch"]


def mocked_requests_success(*args, **kwargs):
response_content = None

if args[0].endswith('/release'):
if args[0].endswith("/release"):
response_content = json.dumps(LIST_RESPONSE)
elif args[0].endswith("/release/DC.1"):
response_content = json.dumps(SHOW_RESPONSE)
elif args[0].endswith('/release/DC.1/DC.2'):
elif args[0].endswith("/release/DC.1/DC.2"):
response_content = json.dumps(INFO_RESPONSE)
elif any([url in args[0] for url in URLS]):
response_content = json.dumps(INFO_RESPONSE)
@ -122,13 +115,13 @@ class TestSoftwareClient(base.DCCommonTestCase):
session=mock.MagicMock(),
)

@mock.patch('requests.get')
@mock.patch("requests.get")
def test_list_success(self, mock_get):
mock_get.side_effect = mocked_requests_success
response = self.software_client.list()
self.assertEqual(response, CLIENT_LIST_RESPONSE)

@mock.patch('requests.get')
@mock.patch("requests.get")
def test_list_failure(self, mock_get):
mock_get.side_effect = mocked_requests_failure
exc = self.assertRaises(exceptions.ApiException, self.software_client.list)
@ -144,39 +137,39 @@ class TestSoftwareClient(base.DCCommonTestCase):
@mock.patch("requests.get")
def test_show_failure(self, mock_get):
mock_get.side_effect = mocked_requests_failure
release = 'DC.1'
release = "DC.1"
exc = self.assertRaises(
exceptions.ApiException, self.software_client.show, release
)
self.assertTrue("Show failed with status code: 500" in str(exc))

@mock.patch('requests.delete')
@mock.patch("requests.delete")
def test_delete_success(self, mock_delete):
mock_delete.side_effect = mocked_requests_success
releases = ['DC.1', 'DC.2']
releases = ["DC.1", "DC.2"]
response = self.software_client.delete(releases)
self.assertEqual(response, INFO_RESPONSE)

@mock.patch("requests.delete")
def test_delete_failure(self, mock_delete):
mock_delete.side_effect = mocked_requests_failure
releases = ['DC.1', 'DC.2']
releases = ["DC.1", "DC.2"]
exc = self.assertRaises(
exceptions.ApiException, self.software_client.delete, releases
)
self.assertTrue("Delete failed with status code: 500" in str(exc))

@mock.patch('requests.post')
@mock.patch("requests.post")
def test_commit_patch_success(self, mock_post):
mock_post.side_effect = mocked_requests_success
releases = ['DC.1', 'DC.2']
releases = ["DC.1", "DC.2"]
response = self.software_client.commit_patch(releases)
self.assertEqual(response, INFO_RESPONSE)

@mock.patch('requests.post')
@mock.patch("requests.post")
def test_commit_patch_failure(self, mock_post):
mock_post.side_effect = mocked_requests_failure
releases = ['DC.1', 'DC.2']
releases = ["DC.1", "DC.2"]
exc = self.assertRaises(
exceptions.ApiException, self.software_client.commit_patch, releases
)
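The fakes above work because side_effect turns the patched requests function into a dispatcher: each URL suffix can return its own canned response. A minimal self-contained sketch of the same idea (the response class and URLs are illustrative):

import json
from unittest import mock

import requests

class FakeResponse:
    def __init__(self, status_code, text):
        self.status_code = status_code
        self.text = text

def fake_get(url, **kwargs):
    # Dispatch on the URL suffix, like mocked_requests_success() above.
    if url.endswith("/release"):
        return FakeResponse(200, json.dumps({"releases": []}))
    return FakeResponse(500, "error")

with mock.patch("requests.get", side_effect=fake_get):
    assert requests.get("http://example/api/release").status_code == 200
    assert requests.get("http://example/api/other").status_code == 500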
@ -1,4 +1,4 @@
# Copyright (c) 2017-2023 Wind River Systems, Inc.
# Copyright (c) 2017-2024 Wind River Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@ -45,11 +45,11 @@ class FakeAddressPool(object):

class FakeRoute(object):
def __init__(self, data):
self.uuid = data['uuid']
self.network = data['network']
self.prefix = data['prefix']
self.gateway = data['gateway']
self.metric = data['metric']
self.uuid = data["uuid"]
self.network = data["network"]
self.prefix = data["prefix"]
self.gateway = data["gateway"]
self.metric = data["metric"]


class TestSysinvClient(base.DCCommonTestCase):
@ -57,42 +57,46 @@ class TestSysinvClient(base.DCCommonTestCase):
super(TestSysinvClient, self).setUp()
self.ctx = utils.dummy_context()

@mock.patch.object(sysinv_v1.SysinvClient, '__init__')
@mock.patch.object(sysinv_v1.SysinvClient, "__init__")
def test_get_controller_hosts(self, mock_sysinvclient_init):
controller_list = ['controller-0', 'controller-1']
controller_list = ["controller-0", "controller-1"]
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client = sysinv_v1.SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME, None
)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.ihost.list_personality = mock.MagicMock()
sysinv_client.sysinv_client.ihost.list_personality.return_value = \
sysinv_client.sysinv_client.ihost.list_personality.return_value = (
controller_list
)
controllers = sysinv_client.get_controller_hosts()
self.assertEqual(controller_list, controllers)

@mock.patch.object(sysinv_v1.SysinvClient, '__init__')
@mock.patch.object(sysinv_v1.SysinvClient, "__init__")
def test_get_management_interface(self, mock_sysinvclient_init):
interface = FakeInterface('interface', 'uuid')
interface_network = FakeInterfaceNetwork('mgmt', 'interface')
interface = FakeInterface("interface", "uuid")
interface_network = FakeInterfaceNetwork("mgmt", "interface")
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client = sysinv_v1.SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME, None
)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.iinterface.list = mock.MagicMock()
sysinv_client.sysinv_client.iinterface.list.return_value = [interface]
sysinv_client.sysinv_client.interface_network.list_by_interface.\
return_value = [interface_network]
management_interface = sysinv_client.get_management_interface(
'hostname')
sysinv_client.sysinv_client.interface_network.list_by_interface.return_value = [
interface_network
]
management_interface = sysinv_client.get_management_interface("hostname")
self.assertEqual(interface, management_interface)

@mock.patch.object(sysinv_v1.SysinvClient, '__init__')
@mock.patch.object(sysinv_v1.SysinvClient, "__init__")
def test_get_management_address_pool(self, mock_sysinvclient_init):
network = FakeNetwork('mgmt', 'uuid')
pool = FakeAddressPool('uuid')
network = FakeNetwork("mgmt", "uuid")
pool = FakeAddressPool("uuid")
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client = sysinv_v1.SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME, None
)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.network.list = mock.MagicMock()
sysinv_client.sysinv_client.network.list.return_value = [network]
@ -101,29 +105,31 @@ class TestSysinvClient(base.DCCommonTestCase):
management_pool = sysinv_client.get_management_address_pool()
self.assertEqual(pool, management_pool)

@mock.patch.object(sysinv_v1.SysinvClient, '__init__')
@mock.patch.object(sysinv_v1.SysinvClient, "__init__")
def test_get_admin_interface(self, mock_sysinvclient_init):
interface = FakeInterface('interface', 'uuid')
interface_network = FakeInterfaceNetwork('admin', 'interface')
interface = FakeInterface("interface", "uuid")
interface_network = FakeInterfaceNetwork("admin", "interface")
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client = sysinv_v1.SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME, None
)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.iinterface.list = mock.MagicMock()
sysinv_client.sysinv_client.iinterface.list.return_value = [interface]
sysinv_client.sysinv_client.interface_network.list_by_interface.\
return_value = [interface_network]
admin_interface = sysinv_client.get_admin_interface(
'hostname')
sysinv_client.sysinv_client.interface_network.list_by_interface.return_value = [
interface_network
]
admin_interface = sysinv_client.get_admin_interface("hostname")
self.assertEqual(interface, admin_interface)

@mock.patch.object(sysinv_v1.SysinvClient, '__init__')
@mock.patch.object(sysinv_v1.SysinvClient, "__init__")
def test_get_admin_address_pool(self, mock_sysinvclient_init):
network = FakeNetwork('admin', 'uuid')
pool = FakeAddressPool('uuid')
network = FakeNetwork("admin", "uuid")
pool = FakeAddressPool("uuid")
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client = sysinv_v1.SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME, None
)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.network.list = mock.MagicMock()
sysinv_client.sysinv_client.network.list.return_value = [network]
@ -132,60 +138,76 @@ class TestSysinvClient(base.DCCommonTestCase):
admin_pool = sysinv_client.get_admin_address_pool()
self.assertEqual(pool, admin_pool)

@mock.patch.object(sysinv_v1.SysinvClient, '__init__')
@mock.patch.object(sysinv_v1.SysinvClient, "__init__")
def test_create_route(self, mock_sysinvclient_init):
fake_route = utils.create_route_dict(base.ROUTE_0)
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client = sysinv_v1.SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME, None
)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.route.create = mock.MagicMock()
sysinv_client.create_route(fake_route['uuid'],
fake_route['network'],
fake_route['prefix'],
fake_route['gateway'],
fake_route['metric'])
sysinv_client.create_route(
fake_route["uuid"],
fake_route["network"],
fake_route["prefix"],
fake_route["gateway"],
fake_route["metric"],
)
sysinv_client.sysinv_client.route.create.assert_called_with(
interface_uuid=fake_route['uuid'],
network=fake_route['network'], prefix=fake_route['prefix'],
gateway=fake_route['gateway'], metric=fake_route['metric'])
interface_uuid=fake_route["uuid"],
network=fake_route["network"],
prefix=fake_route["prefix"],
gateway=fake_route["gateway"],
metric=fake_route["metric"],
)

@mock.patch.object(sysinv_v1.SysinvClient, '__init__')
@mock.patch.object(sysinv_v1.SysinvClient, "__init__")
def test_delete_route(self, mock_sysinvclient_init):
fake_route = utils.create_route_dict(base.ROUTE_0)
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client = sysinv_v1.SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME, None
)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.route.delete = mock.MagicMock()
sysinv_client.sysinv_client.route.list_by_interface = mock.MagicMock()
existing_route_0 = FakeRoute(utils.create_route_dict(base.ROUTE_0))
existing_route_1 = FakeRoute(utils.create_route_dict(base.ROUTE_1))
sysinv_client.sysinv_client.route.list_by_interface.return_value = [
existing_route_0, existing_route_1]
sysinv_client.delete_route(fake_route['uuid'],
fake_route['network'],
fake_route['prefix'],
fake_route['gateway'],
fake_route['metric'])
existing_route_0,
existing_route_1,
]
sysinv_client.delete_route(
fake_route["uuid"],
fake_route["network"],
fake_route["prefix"],
fake_route["gateway"],
fake_route["metric"],
)
sysinv_client.sysinv_client.route.delete.assert_called_with(
existing_route_0.uuid)
existing_route_0.uuid
)

@mock.patch.object(sysinv_v1.SysinvClient, '__init__')
@mock.patch.object(sysinv_v1.SysinvClient, "__init__")
def test_delete_route_not_exist(self, mock_sysinvclient_init):
fake_route = utils.create_route_dict(base.ROUTE_0)
mock_sysinvclient_init.return_value = None
sysinv_client = sysinv_v1.SysinvClient(dccommon_consts.DEFAULT_REGION_NAME,
None)
sysinv_client = sysinv_v1.SysinvClient(
dccommon_consts.DEFAULT_REGION_NAME, None
)
sysinv_client.sysinv_client = mock.MagicMock()
sysinv_client.sysinv_client.route.delete = mock.MagicMock()
sysinv_client.sysinv_client.route.list_by_interface = mock.MagicMock()
existing_route_1 = FakeRoute(utils.create_route_dict(base.ROUTE_1))
sysinv_client.sysinv_client.route.list_by_interface.return_value = [
existing_route_1]
sysinv_client.delete_route(fake_route['uuid'],
fake_route['network'],
fake_route['prefix'],
fake_route['gateway'],
fake_route['metric'])
existing_route_1
]
sysinv_client.delete_route(
fake_route["uuid"],
fake_route["network"],
fake_route["prefix"],
fake_route["gateway"],
fake_route["metric"],
)
sysinv_client.sysinv_client.route.delete.assert_not_called()
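Together, test_delete_route and test_delete_route_not_exist pin down the contract: delete_route lists the interface's existing routes and only issues a delete when one matches. A sketch of that match-then-delete behavior (field names follow FakeRoute; this is the behavior the tests assert, not the driver's actual code):

def delete_route_if_present(route_api, interface_uuid, network, prefix, gateway, metric):
    # Delete a route only if an identical one already exists on the interface.
    for route in route_api.list_by_interface(interface_uuid):
        if (route.network, route.prefix, route.gateway, route.metric) == (
            network, prefix, gateway, metric
        ):
            route_api.delete(route.uuid)
            return True
    return False  # nothing matched; route.delete is never called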
@ -36,14 +36,20 @@ CENTRAL_REGION = "RegionOne"
SUBCLOUD1_REGION = "subcloud1"

FAKE_MASTER_SERVICE_ENDPOINT_MAP = {
CENTRAL_REGION: {"sysinv": FAKE_REGIONONE_SYSINV_ENDPOINT,
"keystone": FAKE_REGIONONE_KEYSTONE_ENDPOINT},
SUBCLOUD1_REGION: {"sysinv": FAKE_SUBCLOUD1_SYSINV_ENDPOINT,
"keystone": FAKE_SUBCLOUD1_KEYSTONE_ENDPOINT}
CENTRAL_REGION: {
"sysinv": FAKE_REGIONONE_SYSINV_ENDPOINT,
"keystone": FAKE_REGIONONE_KEYSTONE_ENDPOINT,
},
SUBCLOUD1_REGION: {
"sysinv": FAKE_SUBCLOUD1_SYSINV_ENDPOINT,
"keystone": FAKE_SUBCLOUD1_KEYSTONE_ENDPOINT,
},
}

FAKE_SERVICE_ENDPOINT_MAP = {"sysinv": FAKE_REGIONONE_SYSINV_ENDPOINT,
"keystone": FAKE_REGIONONE_KEYSTONE_ENDPOINT}
FAKE_SERVICE_ENDPOINT_MAP = {
"sysinv": FAKE_REGIONONE_SYSINV_ENDPOINT,
"keystone": FAKE_REGIONONE_KEYSTONE_ENDPOINT,
}


class FakeKeystoneClient(object):
@ -60,32 +66,29 @@ class FakeService(object):
self.enabled = enabled


FAKE_SERVICES_LIST = [FakeService(1, "keystone", "identity", True),
FakeService(2, "sysinv", "platform", True),
FakeService(3, "patching", "patching", True),
FakeService(4, "barbican", "key-manager", True),
FakeService(5, "vim", "nfv", True),
FakeService(6, "dcmanager", "dcmanager", True),
FakeService(7, "dcorch", "dcorch", True)]
FAKE_SERVICES_LIST = [
FakeService(1, "keystone", "identity", True),
FakeService(2, "sysinv", "platform", True),
FakeService(3, "patching", "patching", True),
FakeService(4, "barbican", "key-manager", True),
FakeService(5, "vim", "nfv", True),
FakeService(6, "dcmanager", "dcmanager", True),
FakeService(7, "dcorch", "dcorch", True),
]


class EndpointCacheTest(base.DCCommonTestCase):
def setUp(self):
super(EndpointCacheTest, self).setUp()
auth_uri_opts = [
cfg.StrOpt('auth_uri',
default="fake_auth_uri"),
cfg.StrOpt('username',
default="fake_user"),
cfg.StrOpt('password',
default="fake_password"),
cfg.StrOpt('project_name',
default="fake_project_name"),
cfg.StrOpt('user_domain_name',
default="fake_user_domain_name"),
cfg.StrOpt('project_domain_name',
default="fake_project_domain_name")]
cfg.CONF.register_opts(auth_uri_opts, 'endpoint_cache')
cfg.StrOpt("auth_uri", default="fake_auth_uri"),
cfg.StrOpt("username", default="fake_user"),
cfg.StrOpt("password", default="fake_password"),
cfg.StrOpt("project_name", default="fake_project_name"),
cfg.StrOpt("user_domain_name", default="fake_user_domain_name"),
cfg.StrOpt("project_domain_name", default="fake_project_domain_name"),
]
cfg.CONF.register_opts(auth_uri_opts, "endpoint_cache")

# Mock the token validator (which is confusing so here is the info)
# endpoint_cache.py has an import:
@ -94,13 +97,13 @@ class EndpointCacheTest(base.DCCommonTestCase):
# patch.object(endpoint_cache, 'is_token_expiring_soon')
# instead of:
# patch.object(dccommon.utils, 'is_token_expiring_soon')
p = mock.patch.object(endpoint_cache, 'is_token_expiring_soon')
p = mock.patch.object(endpoint_cache, "is_token_expiring_soon")
self.mock_is_token_expiring_soon = p.start()
self.mock_is_token_expiring_soon.return_value = True
self.addCleanup(p.stop)

# Mock the get_admin_session method
p = mock.patch.object(endpoint_cache.EndpointCache, 'get_admin_session')
p = mock.patch.object(endpoint_cache.EndpointCache, "get_admin_session")
self.mock_get_admin_session = p.start()
self.addCleanup(p.stop)
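The setUp comment above is the usual "patch where the name is looked up" rule: endpoint_cache.py imports is_token_expiring_soon into its own namespace, so that namespace is the one to patch. A small illustration (module paths assumed from this test's imports):

from unittest import mock

from dccommon import endpoint_cache

# Effective: replaces the reference endpoint_cache actually calls.
with mock.patch.object(endpoint_cache, "is_token_expiring_soon", return_value=True):
    pass  # code under test sees the mock

# Ineffective for endpoint_cache: this patches dccommon.utils, but
# endpoint_cache already holds its own reference to the function.
with mock.patch("dccommon.utils.is_token_expiring_soon", return_value=True):
    pass  # code under test still calls the real function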
|
||||
|
||||
@ -111,22 +114,27 @@ class EndpointCacheTest(base.DCCommonTestCase):
|
||||
endpoint_cache.EndpointCache.master_keystone_client = None
|
||||
        endpoint_cache.EndpointCache.master_token = {}
        endpoint_cache.EndpointCache.master_services_list = None
        endpoint_cache.EndpointCache.master_service_endpoint_map = \
        endpoint_cache.EndpointCache.master_service_endpoint_map = (
            collections.defaultdict(dict)
        )

    @mock.patch.object(
        endpoint_cache.EndpointCache,
        'get_cached_master_keystone_client_and_region_endpoint_map')
        "get_cached_master_keystone_client_and_region_endpoint_map",
    )
    def test_get_endpoint(self, mock_get_cached_data):
        mock_get_cached_data.return_value = (
            FakeKeystoneClient(), FAKE_SERVICE_ENDPOINT_MAP)
            FakeKeystoneClient(),
            FAKE_SERVICE_ENDPOINT_MAP,
        )
        cache = endpoint_cache.EndpointCache("RegionOne", None)
        endpoint = cache.get_endpoint("sysinv")
        self.assertEqual(endpoint, FAKE_REGIONONE_SYSINV_ENDPOINT)

    @mock.patch.object(tokens.TokenManager, 'validate')
    @mock.patch.object(endpoint_cache.EndpointCache,
                       '_generate_master_service_endpoint_map')
    @mock.patch.object(tokens.TokenManager, "validate")
    @mock.patch.object(
        endpoint_cache.EndpointCache, "_generate_master_service_endpoint_map"
    )
    def test_get_all_regions(self, mock_generate_cached_data, mock_tokens_validate):
        mock_generate_cached_data.return_value = FAKE_MASTER_SERVICE_ENDPOINT_MAP
        cache = endpoint_cache.EndpointCache("RegionOne", None)
@ -134,39 +142,44 @@ class EndpointCacheTest(base.DCCommonTestCase):
        self.assertIn(CENTRAL_REGION, region_list)
        self.assertIn(SUBCLOUD1_REGION, region_list)

    @mock.patch.object(tokens.TokenManager, 'validate')
    @mock.patch.object(services.ServiceManager, 'list')
    @mock.patch.object(endpoint_cache.EndpointCache,
                       '_generate_master_service_endpoint_map')
    def test_get_services_list(self, mock_generate_cached_data, mock_services_list,
                               mock_tokens_validate):
    @mock.patch.object(tokens.TokenManager, "validate")
    @mock.patch.object(services.ServiceManager, "list")
    @mock.patch.object(
        endpoint_cache.EndpointCache, "_generate_master_service_endpoint_map"
    )
    def test_get_services_list(
        self, mock_generate_cached_data, mock_services_list, mock_tokens_validate
    ):
        mock_services_list.return_value = FAKE_SERVICES_LIST
        mock_generate_cached_data.return_value = FAKE_MASTER_SERVICE_ENDPOINT_MAP
        endpoint_cache.EndpointCache("RegionOne", None)
        services_list = endpoint_cache.EndpointCache.get_master_services_list()
        self.assertEqual(FAKE_SERVICES_LIST, services_list)

    @mock.patch.object(tokens.TokenManager, 'validate')
    @mock.patch.object(endpoint_cache.EndpointCache,
                       '_generate_master_service_endpoint_map')
    @mock.patch.object(tokens.TokenManager, "validate")
    @mock.patch.object(
        endpoint_cache.EndpointCache, "_generate_master_service_endpoint_map"
    )
    def test_update_master_service_endpoint_region(
        self, mock_generate_cached_data, mock_tokens_validate):
        mock_generate_cached_data.return_value = (
            copy.deepcopy(FAKE_MASTER_SERVICE_ENDPOINT_MAP))
        self, mock_generate_cached_data, mock_tokens_validate
    ):
        mock_generate_cached_data.return_value = copy.deepcopy(
            FAKE_MASTER_SERVICE_ENDPOINT_MAP
        )
        region_name = SUBCLOUD1_REGION
        new_endpoints = {
            'sysinv': 'https://[fake_ip]:6386/v1',
            'keystone': 'https://[fake_ip]:5001/v3'
            "sysinv": "https://[fake_ip]:6386/v1",
            "keystone": "https://[fake_ip]:5001/v3",
        }
        cache = endpoint_cache.EndpointCache("RegionOne", None)
        self.assertEqual(
            endpoint_cache.EndpointCache.master_service_endpoint_map,
            FAKE_MASTER_SERVICE_ENDPOINT_MAP
            FAKE_MASTER_SERVICE_ENDPOINT_MAP,
        )
        cache.update_master_service_endpoint_region(region_name, new_endpoints)
        self.assertNotEqual(
            endpoint_cache.EndpointCache.master_service_endpoint_map,
            FAKE_MASTER_SERVICE_ENDPOINT_MAP
            FAKE_MASTER_SERVICE_ENDPOINT_MAP,
        )

    def _get_expected_endpoints(self, ip: str) -> dict:
@ -211,8 +224,7 @@ class EndpointCacheTest(base.DCCommonTestCase):

        for subcloud_mgmt_ips in subcloud_mgmt_ips_dict:
            expected_result = {
                k: self._get_expected_endpoints(v)
                for k, v in subcloud_mgmt_ips.items()
                k: self._get_expected_endpoints(v) for k, v in subcloud_mgmt_ips.items()
            }
            self.assertEqual(
                expected_result,
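Note: in the tests above, stacked mock.patch.object decorators inject their mocks bottom-up, which is why test_get_services_list receives mock_generate_cached_data (the decorator closest to the function) before mock_tokens_validate (the topmost one). A minimal, self-contained sketch of the rule (class and method names here are illustrative, not from dccommon):

    import unittest
    from unittest import mock


    class Service:
        def list(self):
            return []

        def validate(self):
            return True


    class DecoratorOrderTest(unittest.TestCase):
        # The decorator nearest the function supplies the first mock
        # argument after self; the outermost decorator supplies the last.
        @mock.patch.object(Service, "validate")  # -> mock_validate (second)
        @mock.patch.object(Service, "list")      # -> mock_list (first)
        def test_argument_order(self, mock_list, mock_validate):
            mock_list.return_value = ["svc"]
            self.assertEqual(Service().list(), ["svc"])
            self.assertIsInstance(mock_validate, mock.MagicMock)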
@ -8,8 +8,8 @@ from dccommon.exceptions import PlaybookExecutionTimeout
from dccommon.tests import base
from dccommon import utils

FAKE_SUBCLOUD_NAME = 'subcloud1'
FAKE_LOG_FILE = '/dev/null'
FAKE_SUBCLOUD_NAME = "subcloud1"
FAKE_LOG_FILE = "/dev/null"


class TestUtils(base.DCCommonTestCase):
@ -22,25 +22,31 @@ class TestUtils(base.DCCommonTestCase):

    def test_exec_playbook(self):
        # no timeout:
        testscript = ['dccommon/tests/unit/test_utils_script.sh', '1']
        testscript = ["dccommon/tests/unit/test_utils_script.sh", "1"]
        ansible = utils.AnsiblePlaybook(FAKE_SUBCLOUD_NAME)
        ansible.run_playbook(FAKE_LOG_FILE, testscript)

    def test_exec_playbook_timeout(self):
        testscript = ['dccommon/tests/unit/test_utils_script.sh', '30']
        testscript = ["dccommon/tests/unit/test_utils_script.sh", "30"]
        ansible = utils.AnsiblePlaybook(FAKE_SUBCLOUD_NAME)
        self.assertRaises(
            PlaybookExecutionTimeout, ansible.run_playbook, FAKE_LOG_FILE,
            testscript, timeout=2
            PlaybookExecutionTimeout,
            ansible.run_playbook,
            FAKE_LOG_FILE,
            testscript,
            timeout=2,
        )

    def test_exec_playbook_timeout_requires_kill(self):
        # This option ignores a regular TERM signal, and requires a
        # kill -9 (KILL signal) to terminate. We're using this to simulate
        # a hung process
        script = ['dccommon/tests/unit/test_utils_script.sh', '30', 'TERM']
        script = ["dccommon/tests/unit/test_utils_script.sh", "30", "TERM"]
        ansible = utils.AnsiblePlaybook(FAKE_SUBCLOUD_NAME)
        self.assertRaises(
            PlaybookExecutionTimeout, ansible.run_playbook, FAKE_LOG_FILE, script,
            timeout=2
            PlaybookExecutionTimeout,
            ansible.run_playbook,
            FAKE_LOG_FILE,
            script,
            timeout=2,
        )
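Note: the hung-process simulation in the last test works because the helper script ignores SIGTERM, so only SIGKILL can stop it. The same behavior can be sketched in pure Python (illustrative only; the repo's actual helper is test_utils_script.sh):

    import signal
    import time

    # Ignore SIGTERM so a plain `kill` cannot stop this process; only
    # SIGKILL (kill -9), which cannot be caught or ignored, will work.
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    time.sleep(30)  # stand-in for the hung task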
@ -1,5 +1,5 @@
# Copyright (c) 2015 Ericsson AB
# Copyright (c) 2020-2021 Wind River Systems, Inc.
# Copyright (c) 2020-2021, 2024 Wind River Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -19,36 +19,41 @@ from oslo_context import context


def create_route_dict(data_list):
    return {'created-at': data_list[0],
            'updated-at': data_list[1],
            'deleted-at': data_list[2],
            'id': data_list[3],
            'uuid': data_list[4],
            'family': data_list[5],
            'network': data_list[6],
            'prefix': data_list[7],
            'gateway': data_list[8],
            'metric': data_list[9],
            'interface-id': data_list[10]}
    return {
        "created-at": data_list[0],
        "updated-at": data_list[1],
        "deleted-at": data_list[2],
        "id": data_list[3],
        "uuid": data_list[4],
        "family": data_list[5],
        "network": data_list[6],
        "prefix": data_list[7],
        "gateway": data_list[8],
        "metric": data_list[9],
        "interface-id": data_list[10],
    }


def create_endpoint_dict(data_list):
    return {'id': data_list[0],
            'legacy_endpoint_id': data_list[1],
            'interface': data_list[2],
            'service_id': data_list[3],
            'url': data_list[4],
            'extra': data_list[5],
            'enabled': data_list[6],
            'region_id': data_list[7]}
    return {
        "id": data_list[0],
        "legacy_endpoint_id": data_list[1],
        "interface": data_list[2],
        "service_id": data_list[3],
        "url": data_list[4],
        "extra": data_list[5],
        "enabled": data_list[6],
        "region_id": data_list[7],
    }


def dummy_context(user='test_username', tenant='test_project_id',
                  region_name=None):
    return context.RequestContext.from_dict({
        'auth_token': 'abcd1234',
        'user': user,
        'project': tenant,
        'is_admin': True,
        'region_name': region_name
    })
def dummy_context(user="test_username", tenant="test_project_id", region_name=None):
    return context.RequestContext.from_dict(
        {
            "auth_token": "abcd1234",
            "user": user,
            "project": tenant,
            "is_admin": True,
            "region_name": region_name,
        }
    )
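Note: the helpers above simply map positional row data onto named keys. A quick illustration of create_endpoint_dict with made-up values (the index order must match the helper's mapping):

    row = [
        "endpoint-id-1",            # id
        None,                       # legacy_endpoint_id
        "admin",                    # interface
        "service-id-1",             # service_id
        "http://10.0.0.1:6385/v1",  # url
        "{}",                       # extra
        True,                       # enabled
        "RegionOne",                # region_id
    ]
    endpoint = create_endpoint_dict(row)
    assert endpoint["interface"] == "admin"
    assert endpoint["region_id"] == "RegionOne"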
@ -36,8 +36,8 @@ from dccommon.subprocess_cleanup import SubprocessCleanup
from dcorch.common.i18n import _

LOG = logging.getLogger(__name__)
ANSIBLE_PASSWD_PARMS = ['ansible_ssh_pass', 'ansible_become_pass']
SCRIPT_PASSWD_PARMS = ['sysadmin_password', 'password']
ANSIBLE_PASSWD_PARMS = ["ansible_ssh_pass", "ansible_become_pass"]
SCRIPT_PASSWD_PARMS = ["sysadmin_password", "password"]

# Gap, in seconds, to determine whether the given token is about to expire
# These values are used to randomize the token early renewal duration and
@ -82,11 +82,11 @@ class memoized(object):
        return value

    def __repr__(self):
        '''Return the function's docstring.'''
        """Return the function's docstring."""
        return self.func.__doc__

    def __get__(self, obj, objtype):
        '''Support instance methods.'''
        """Support instance methods."""
        return functools.partial(self.__call__, obj)

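Note: given the memoized class above, a usage sketch (the decorated function is hypothetical); repeated calls with the same hashable argument return the cached value, and repr() surfaces the wrapped function's docstring via __repr__:

    @memoized
    def fib(n):
        """Return the n-th Fibonacci number."""
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    print(fib(30))    # each distinct n is computed only once
    print(repr(fib))  # -> "Return the n-th Fibonacci number."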
@ -108,6 +108,7 @@ class AnsiblePlaybook(object):
    is waiting, the playbook_failed flag will indicate to the
    original process to raise PlaybookExecutionFailed.
    """

    abort_status = {}
    lock = threading.Lock()

@ -126,12 +127,12 @@ class AnsiblePlaybook(object):
        param timeout: Timeout in seconds.
        """
        with AnsiblePlaybook.lock:
            AnsiblePlaybook.abort_status[self.subcloud_name]['abort'] = True
            AnsiblePlaybook.abort_status[self.subcloud_name]["abort"] = True
        unabortable_flag = os.path.join(
            consts.ANSIBLE_OVERRIDES_PATH,
            '.%s_deploy_not_abortable' % self.subcloud_name
            ".%s_deploy_not_abortable" % self.subcloud_name,
        )
        subp = AnsiblePlaybook.abort_status[self.subcloud_name]['subp']
        subp = AnsiblePlaybook.abort_status[self.subcloud_name]["subp"]
        while os.path.exists(unabortable_flag) and timeout > 0:
            # If subprocess ended (subp.poll is not None), no further abort
            # action is necessary
@ -141,8 +142,9 @@ class AnsiblePlaybook(object):
            timeout -= 1
        return kill_subprocess_group(subp)

    def run_playbook(self, log_file, playbook_command, timeout=None,
                     register_cleanup=True):
    def run_playbook(
        self, log_file, playbook_command, timeout=None, register_cleanup=True
    ):
        """Run ansible playbook via subprocess.

        :param log_file: Logs output to file
@ -159,16 +161,16 @@ class AnsiblePlaybook(object):
        if timeout:
            timeout_log_str = " (timeout: %ss)" % timeout
        else:
            timeout_log_str = ''
            timeout_log_str = ""

        with open(log_file, "a+") as f_out_log:
            try:
                logged_playbook_command = \
                    _strip_password_from_command(playbook_command)
                txt = "%s Executing playbook command%s: %s\n" \
                    % (datetime.today().strftime('%Y-%m-%d-%H:%M:%S'),
                       timeout_log_str,
                       logged_playbook_command)
                logged_playbook_command = _strip_password_from_command(playbook_command)
                txt = "%s Executing playbook command%s: %s\n" % (
                    datetime.today().strftime("%Y-%m-%d-%H:%M:%S"),
                    timeout_log_str,
                    logged_playbook_command,
                )
                f_out_log.write(txt)
                f_out_log.flush()

@ -176,23 +178,26 @@ class AnsiblePlaybook(object):
                # if present from previous executions
                unabortable_flag = os.path.join(
                    consts.ANSIBLE_OVERRIDES_PATH,
                    '.%s_deploy_not_abortable' % self.subcloud_name
                    ".%s_deploy_not_abortable" % self.subcloud_name,
                )
                if os.path.exists(unabortable_flag):
                    os.remove(unabortable_flag)

                subp = subprocess.Popen(playbook_command,
                                        stdout=f_out_log,
                                        stderr=f_out_log,
                                        env=exec_env,
                                        start_new_session=register_cleanup)
                subp = subprocess.Popen(
                    playbook_command,
                    stdout=f_out_log,
                    stderr=f_out_log,
                    env=exec_env,
                    start_new_session=register_cleanup,
                )
                try:
                    if register_cleanup:
                        SubprocessCleanup.register_subprocess_group(subp)
                    with AnsiblePlaybook.lock:
                        AnsiblePlaybook.abort_status[self.subcloud_name] = {
                            'abort': False,
                            'subp': subp}
                            "abort": False,
                            "subp": subp,
                        }

                    subp.wait(timeout)
                    subp_rc = subp.poll()
@ -214,11 +219,13 @@ class AnsiblePlaybook(object):
                    # - playbook_failure is True with subp_rc != 0,
                    #   aborted is True, unabortable_flag_exists is False
                    with AnsiblePlaybook.lock:
                        aborted = \
                            AnsiblePlaybook.abort_status[self.subcloud_name]['abort']
                        aborted = AnsiblePlaybook.abort_status[self.subcloud_name][
                            "abort"
                        ]
                    unabortable_flag_exists = os.path.exists(unabortable_flag)
                    playbook_failure = (subp_rc != 0 and
                                        (not aborted or unabortable_flag_exists))
                    playbook_failure = subp_rc != 0 and (
                        not aborted or unabortable_flag_exists
                    )

                    # Raise PlaybookExecutionFailed if the playbook fails when
                    # on normal conditions (no abort issued) or fails while
@ -229,11 +236,12 @@ class AnsiblePlaybook(object):
                except subprocess.TimeoutExpired:
                    kill_subprocess_group(subp)
                    f_out_log.write(
                        "%s TIMEOUT (%ss) - playbook is terminated\n" %
                        (datetime.today().strftime('%Y-%m-%d-%H:%M:%S'), timeout)
                        "%s TIMEOUT (%ss) - playbook is terminated\n"
                        % (datetime.today().strftime("%Y-%m-%d-%H:%M:%S"), timeout)
                    )
                    raise PlaybookExecutionTimeout(playbook_cmd=playbook_command,
                                                   timeout=timeout)
                    raise PlaybookExecutionTimeout(
                        playbook_cmd=playbook_command, timeout=timeout
                    )
                finally:
                    f_out_log.flush()
                    if register_cleanup:
@ -256,26 +264,27 @@ def _strip_password_from_command(script_command):
            logged_command.append(item)
        else:
            tmpl = item.split()
            tmpstr = ''
            tmpstr = ""
            for tmp in tmpl:
                if any(parm in tmp for parm in SCRIPT_PASSWD_PARMS):
                    tmpstr = tmpstr + tmp[:tmp.index('=') + 1] + ' '
                    tmpstr = tmpstr + tmp[: tmp.index("=") + 1] + " "
                else:
                    tmpstr = tmpstr + tmp + ' '
                    tmpstr = tmpstr + tmp + " "
            tmpstr = tmpstr[:-1]
            logged_command.append(tmpstr)
    return logged_command

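Note: a worked example of the stripping loop above, with a made-up command fragment; everything after '=' in a token naming a password parameter is dropped before logging:

    item = "sysadmin_password=S3cret! force=yes"
    tmpstr = ""
    for tmp in item.split():
        if any(parm in tmp for parm in SCRIPT_PASSWD_PARMS):
            tmpstr += tmp[: tmp.index("=") + 1] + " "
        else:
            tmpstr += tmp + " "
    print(tmpstr[:-1])  # -> "sysadmin_password= force=yes"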
def is_token_expiring_soon(token,
                           stale_token_duration_min=STALE_TOKEN_DURATION_MIN,
                           stale_token_duration_max=STALE_TOKEN_DURATION_MAX,
                           stale_token_duration_step=STALE_TOKEN_DURATION_STEP):
    expiry_time = timeutils.normalize_time(timeutils.parse_isotime(
        token['expires_at']))
    duration = random.randrange(stale_token_duration_min,
                                stale_token_duration_max,
                                stale_token_duration_step)
def is_token_expiring_soon(
    token,
    stale_token_duration_min=STALE_TOKEN_DURATION_MIN,
    stale_token_duration_max=STALE_TOKEN_DURATION_MAX,
    stale_token_duration_step=STALE_TOKEN_DURATION_STEP,
):
    expiry_time = timeutils.normalize_time(timeutils.parse_isotime(token["expires_at"]))
    duration = random.randrange(
        stale_token_duration_min, stale_token_duration_max, stale_token_duration_step
    )
    if timeutils.is_soon(expiry_time, duration):
        return True
    return False
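Note: the randomized duration above spreads token renewals out so that multiple workers holding the same token do not all renew at once. A rough usage sketch (the timestamp is made up; with the module defaults the call simply answers whether the token expires within the randomly chosen window):

    token = {"expires_at": "2024-01-01T00:10:00Z"}
    if is_token_expiring_soon(token):
        print("renew the token early")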
@ -289,12 +298,12 @@ def _get_key_from_file(file_contents, key):
    :param key: key to search
    :return: found value or ''
    """
    r = re.compile('^{}\=[\'\"]*([^\'\"\n]*)'.format(key), re.MULTILINE)
    r = re.compile("^{}\=['\"]*([^'\"\n]*)".format(key), re.MULTILINE)
    match = r.search(file_contents)
    if match:
        return match.group(1)
    else:
        return ''
        return ""

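Note: a quick illustration of what the MULTILINE regex above extracts from os-release style content; it anchors on KEY= at the start of a line and strips surrounding quotes:

    os_release = 'ID="debian"\nVERSION="11 (bullseye)"\n'
    assert _get_key_from_file(os_release, "ID") == "debian"
    assert _get_key_from_file(os_release, "VERSION") == "11 (bullseye)"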

@memoized
@ -305,21 +314,24 @@ def get_os_release(release_file=consts.OS_RELEASE_FILE):
    :param release_file: file to read from
    :return: a tuple of (ID, VERSION)
    """
    linux_distro = ('', '')
    linux_distro = ("", "")

    try:
        with open(release_file, 'r') as f:
        with open(release_file, "r") as f:
            data = f.read()
            linux_distro = (
                _get_key_from_file(data, 'ID'),
                _get_key_from_file(data, 'VERSION'))
                _get_key_from_file(data, "ID"),
                _get_key_from_file(data, "VERSION"),
            )
    except Exception as e:
        raise exceptions.DCCommonException(
            msg=_("Failed to open %s : %s" % (release_file, str(e))))
            msg=_("Failed to open %s : %s" % (release_file, str(e)))
        )

    if linux_distro[0] == '':
    if linux_distro[0] == "":
        raise exceptions.DCCommonException(
            msg=_("Could not determine os type from %s" % release_file))
            msg=_("Could not determine os type from %s" % release_file)
        )

    # Hint: This code is added here to aid future unit test.
    # Probably running unit tests on a non-supported OS (example at
@ -329,7 +341,8 @@ def get_os_release(release_file=consts.OS_RELEASE_FILE):
    # (get_os_release) for each supported OS.
    if linux_distro[0] not in consts.SUPPORTED_OS_TYPES:
        raise exceptions.DCCommonException(
            msg=_("Unsupported OS detected %s" % linux_distro[0]))
            msg=_("Unsupported OS detected %s" % linux_distro[0])
        )

    return linux_distro

@ -365,7 +378,8 @@ def is_centos(software_version=None):
def get_ssl_cert_ca_file():
    return os.path.join(
        consts.SSL_CERT_CA_DIR,
        consts.CERT_CA_FILE_DEBIAN if is_debian() else consts.CERT_CA_FILE_CENTOS)
        consts.CERT_CA_FILE_DEBIAN if is_debian() else consts.CERT_CA_FILE_CENTOS,
    )


def send_subcloud_shutdown_signal(subcloud_name):
@ -376,9 +390,9 @@ def send_subcloud_shutdown_signal(subcloud_name):
    """
    # All logs are expected to originate from the rvmc module,
    # so the log churn from the 'redfish.rest.v1' module is disabled.
    logging.getLogger('redfish.rest.v1').setLevel(logging.CRITICAL)
    logging.getLogger("redfish.rest.v1").setLevel(logging.CRITICAL)

    rvmc_config_file = os.path.join(consts.ANSIBLE_OVERRIDES_PATH,
                                    subcloud_name,
                                    consts.RVMC_CONFIG_FILE_NAME)
    rvmc_config_file = os.path.join(
        consts.ANSIBLE_OVERRIDES_PATH, subcloud_name, consts.RVMC_CONFIG_FILE_NAME
    )
    rvmc.power_off(subcloud_name, rvmc_config_file, LOG)

@ -23,7 +23,7 @@ modules = [
]

# List of modules that are already formatted with black
formatted_modules = []
formatted_modules = ["dccommon"]


# Function to run black check
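Note: the check function itself is outside this hunk; a minimal stand-in for what such a black check typically looks like (illustrative, not the repo's actual implementation):

    import subprocess
    import sys


    def run_black_check(module):
        """Return True if `black --check` passes for the given module."""
        result = subprocess.run(
            [sys.executable, "-m", "black", "--check", module],
            capture_output=True,
            text=True,
        )
        return result.returncode == 0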
@ -128,13 +128,13 @@ from dccommon import rvmc

# Constants
# ---------
FEATURE_NAME = 'Redfish Virtual Media Controller'
FEATURE_NAME = "Redfish Virtual Media Controller"
VERSION_MAJOR = 3
VERSION_MINOR = 1

# The path for RVMC PID file
RVMC_PID_FILE_PATH = '/var/run/rvmc/'
RVMC_PID_FILENAME_POSTFIX = '_rvmc.pid'
RVMC_PID_FILE_PATH = "/var/run/rvmc/"
RVMC_PID_FILENAME_POSTFIX = "_rvmc.pid"

# The signals to be caught for abnormal termination
EXIT_SIGNALS = [signal.SIGTERM, signal.SIGABRT, signal.SIGINT]
@ -154,14 +154,21 @@ def parse_arguments():
    """
    parser = argparse.ArgumentParser(description=FEATURE_NAME)

    parser.add_argument("--debug", type=int, required=False, default=0,
                        help="Optional debug level ; 0..4")
    parser.add_argument(
        "--debug",
        type=int,
        required=False,
        default=0,
        help="Optional debug level ; 0..4",
    )

    parser.add_argument("--subcloud_name", type=str, required=False,
                        help="Subcloud name")
    parser.add_argument(
        "--subcloud_name", type=str, required=False, help="Subcloud name"
    )

    parser.add_argument("--config_file", type=str, required=True,
                        help="RVMC config file")
    parser.add_argument(
        "--config_file", type=str, required=True, help="RVMC config file"
    )

    return parser.parse_args()

@ -181,7 +188,7 @@ def prepare_execution(rvmc_pid_file):
    # Check if the PID file exists.
    # Usually, it exists only when the parent process was manually killed.
    if os.path.exists(rvmc_pid_file):
        with open(rvmc_pid_file, 'r') as pid_file:
        with open(rvmc_pid_file, "r") as pid_file:
            pid = pid_file.read()
        # Attempt to kill the previous RVMC process using SIGTERM (15)
        if pid:
@ -189,14 +196,13 @@ def prepare_execution(rvmc_pid_file):
                os.kill(int(pid), 15)
            except ProcessLookupError:
                # Ignore the error if the process with this PID doesn't exist
                logging_util.ilog(
                    "Process %s not found or already terminated." % pid)
                logging_util.ilog("Process %s not found or already terminated." % pid)
            except Exception:
                logging_util.elog(
                    "Failed to terminate the previous process %s," % pid)
                logging_util.elog("Failed to terminate the previous process %s," % pid)
                logging_util.alog(
                    "Please terminate the previous process %s "
                    "before running the RVMC script again." % pid)
                    "before running the RVMC script again." % pid
                )
                exit_handler.exit(2)
        # Give some time between reading and writing to the same PID file
        time.sleep(3)
@ -205,9 +211,10 @@ def prepare_execution(rvmc_pid_file):
    current_pid = os.getpid()

    # Write the PID to the file
    logging_util.dlog1("Save process ID %d to the file %s." %
                       (current_pid, rvmc_pid_file))
    with open(rvmc_pid_file, 'w') as pid_file:
    logging_util.dlog1(
        "Save process ID %d to the file %s." % (current_pid, rvmc_pid_file)
    )
    with open(rvmc_pid_file, "w") as pid_file:
        pid_file.write(str(current_pid))


@ -222,6 +229,7 @@ class ExitHandler(rvmc.ExitHandler):

    Provides methods to manage the process exit in various situations.
    """

    def __init__(self, rvmc_pid_file):
        """Handler object constructor.

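Note: prepare_execution above is the classic PID-file dance: kill the stale instance if one is recorded, then record our own PID. A condensed sketch of the same idea:

    import os


    def ensure_single_instance(pid_file):
        # Terminate a stale instance if its PID file is still around.
        if os.path.exists(pid_file):
            with open(pid_file, "r") as f:
                stale_pid = f.read().strip()
            if stale_pid:
                try:
                    os.kill(int(stale_pid), 15)  # SIGTERM
                except ProcessLookupError:
                    pass  # already gone
        # Record our own PID for the next run to find.
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))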
@ -266,27 +274,31 @@ if __name__ == "__main__":

    # RVMC PID file
    rvmc_pid_file = os.path.join(
        RVMC_PID_FILE_PATH, subcloud_name + RVMC_PID_FILENAME_POSTFIX)
        RVMC_PID_FILE_PATH, subcloud_name + RVMC_PID_FILENAME_POSTFIX
    )

    # Set logging utility and exit handler
    logging_util = rvmc.LoggingUtil(debug_level=debug)
    exit_handler = ExitHandler(rvmc_pid_file)

    logging_util.ilog("%s version %d.%d\n" %
                      (FEATURE_NAME, VERSION_MAJOR, VERSION_MINOR))
    logging_util.ilog(
        "%s version %d.%d\n" % (FEATURE_NAME, VERSION_MAJOR, VERSION_MINOR)
    )

    # Register the signal handler
    for sig in EXIT_SIGNALS:
        signal.signal(sig, signal_handler)

    config, target_object = rvmc.parse_config_file(
        subcloud_name, config_file, logging_util, exit_handler)
        subcloud_name, config_file, logging_util, exit_handler
    )

    if target_object:
        prepare_execution(rvmc_pid_file)
        # TODO(lzhu1): support --timeout <value> option
        script_timeout = eventlet.timeout.Timeout(
            int(os.environ.get('RVMC_SCRIPT_TIMEOUT', 1800)))
            int(os.environ.get("RVMC_SCRIPT_TIMEOUT", 1800))
        )
        try:
            # Load the Iso for the target
            logging_util.ilog("BMC Target : %s" % target_object.target)
@ -294,8 +306,7 @@ if __name__ == "__main__":
            logging_util.ilog("Host Image : %s" % target_object.img)

            excluded_operations = []
            if (os.path.basename(target_object.img) ==
                    consts.ENROLL_INIT_SEED_ISO_NAME):
            if os.path.basename(target_object.img) == consts.ENROLL_INIT_SEED_ISO_NAME:
                # If the host image is a seed ISO,
                # the boot order should not be changed.
                excluded_operations = ["set_boot_override"]
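Note: script_timeout above arms eventlet's Timeout as a watchdog around the whole RVMC run. A minimal sketch of the idiom (the sleep stands in for the real work):

    import eventlet

    watchdog = eventlet.timeout.Timeout(2)  # seconds
    try:
        eventlet.sleep(10)  # stand-in for the long-running work
    except eventlet.timeout.Timeout:
        print("timed out")
    finally:
        watchdog.cancel()  # always disarm the watchdog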
@ -30,15 +30,14 @@ setenv =
    OSLO_LOCK_PATH={toxinidir}
deps =
    -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/starlingx/root/raw/branch/master/build-tools/requirements/debian/upper-constraints.txt}
    -r{toxinidir}/test-requirements.txt
    -r{toxinidir}/requirements.txt
    -r{toxinidir}/test-requirements.txt
    -e{[dc]cgcs_patch_src_dir}
    -e{[dc]cgtsclient_src_dir}
    -e{[dc]fmclient_src_dir}
    -e{[dc]fm_api_src_dir}
    -e{[dc]nfv_client_src_dir}
    -e{[dc]tsconfig_src_dir}

allowlist_externals =
    rm
    find
@ -48,7 +47,6 @@ commands =
    find {toxinidir} -not -path '{toxinidir}/.tox/*' -name '*.py[c|o]' -delete
    python setup.py testr --slowest --testr-args='{posargs}'


[testenv:pep8]
commands = flake8

@ -70,11 +68,12 @@ commands = oslo_debug_helper {posargs}
show-source = True
max-line-length = 88
# Suppressed flake8 codes
# W503 line break before binary operator
# H301 one import per line; conflict with Black
# W503 line break before binary operator; conflict with Black
# W504 line break after binary operator
# W605 invalid escape sequence
# E731 do not assign a lambda expression, use a def
ignore = W503,W504,W605,E731
ignore = H301,W503,W504,W605,E731
builtins = _

[testenv:genconfig]