Drydock focal-related upgrades

This PS implements the following changes:
- switches the dependency freeze approach to requirements-direct.txt and
  requirements-frozen.txt files
- adjusts code indentation style according to yapf recommendations
- replaces deprecated usage of the response.body attribute with
  response.text (a short sketch follows this list)
- fixes integration tests controlled by Makefile + tox
- uplifts Helm to v3.9.4
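
For reference, the response attribute change tracks Falcon's 3.x deprecation
of resp.body in favor of resp.text for textual payloads. A minimal runnable
sketch of the pattern, mirroring the VersionsResource change further down in
this diff:

    import json

    import falcon

    class VersionsResource:
        def on_get(self, req, resp):
            # Falcon >= 3.0: resp.text replaces the deprecated resp.body
            # for string payloads such as serialized JSON.
            resp.text = json.dumps(
                {'v1.0': {'path': '/api/v1.0', 'status': 'stable'}})
            resp.status = falcon.HTTP_200

    app = falcon.App()
    app.add_route('/versions', VersionsResource())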

Change-Id: I751db72eb8f670825382f11a36657112faeb169a
Sergiy Markin 2023-04-26 12:31:09 +00:00
parent 415a8b52c5
commit d00eaf0303
181 changed files with 2811 additions and 4894 deletions

View File

@ -22,5 +22,5 @@ python:
version: 3.8
install:
- requirements: doc/requirements-doc.txt
- requirements: python/requirements-lock.txt
- requirements: python/requirements-test.txt
- requirements: python/requirements-frozen.txt
- requirements: python/test-requirements.txt

View File

@ -19,7 +19,7 @@ IMAGE_PREFIX ?= airshipit
IMAGE_TAG ?= dev
HELM := $(shell realpath $(BUILD_DIR))/helm
UBUNTU_BASE_IMAGE ?=
DISTRO ?= ubuntu_focal
PROXY ?= http://proxy.foo.com:8000
NO_PROXY ?= localhost,127.0.0.1,.svc.cluster.local
USE_PROXY ?= false

View File

@ -32,7 +32,7 @@ and policy file templates to be customized
$ tox -e genpolicy
$ virtualenv -p python3.5 /var/tmp/drydock
$ . /var/tmp/drydock/bin/activate
$ pip install -r requirements-lock.txt
$ pip install -r requirements-frozen.txt
$ pip install .
$ cp -r etc/drydock /etc/drydock

View File

@ -39,8 +39,9 @@ def run_migrations_offline():
"""
return # We don't support offline
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
context.configure(url=url,
target_metadata=target_metadata,
literal_binds=True)
with context.begin_transaction():
context.run_migrations()
@ -55,15 +56,15 @@ def run_migrations_online():
"""
db_url = os.environ['DRYDOCK_DB_URL']
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool,
url=db_url)
connectable = engine_from_config(config.get_section(
config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool,
url=db_url)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata)
context.configure(connection=connection,
target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
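
The re-indentation in this hunk is yapf's standard continuation style: when a
call's arguments no longer fit on one line, they are aligned under the opening
parenthesis rather than given a hanging indent. A runnable illustration of the
same rule, using a stand-in for alembic's context.configure (assuming the
project follows yapf's pep8-based defaults with a 79-column limit):

    def configure(url=None, target_metadata=None, literal_binds=False):
        # Stand-in for alembic's context.configure, only here to make
        # the sketch self-contained.
        return url, target_metadata, literal_binds

    # Before yapf: hanging indent, four spaces.
    configure(
        url='sqlite://', target_metadata=None, literal_binds=True)

    # After yapf: arguments aligned with the opening delimiter.
    configure(url='sqlite://',
              target_metadata=None,
              literal_binds=True)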

View File

@ -15,7 +15,7 @@
apiVersion: v1
description: A Helm chart for Drydock
name: drydock
version: 0.1.1
version: 0.1.2
keywords:
- drydock
home: https://github.com/openstack/airship-drydock

View File

@ -1,5 +1,5 @@
sphinx_rtd_theme==1.2.0
pylibyaml==0.1.0
oslo_versionedobjects==3.1.0
falcon==3.1.1
keystoneauth1==5.1.2
oslo.versionedobjects==3.1.0
falcon
keystoneauth1<=5.1.1

View File

@ -1,112 +1,8 @@
[DEFAULT]
#
# From drydock_provisioner
#
# Polling interval in seconds for checking subtask or downstream status (integer
# value)
# Minimum value: 1
#poll_interval = 10
# How long a leader has to check in before leadership can be usurped, in seconds
# (integer value)
#leader_grace_period = 300
# How often an instance will attempt to claim leadership, in seconds (integer
# value)
#leadership_claim_interval = 30
[database]
#
# From drydock_provisioner
#
# The URI database connect string. (string value)
#database_connect_string = <None>
# The SQLalchemy database connection pool size. (integer value)
#pool_size = 15
# Should DB connections be validated prior to use. (boolean value)
#pool_pre_ping = true
# How long a request for a connection should wait before one becomes available.
# (integer value)
#pool_timeout = 30
# How many connections above pool_size are allowed to be open during high usage.
# (integer value)
#pool_overflow = 10
# Time, in seconds, when a connection should be closed and re-established. -1
# for no recycling. (integer value)
#connection_recycle = -1
[keystone_authtoken]
#
# From drydock_provisioner
#
# Authentication URL (string value)
#auth_url = <None>
# Scope for system operations (string value)
#system_scope = <None>
# Domain ID to scope to (string value)
#domain_id = <None>
# Domain name to scope to (string value)
#domain_name = <None>
# Project ID to scope to (string value)
# Deprecated group/name - [keystone_authtoken]/tenant_id
#project_id = <None>
# Project name to scope to (string value)
# Deprecated group/name - [keystone_authtoken]/tenant_name
#project_name = <None>
# Domain ID containing project (string value)
#project_domain_id = <None>
# Domain name containing project (string value)
#project_domain_name = <None>
# ID of the trust to use as a trustee use (string value)
#trust_id = <None>
# Optional domain ID to use with v3 and v2 parameters. It will be used for both
# the user and project domain in v3 and ignored in v2 authentication. (string
# value)
#default_domain_id = <None>
# Optional domain name to use with v3 API and v2 parameters. It will be used for
# both the user and project domain in v3 and ignored in v2 authentication.
# (string value)
#default_domain_name = <None>
# User id (string value)
#user_id = <None>
# Username (string value)
# Deprecated group/name - [keystone_authtoken]/user_name
#username = <None>
# User's domain id (string value)
#user_domain_id = <None>
# User's domain name (string value)
#user_domain_name = <None>
# User's password (string value)
#password = <None>
#
# From keystonemiddleware.auth_token
#
@ -266,84 +162,6 @@
#auth_section = <None>
[libvirt_driver]
#
# From drydock_provisioner
#
# Polling interval in seconds for querying libvirt status (integer value)
#poll_interval = 10
[logging]
#
# From drydock_provisioner
#
# Global log level for Drydock (string value)
#log_level = INFO
# Logger name for the top-level logger (string value)
#global_logger_name = drydock_provisioner
# Logger name for OOB driver logging (string value)
#oobdriver_logger_name = ${global_logger_name}.oobdriver
# Logger name for Node driver logging (string value)
#nodedriver_logger_name = ${global_logger_name}.nodedriver
# Logger name for Kubernetes driver logging (string value)
#kubernetesdriver_logger_name = ${global_logger_name}.kubernetesdriver
# Logger name for API server logging (string value)
#control_logger_name = ${global_logger_name}.control
[maasdriver]
#
# From drydock_provisioner
#
# The API key for accessing MaaS (string value)
#maas_api_key = <None>
# The URL for accessing MaaS API (string value)
#maas_api_url = <None>
# Update MAAS to use the provided Node OOB params, overwriting discovered values
# (boolean value)
#use_node_oob_params = false
# Skip BMC reconfiguration during commissioning (requires MAAS 2.7+) (boolean
# value)
#skip_bmc_config = false
# Polling interval for querying MaaS status in seconds (integer value)
#poll_interval = 10
[network]
#
# From drydock_provisioner
#
# Timeout for the initial connection of outgoing HTTP calls from Drydock in seconds.
# (integer value)
#http_client_connect_timeout = 16
# Timeout for initial read of outgoing HTTP calls from Drydock in seconds.
# (integer value)
#http_client_read_timeout = 300
# Number of retries for transient errors of outgoing HTTP calls from Drydock.
# (integer value)
#http_client_retries = 3
[oslo_policy]
#
@ -402,102 +220,3 @@
# Absolute path client key file REST based policy check (string value)
#remote_ssl_client_key_file = <None>
[plugins]
#
# From drydock_provisioner
#
# Module path string of an input ingester to enable (string value)
#ingester = drydock_provisioner.ingester.plugins.yaml.YamlIngester
# List of module path strings of OOB drivers to enable (list value)
#oob_driver = drydock_provisioner.drivers.oob.pyghmi_driver.PyghmiDriver
# Module path string of the Node driver to enable (string value)
#node_driver = drydock_provisioner.drivers.node.maasdriver.driver.MaasNodeDriver
# Module path string of the Kubernetes driver to enable (string value)
#kubernetes_driver = drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver
# Module path string of the Network driver to enable (string value)
#network_driver = <None>
[pyghmi_driver]
#
# From drydock_provisioner
#
# Polling interval in seconds for querying IPMI status (integer value)
#poll_interval = 10
[redfish_driver]
#
# From drydock_provisioner
#
# Maximum number of connection retries to Redfish server (integer value)
# Minimum value: 1
#max_retries = 10
# Maximum retries to wait for power state change (integer value)
# Minimum value: 1
#power_state_change_max_retries = 18
# Polling interval in seconds between retries for power state change (integer
# value)
#power_state_change_retry_interval = 10
# Use SSL to communicate with Redfish API server (boolean value)
#use_ssl = true
[timeouts]
#
# From drydock_provisioner
#
# Fallback timeout when a specific one is not configured (integer value)
#drydock_timeout = 5
# Timeout in minutes for creating site network templates (integer value)
#create_network_template = 2
# Timeout in minutes for creating user credentials (integer value)
#configure_user_credentials = 2
# Timeout in minutes for initial node identification (integer value)
#identify_node = 10
# Timeout in minutes for node commissioning and hardware configuration (integer
# value)
#configure_hardware = 30
# Timeout in minutes for configuring node networking (integer value)
#apply_node_networking = 5
# Timeout in minutes for configuring node storage (integer value)
#apply_node_storage = 5
# Timeout in minutes for configuring node platform (integer value)
#apply_node_platform = 5
# Timeout in minutes for deploying a node (integer value)
#deploy_node = 45
# Timeout in minutes between deployment completion and all boot actions
# reporting status (integer value)
#bootaction_final_status = 15
# Timeout in minutes for releasing a node (integer value)
#destroy_node = 30
# Timeout in minutes for relabeling a node (integer value)
#relabel_node = 5

View File

@ -1,70 +0,0 @@
# Actions requiring admin authority
#"admin_required": "role:admin or is_admin:1"
# Get task status
# GET /api/v1.0/tasks
# GET /api/v1.0/tasks/{task_id}
#"physical_provisioner:read_task": "role:admin"
# Create a task
# POST /api/v1.0/tasks
#"physical_provisioner:create_task": "role:admin"
# Create validate_design task
# POST /api/v1.0/tasks
#"physical_provisioner:validate_design": "role:admin"
# Create verify_site task
# POST /api/v1.0/tasks
#"physical_provisioner:verify_site": "role:admin"
# Create prepare_site task
# POST /api/v1.0/tasks
#"physical_provisioner:prepare_site": "role:admin"
# Create verify_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:verify_nodes": "role:admin"
# Create prepare_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:prepare_nodes": "role:admin"
# Create deploy_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:deploy_nodes": "role:admin"
# Create destroy_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:destroy_nodes": "role:admin"
# Deletes tasks by age
# DELETE /api/v1.0/tasks
#"physical_provisioner:delete_tasks": "role:admin"
# Create relabel_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:relabel_nodes": "role:admin"
# Read build data for a node
# GET /api/v1.0/nodes/{nodename}/builddata
#"physical_provisioner:read_build_data": "role:admin"
# Read loaded design data
# GET /api/v1.0/designs
# GET /api/v1.0/designs/{design_id}
#"physical_provisioner:read_data": "role:admin"
# Load design data
# POST /api/v1.0/designs
# POST /api/v1.0/designs/{design_id}/parts
#"physical_provisioner:ingest_data": "role:admin"
# Get health status
# GET /api/v1.0/health/extended
#"physical_provisioner:health_data": "role:admin"
# Validate site design
# POST /api/v1.0/validatedesign
#"physical_provisioner:validate_site_design": "role:admin"

View File

@ -1,112 +1,8 @@
[DEFAULT]
#
# From drydock_provisioner
#
# Polling interval in seconds for checking subtask or downstream status (integer
# value)
# Minimum value: 1
#poll_interval = 10
# How long a leader has to check in before leadership can be usurped, in seconds
# (integer value)
#leader_grace_period = 300
# How often an instance will attempt to claim leadership, in seconds (integer
# value)
#leadership_claim_interval = 30
[database]
#
# From drydock_provisioner
#
# The URI database connect string. (string value)
#database_connect_string = <None>
# The SQLalchemy database connection pool size. (integer value)
#pool_size = 15
# Should DB connections be validated prior to use. (boolean value)
#pool_pre_ping = true
# How long a request for a connection should wait before one becomes available.
# (integer value)
#pool_timeout = 30
# How many connections above pool_size are allowed to be open during high usage.
# (integer value)
#pool_overflow = 10
# Time, in seconds, when a connection should be closed and re-established. -1
# for no recycling. (integer value)
#connection_recycle = -1
[keystone_authtoken]
#
# From drydock_provisioner
#
# Authentication URL (string value)
#auth_url = <None>
# Scope for system operations (string value)
#system_scope = <None>
# Domain ID to scope to (string value)
#domain_id = <None>
# Domain name to scope to (string value)
#domain_name = <None>
# Project ID to scope to (string value)
# Deprecated group/name - [keystone_authtoken]/tenant_id
#project_id = <None>
# Project name to scope to (string value)
# Deprecated group/name - [keystone_authtoken]/tenant_name
#project_name = <None>
# Domain ID containing project (string value)
#project_domain_id = <None>
# Domain name containing project (string value)
#project_domain_name = <None>
# ID of the trust to use as a trustee use (string value)
#trust_id = <None>
# Optional domain ID to use with v3 and v2 parameters. It will be used for both
# the user and project domain in v3 and ignored in v2 authentication. (string
# value)
#default_domain_id = <None>
# Optional domain name to use with v3 API and v2 parameters. It will be used for
# both the user and project domain in v3 and ignored in v2 authentication.
# (string value)
#default_domain_name = <None>
# User id (string value)
#user_id = <None>
# Username (string value)
# Deprecated group/name - [keystone_authtoken]/user_name
#username = <None>
# User's domain id (string value)
#user_domain_id = <None>
# User's domain name (string value)
#user_domain_name = <None>
# User's password (string value)
#password = <None>
#
# From keystonemiddleware.auth_token
#
@ -266,84 +162,6 @@
#auth_section = <None>
[libvirt_driver]
#
# From drydock_provisioner
#
# Polling interval in seconds for querying libvirt status (integer value)
#poll_interval = 10
[logging]
#
# From drydock_provisioner
#
# Global log level for Drydock (string value)
#log_level = INFO
# Logger name for the top-level logger (string value)
#global_logger_name = drydock_provisioner
# Logger name for OOB driver logging (string value)
#oobdriver_logger_name = ${global_logger_name}.oobdriver
# Logger name for Node driver logging (string value)
#nodedriver_logger_name = ${global_logger_name}.nodedriver
# Logger name for Kubernetes driver logging (string value)
#kubernetesdriver_logger_name = ${global_logger_name}.kubernetesdriver
# Logger name for API server logging (string value)
#control_logger_name = ${global_logger_name}.control
[maasdriver]
#
# From drydock_provisioner
#
# The API key for accessing MaaS (string value)
#maas_api_key = <None>
# The URL for accessing MaaS API (string value)
#maas_api_url = <None>
# Update MAAS to use the provided Node OOB params, overwriting discovered values
# (boolean value)
#use_node_oob_params = false
# Skip BMC reconfiguration during commissioning (requires MAAS 2.7+) (boolean
# value)
#skip_bmc_config = false
# Polling interval for querying MaaS status in seconds (integer value)
#poll_interval = 10
[network]
#
# From drydock_provisioner
#
# Timeout for the initial connection of outgoing HTTP calls from Drydock in seconds.
# (integer value)
#http_client_connect_timeout = 16
# Timeout for initial read of outgoing HTTP calls from Drydock in seconds.
# (integer value)
#http_client_read_timeout = 300
# Number of retries for transient errors of outgoing HTTP calls from Drydock.
# (integer value)
#http_client_retries = 3
[oslo_policy]
#
@ -402,102 +220,3 @@
# Absolute path client key file REST based policy check (string value)
#remote_ssl_client_key_file = <None>
[plugins]
#
# From drydock_provisioner
#
# Module path string of an input ingester to enable (string value)
#ingester = drydock_provisioner.ingester.plugins.yaml.YamlIngester
# List of module path strings of OOB drivers to enable (list value)
#oob_driver = drydock_provisioner.drivers.oob.pyghmi_driver.PyghmiDriver
# Module path string of the Node driver to enable (string value)
#node_driver = drydock_provisioner.drivers.node.maasdriver.driver.MaasNodeDriver
# Module path string of the Kubernetes driver to enable (string value)
#kubernetes_driver = drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver
# Module path string of the Network driver to enable (string value)
#network_driver = <None>
[pyghmi_driver]
#
# From drydock_provisioner
#
# Polling interval in seconds for querying IPMI status (integer value)
#poll_interval = 10
[redfish_driver]
#
# From drydock_provisioner
#
# Maximum number of connection retries to Redfish server (integer value)
# Minimum value: 1
#max_retries = 10
# Maximum retries to wait for power state change (integer value)
# Minimum value: 1
#power_state_change_max_retries = 18
# Polling interval in seconds between retries for power state change (integer
# value)
#power_state_change_retry_interval = 10
# Use SSL to communicate with Redfish API server (boolean value)
#use_ssl = true
[timeouts]
#
# From drydock_provisioner
#
# Fallback timeout when a specific one is not configured (integer value)
#drydock_timeout = 5
# Timeout in minutes for creating site network templates (integer value)
#create_network_template = 2
# Timeout in minutes for creating user credentials (integer value)
#configure_user_credentials = 2
# Timeout in minutes for initial node identification (integer value)
#identify_node = 10
# Timeout in minutes for node commissioning and hardware configuration (integer
# value)
#configure_hardware = 30
# Timeout in minutes for configuring node networking (integer value)
#apply_node_networking = 5
# Timeout in minutes for configuring node storage (integer value)
#apply_node_storage = 5
# Timeout in minutes for configuring node platform (integer value)
#apply_node_platform = 5
# Timeout in minutes for deploying a node (integer value)
#deploy_node = 45
# Timeout in minutes between deployment completion and all boot actions
# reporting status (integer value)
#bootaction_final_status = 15
# Timeout in minutes for releasing a node (integer value)
#destroy_node = 30
# Timeout in minutes for relabeling a node (integer value)
#relabel_node = 5

View File

@ -1,70 +0,0 @@
# Actions requiring admin authority
#"admin_required": "role:admin or is_admin:1"
# Get task status
# GET /api/v1.0/tasks
# GET /api/v1.0/tasks/{task_id}
#"physical_provisioner:read_task": "role:admin"
# Create a task
# POST /api/v1.0/tasks
#"physical_provisioner:create_task": "role:admin"
# Create validate_design task
# POST /api/v1.0/tasks
#"physical_provisioner:validate_design": "role:admin"
# Create verify_site task
# POST /api/v1.0/tasks
#"physical_provisioner:verify_site": "role:admin"
# Create prepare_site task
# POST /api/v1.0/tasks
#"physical_provisioner:prepare_site": "role:admin"
# Create verify_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:verify_nodes": "role:admin"
# Create prepare_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:prepare_nodes": "role:admin"
# Create deploy_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:deploy_nodes": "role:admin"
# Create destroy_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:destroy_nodes": "role:admin"
# Deletes tasks by age
# DELETE /api/v1.0/tasks
#"physical_provisioner:delete_tasks": "role:admin"
# Create relabel_nodes task
# POST /api/v1.0/tasks
#"physical_provisioner:relabel_nodes": "role:admin"
# Read build data for a node
# GET /api/v1.0/nodes/{nodename}/builddata
#"physical_provisioner:read_build_data": "role:admin"
# Read loaded design data
# GET /api/v1.0/designs
# GET /api/v1.0/designs/{design_id}
#"physical_provisioner:read_data": "role:admin"
# Load design data
# POST /api/v1.0/designs
# POST /api/v1.0/designs/{design_id}/parts
#"physical_provisioner:ingest_data": "role:admin"
# Get health status
# GET /api/v1.0/health/extended
#"physical_provisioner:health_data": "role:admin"
# Validate site design
# POST /api/v1.0/validatedesign
#"physical_provisioner:validate_site_design": "role:admin"

View File

@ -92,10 +92,10 @@ ENV LD_LIBRARY_PATH=/usr/local/lib
COPY --from=baclient_builder /usr/local/lib /usr/local/lib
COPY --from=baclient_builder /usr/local/include/yaml.h /usr/local/include/yaml.h
COPY ./python/requirements-lock.txt /tmp/drydock/
COPY ./python/requirements-frozen.txt /tmp/drydock/
RUN pip3 install \
--no-cache-dir \
-r /tmp/drydock/requirements-lock.txt
-r /tmp/drydock/requirements-frozen.txt
COPY ./python /tmp/drydock/python
WORKDIR /tmp/drydock/python

View File

@ -106,14 +106,15 @@ ENV LD_LIBRARY_PATH=/usr/local/lib
COPY --from=baclient_builder /usr/local/lib /usr/local/lib
COPY --from=baclient_builder /usr/local/include/yaml.h /usr/local/include/yaml.h
COPY ./python/requirements-lock.txt /tmp/drydock/
COPY ./python/requirements-frozen.txt /tmp/drydock/
RUN pip3 install \
--no-cache-dir \
-r /tmp/drydock/requirements-lock.txt
-r /tmp/drydock/requirements-frozen.txt
COPY ./python /tmp/drydock/python
WORKDIR /tmp/drydock/python
RUN python3 setup.py install
RUN cd /tmp/drydock/python \
&& pip3 install $(pwd)
COPY ./alembic /tmp/drydock/alembic
COPY ./alembic.ini /tmp/drydock/alembic.ini

View File

@ -25,28 +25,27 @@ from .node import commands as node
@click.group()
@click.option(
'--debug/--no-debug', help='Enable or disable debugging', default=False)
@click.option('--debug/--no-debug',
help='Enable or disable debugging',
default=False)
# Supported Environment Variables
@click.option(
'--os_project_domain_name',
envvar='OS_PROJECT_DOMAIN_NAME',
required=False)
@click.option(
'--os_user_domain_name', envvar='OS_USER_DOMAIN_NAME', required=False)
@click.option('--os_project_domain_name',
envvar='OS_PROJECT_DOMAIN_NAME',
required=False)
@click.option('--os_user_domain_name',
envvar='OS_USER_DOMAIN_NAME',
required=False)
@click.option('--os_project_name', envvar='OS_PROJECT_NAME', required=False)
@click.option('--os_username', envvar='OS_USERNAME', required=False)
@click.option('--os_password', envvar='OS_PASSWORD', required=False)
@click.option('--os_auth_url', envvar='OS_AUTH_URL', required=False)
@click.option(
'--os_token',
help='The Keystone token to be used',
default=lambda: os.environ.get('OS_TOKEN', ''))
@click.option(
'--url',
'-u',
help='The url of the running drydock instance',
default=lambda: os.environ.get('DD_URL', ''))
@click.option('--os_token',
help='The Keystone token to be used',
default=lambda: os.environ.get('OS_TOKEN', ''))
@click.option('--url',
'-u',
help='The url of the running drydock instance',
default=lambda: os.environ.get('DD_URL', ''))
@click.pass_context
def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name,
os_project_name, os_username, os_password, os_auth_url, os_token):
@ -83,8 +82,8 @@ def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name,
str(keystone_env))
ks_sess = KeystoneClient.get_ks_session(**keystone_env)
else:
logger.debug(
"Generating Keystone session by explicit token: %s" % os_token)
logger.debug("Generating Keystone session by explicit token: %s" %
os_token)
ks_sess = KeystoneClient.get_ks_session(token=os_token)
KeystoneClient.get_token(ks_sess=ks_sess)
except Exception as ex:
@ -94,8 +93,8 @@ def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name,
try:
if not url:
url = KeystoneClient.get_endpoint(
'physicalprovisioner', ks_sess=ks_sess)
url = KeystoneClient.get_endpoint('physicalprovisioner',
ks_sess=ks_sess)
except Exception as ex:
logger.debug("Exception getting Drydock endpoint.", exc_info=ex)
ctx.fail('Error: Unable to discover Drydock API URL')
@ -109,11 +108,10 @@ def drydock(ctx, debug, url, os_project_domain_name, os_user_domain_name,
if not url_parse_result.scheme:
ctx.fail('URL must specify a scheme and hostname, optionally a port')
ctx.obj['CLIENT'] = DrydockClient(
DrydockSession(
scheme=url_parse_result.scheme,
host=url_parse_result.hostname,
port=url_parse_result.port,
auth_gen=auth_gen))
DrydockSession(scheme=url_parse_result.scheme,
host=url_parse_result.hostname,
port=url_parse_result.port,
auth_gen=auth_gen))
drydock.add_command(task.task)

View File

@ -59,8 +59,8 @@ class DesignShow(CliAction): # pylint: disable=too-few-public-methods
design_id)
def invoke(self):
return self.api_client.get_design(
design_id=self.design_id, source=self.source)
return self.api_client.get_design(design_id=self.design_id,
source=self.source)
class DesignValidate(CliAction): # pylint: disable=too-few-public-methods

View File

@ -31,10 +31,9 @@ def design():
@design.command(name='create')
@click.option(
'--base-design',
'-b',
help='The base design to model this new design after')
@click.option('--base-design',
'-b',
help='The base design to model this new design after')
@click.pass_context
def design_create(ctx, base_design=None):
"""Create a design."""
@ -61,8 +60,9 @@ def design_show(ctx, design_id):
@design.command(name='validate')
@click.option(
'--design-href', '-h', help='The design href key to the design ref')
@click.option('--design-href',
'-h',
help='The design href key to the design ref')
@click.pass_context
def design_validate(ctx, design_href=None):
"""Validate a design."""

View File

@ -47,5 +47,5 @@ class NodeBuildData(CliAction):
self.logger.debug('NodeBuildData action initialized')
def invoke(self):
return self.api_client.get_node_build_data(
self.nodename, latest=self.latest)
return self.api_client.get_node_build_data(self.nodename,
latest=self.latest)

View File

@ -31,8 +31,10 @@ def node():
@node.command(name='list')
@click.option(
'--output', '-o', help='Output format: table|json', default='table')
@click.option('--output',
'-o',
help='Output format: table|json',
default='table')
@click.pass_context
def node_list(ctx, output='table'):
"""List nodes."""
@ -59,12 +61,13 @@ def node_list(ctx, output='table'):
@node.command(name='builddata')
@click.option(
'--latest/--no-latest',
help='Retrieve only the latest data items.',
default=True)
@click.option(
'--output', '-o', help='Output format: yaml|json', default='yaml')
@click.option('--latest/--no-latest',
help='Retrieve only the latest data items.',
default=True)
@click.option('--output',
'-o',
help='Output format: yaml|json',
default='yaml')
@click.argument('nodename')
@click.pass_context
def node_builddata(ctx, nodename, latest=True, output='yaml'):
@ -78,5 +81,6 @@ def node_builddata(ctx, nodename, latest=True, output='yaml'):
click.echo(
"Invalid output format {}, default to YAML.".format(output))
click.echo(
yaml.safe_dump(
node_bd, allow_unicode=True, default_flow_style=False))
yaml.safe_dump(node_bd,
allow_unicode=True,
default_flow_style=False))

View File

@ -85,8 +85,7 @@ class PartShow(PartBase): # pylint: disable=too-few-public-methods
' kind=%s, key=%s, source=%s', design_id, kind, key, source)
def invoke(self):
return self.api_client.get_part(
design_id=self.design_id,
kind=self.kind,
key=self.key,
source=self.source)
return self.api_client.get_part(design_id=self.design_id,
kind=self.kind,
key=self.key,
source=self.source)

View File

@ -25,10 +25,9 @@ from drydock_provisioner.cli.part.actions import PartCreate
@click.group()
@click.option(
'--design-id',
'-d',
help='The id of the design containing the target parts')
@click.option('--design-id',
'-d',
help='The id of the design containing the target parts')
@click.pass_context
def part(ctx, design_id=None):
"""Drydock part commands."""
@ -39,8 +38,9 @@ def part(ctx, design_id=None):
@part.command(name='create')
@click.option(
'--file', '-f', help='The file name containing the part to create')
@click.option('--file',
'-f',
help='The file name containing the part to create')
@click.pass_context
def part_create(ctx, file=None):
"""Create a part."""
@ -52,10 +52,9 @@ def part_create(ctx, file=None):
# here is where some potential validation could be done on the input file
click.echo(
json.dumps(
PartCreate(
ctx.obj['CLIENT'],
design_id=ctx.obj['DESIGN_ID'],
in_file=file_contents).invoke()))
PartCreate(ctx.obj['CLIENT'],
design_id=ctx.obj['DESIGN_ID'],
in_file=file_contents).invoke()))
@part.command(name='list')
@ -83,9 +82,8 @@ def part_show(ctx, source, kind, key):
click.echo(
json.dumps(
PartShow(
ctx.obj['CLIENT'],
design_id=ctx.obj['DESIGN_ID'],
kind=kind,
key=key,
source=source).invoke()))
PartShow(ctx.obj['CLIENT'],
design_id=ctx.obj['DESIGN_ID'],
kind=kind,
key=key,
source=source).invoke()))

View File

@ -90,10 +90,9 @@ class TaskCreate(CliAction): # pylint: disable=too-few-public-methods
def invoke(self):
"""Invoke execution of this action."""
task = self.api_client.create_task(
design_ref=self.design_ref,
task_action=self.action_name,
node_filter=self.node_filter)
task = self.api_client.create_task(design_ref=self.design_ref,
task_action=self.action_name,
node_filter=self.node_filter)
if not self.block:
return task
@ -157,6 +156,7 @@ class TaskBuildData(CliAction):
def invoke(self):
return self.api_client.get_task_build_data(self.task_id)
class TasksDelete(CliAction):
"""Action to delete tasks in database."""

View File

@ -29,17 +29,16 @@ def task():
@task.command(name='create')
@click.option(
'--design-ref', '-d', help='The design reference for this action')
@click.option('--design-ref',
'-d',
help='The design reference for this action')
@click.option('--action', '-a', help='The action to perform')
@click.option(
'--node-names',
'-n',
help='The nodes targeted by this action, comma separated')
@click.option(
'--rack-names',
'-r',
help='The racks targeted by this action, comma separated')
@click.option('--node-names',
'-n',
help='The nodes targeted by this action, comma separated')
@click.option('--rack-names',
'-r',
help='The racks targeted by this action, comma separated')
@click.option(
'--node-tags',
'-t',
@ -49,10 +48,9 @@ def task():
'-b',
help='The CLI will wait until the created task completes before exiting',
default=False)
@click.option(
'--poll-interval',
help='Polling interval to check task status in blocking mode.',
default=15)
@click.option('--poll-interval',
help='Polling interval to check task status in blocking mode.',
default=15)
@click.pass_context
def task_create(ctx,
design_ref=None,
@ -112,8 +110,10 @@ def task_show(ctx, task_id=None, block=False):
@task.command(name='builddata')
@click.option('--task-id', '-t', help='The required task id')
@click.option(
'--output', '-o', help='The output format (yaml|json)', default='yaml')
@click.option('--output',
'-o',
help='The output format (yaml|json)',
default='yaml')
@click.pass_context
def task_builddata(ctx, task_id=None, output='yaml'):
"""Show builddata assoicated with ``task_id``."""
@ -129,17 +129,19 @@ def task_builddata(ctx, task_id=None, output='yaml'):
click.echo(
'Invalid output format {}, defaulting to YAML.'.format(output))
click.echo(
yaml.safe_dump(
task_bd, allow_unicode=True, default_flow_style=False))
yaml.safe_dump(task_bd,
allow_unicode=True,
default_flow_style=False))
@task.command(name='delete')
@click.option('--days', '-d', help='The required number of days to retain tasks')
@click.option('--days',
'-d',
help='The required number of days to retain tasks')
@click.pass_context
def task_delete(ctx, days=None):
"""Delete tasks from database"""
if not days:
ctx.fail('The number of days must be specified using --days or -d')
click.echo(
TasksDelete(ctx.obj['CLIENT'], days=days).invoke())
click.echo(TasksDelete(ctx.obj['CLIENT'], days=days).invoke())

View File

@ -67,43 +67,36 @@ class DrydockConfig(object):
# Logging options
logging_options = [
cfg.StrOpt(
'log_level', default='INFO', help='Global log level for Drydock'),
cfg.StrOpt(
'global_logger_name',
default='drydock_provisioner',
help='Logger name for the top-level logger'),
cfg.StrOpt(
'oobdriver_logger_name',
default='${global_logger_name}.oobdriver',
help='Logger name for OOB driver logging'),
cfg.StrOpt(
'nodedriver_logger_name',
default='${global_logger_name}.nodedriver',
help='Logger name for Node driver logging'),
cfg.StrOpt(
'kubernetesdriver_logger_name',
default='${global_logger_name}.kubernetesdriver',
help='Logger name for Kubernetes driver logging'),
cfg.StrOpt(
'control_logger_name',
default='${global_logger_name}.control',
help='Logger name for API server logging'),
cfg.StrOpt('log_level',
default='INFO',
help='Global log level for Drydock'),
cfg.StrOpt('global_logger_name',
default='drydock_provisioner',
help='Logger name for the top-level logger'),
cfg.StrOpt('oobdriver_logger_name',
default='${global_logger_name}.oobdriver',
help='Logger name for OOB driver logging'),
cfg.StrOpt('nodedriver_logger_name',
default='${global_logger_name}.nodedriver',
help='Logger name for Node driver logging'),
cfg.StrOpt('kubernetesdriver_logger_name',
default='${global_logger_name}.kubernetesdriver',
help='Logger name for Kubernetes driver logging'),
cfg.StrOpt('control_logger_name',
default='${global_logger_name}.control',
help='Logger name for API server logging'),
]
# Database options
database_options = [
cfg.StrOpt(
'database_connect_string',
help='The URI database connect string.'),
cfg.IntOpt(
'pool_size',
default=15,
help='The SQLalchemy database connection pool size.'),
cfg.BoolOpt(
'pool_pre_ping',
default=True,
help='Should DB connections be validated prior to use.'),
cfg.StrOpt('database_connect_string',
help='The URI database connect string.'),
cfg.IntOpt('pool_size',
default=15,
help='The SQLalchemy database connection pool size.'),
cfg.BoolOpt('pool_pre_ping',
default=True,
help='Should DB connections be validated prior to use.'),
cfg.IntOpt(
'pool_timeout',
default=30,
@ -126,9 +119,8 @@ class DrydockConfig(object):
# Options for the boot action framework
bootactions_options = [
cfg.StrOpt(
'report_url',
default='http://localhost:9000/api/v1.0/bootactions/')
cfg.StrOpt('report_url',
default='http://localhost:9000/api/v1.0/bootactions/')
]
# Options for network traffic
@ -176,10 +168,9 @@ class DrydockConfig(object):
'drydock_provisioner.drivers.kubernetes.promenade_driver.driver.PromenadeDriver',
help='Module path string of the Kubernetes driver to enable'),
# TODO(sh8121att) Network driver not yet implemented
cfg.StrOpt(
'network_driver',
default=None,
help='Module path string of the Network driver to enable'),
cfg.StrOpt('network_driver',
default=None,
help='Module path string of the Network driver to enable'),
]
# Timeouts for various tasks specified in minutes
@ -192,36 +183,30 @@ class DrydockConfig(object):
'create_network_template',
default=2,
help='Timeout in minutes for creating site network templates'),
cfg.IntOpt(
'configure_user_credentials',
default=2,
help='Timeout in minutes for creating user credentials'),
cfg.IntOpt(
'identify_node',
default=10,
help='Timeout in minutes for initial node identification'),
cfg.IntOpt('configure_user_credentials',
default=2,
help='Timeout in minutes for creating user credentials'),
cfg.IntOpt('identify_node',
default=10,
help='Timeout in minutes for initial node identification'),
cfg.IntOpt(
'configure_hardware',
default=30,
help=
'Timeout in minutes for node commissioning and hardware configuration'
),
cfg.IntOpt(
'apply_node_networking',
default=5,
help='Timeout in minutes for configuring node networking'),
cfg.IntOpt(
'apply_node_storage',
default=5,
help='Timeout in minutes for configuring node storage'),
cfg.IntOpt(
'apply_node_platform',
default=5,
help='Timeout in minutes for configuring node platform'),
cfg.IntOpt(
'deploy_node',
default=45,
help='Timeout in minutes for deploying a node'),
cfg.IntOpt('apply_node_networking',
default=5,
help='Timeout in minutes for configuring node networking'),
cfg.IntOpt('apply_node_storage',
default=5,
help='Timeout in minutes for configuring node storage'),
cfg.IntOpt('apply_node_platform',
default=5,
help='Timeout in minutes for configuring node platform'),
cfg.IntOpt('deploy_node',
default=45,
help='Timeout in minutes for deploying a node'),
cfg.IntOpt(
'bootaction_final_status',
default=15,
@ -233,10 +218,9 @@ class DrydockConfig(object):
default=30,
help='Timeout in minutes for releasing a node',
),
cfg.IntOpt(
'relabel_node',
default=5,
help='Timeout in minutes for relabeling a node'),
cfg.IntOpt('relabel_node',
default=5,
help='Timeout in minutes for relabeling a node'),
]
def __init__(self):
@ -244,15 +228,15 @@ class DrydockConfig(object):
def register_options(self, enable_keystone=True):
self.conf.register_opts(DrydockConfig.options)
self.conf.register_opts(
DrydockConfig.bootactions_options, group='bootactions')
self.conf.register_opts(DrydockConfig.bootactions_options,
group='bootactions')
self.conf.register_opts(DrydockConfig.logging_options, group='logging')
self.conf.register_opts(DrydockConfig.plugin_options, group='plugins')
self.conf.register_opts(DrydockConfig.network_options, group='network')
self.conf.register_opts(
DrydockConfig.database_options, group='database')
self.conf.register_opts(
DrydockConfig.timeout_options, group='timeouts')
self.conf.register_opts(DrydockConfig.database_options,
group='database')
self.conf.register_opts(DrydockConfig.timeout_options,
group='timeouts')
if enable_keystone:
self.conf.register_opts(
loading.get_auth_plugin_conf_options('password'),

View File

@ -45,13 +45,12 @@ def start_api(state_manager=None, ingester=None, orchestrator=None):
part input
:param orchestrator: Instance of drydock_provisioner.orchestrator.Orchestrator for managing tasks
"""
control_api = falcon.App(
request_type=DrydockRequest,
middleware=[
AuthMiddleware(),
ContextMiddleware(),
LoggingMiddleware()
])
control_api = falcon.App(request_type=DrydockRequest,
middleware=[
AuthMiddleware(),
ContextMiddleware(),
LoggingMiddleware()
])
control_api.add_route('/versions', VersionsResource())
@ -59,11 +58,11 @@ def start_api(state_manager=None, ingester=None, orchestrator=None):
v1_0_routes = [
# API for managing orchestrator tasks
('/health',
HealthResource(
state_manager=state_manager, orchestrator=orchestrator)),
HealthResource(state_manager=state_manager,
orchestrator=orchestrator)),
('/health/extended',
HealthExtendedResource(
state_manager=state_manager, orchestrator=orchestrator)),
HealthExtendedResource(state_manager=state_manager,
orchestrator=orchestrator)),
('/tasks',
TasksResource(state_manager=state_manager,
orchestrator=orchestrator)),
@ -74,15 +73,15 @@ def start_api(state_manager=None, ingester=None, orchestrator=None):
# API for managing site design data
('/designs', DesignsResource(state_manager=state_manager)),
('/designs/{design_id}',
DesignResource(
state_manager=state_manager, orchestrator=orchestrator)),
DesignResource(state_manager=state_manager,
orchestrator=orchestrator)),
('/designs/{design_id}/parts',
DesignsPartsResource(state_manager=state_manager, ingester=ingester)),
('/designs/{design_id}/parts/{kind}',
DesignsPartsKindsResource(state_manager=state_manager)),
('/designs/{design_id}/parts/{kind}/{name}',
DesignsPartResource(
state_manager=state_manager, orchestrator=orchestrator)),
DesignsPartResource(state_manager=state_manager,
orchestrator=orchestrator)),
# API to list current MaaS nodes
('/nodes', NodesResource()),
@ -91,23 +90,23 @@ def start_api(state_manager=None, ingester=None, orchestrator=None):
NodeBuildDataResource(state_manager=state_manager)),
# API to list current node names based
('/nodefilter',
NodeFilterResource(
state_manager=state_manager, orchestrator=orchestrator)),
NodeFilterResource(state_manager=state_manager,
orchestrator=orchestrator)),
# API for nodes to discover their boot actions during curtin install
('/bootactions/nodes/{hostname}/units',
BootactionUnitsResource(
state_manager=state_manager, orchestrator=orchestrator)),
BootactionUnitsResource(state_manager=state_manager,
orchestrator=orchestrator)),
('/bootactions/nodes/{hostname}/files',
BootactionFilesResource(
state_manager=state_manager, orchestrator=orchestrator)),
BootactionFilesResource(state_manager=state_manager,
orchestrator=orchestrator)),
('/bootactions/{action_id}',
BootactionResource(
state_manager=state_manager, orchestrator=orchestrator)),
BootactionResource(state_manager=state_manager,
orchestrator=orchestrator)),
# API to validate schemas
('/validatedesign',
ValidationResource(
state_manager=state_manager, orchestrator=orchestrator)),
ValidationResource(state_manager=state_manager,
orchestrator=orchestrator)),
]
for path, res in v1_0_routes:
@ -122,10 +121,9 @@ class VersionsResource(BaseResource):
"""
def on_get(self, req, resp):
resp.body = self.to_json({
'v1.0': {
resp.text = self.to_json(
{'v1.0': {
'path': '/api/v1.0',
'status': 'stable'
}
})
}})
resp.status = falcon.HTTP_200

View File

@ -22,6 +22,7 @@ import drydock_provisioner.error as errors
class BaseResource(object):
def __init__(self):
self.logger = logging.getLogger('drydock')
@ -52,18 +53,18 @@ class BaseResource(object):
json_body = json.loads(raw_body.decode('utf-8'))
return json_body
except json.JSONDecodeError as jex:
print(
"Invalid JSON in request: \n%s" % raw_body.decode('utf-8'))
print("Invalid JSON in request: \n%s" %
raw_body.decode('utf-8'))
self.error(
req.context,
"Invalid JSON in request: \n%s" % raw_body.decode('utf-8'))
raise errors.InvalidFormat(
"%s: Invalid JSON in body: %s" % (req.path, jex))
raise errors.InvalidFormat("%s: Invalid JSON in body: %s" %
(req.path, jex))
else:
raise errors.InvalidFormat("Requires application/json payload")
def return_error(self, resp, status_code, message="", retry=False):
resp.body = json.dumps({
resp.text = json.dumps({
'type': 'error',
'message': message,
'retry': retry
@ -71,8 +72,12 @@ class BaseResource(object):
resp.status = status_code
def log_error(self, ctx, level, msg):
extra = {'user': 'N/A', 'req_id': 'N/A', 'external_ctx': 'N/A',
'end_user': 'N/A'}
extra = {
'user': 'N/A',
'req_id': 'N/A',
'external_ctx': 'N/A',
'end_user': 'N/A'
}
if ctx is not None:
extra = {
@ -104,6 +109,7 @@ class BaseResource(object):
class StatefulResource(BaseResource):
def __init__(self, state_manager=None, **kwargs):
super(StatefulResource, self).__init__(**kwargs)
@ -119,6 +125,7 @@ class StatefulResource(BaseResource):
class DrydockRequestContext(object):
def __init__(self):
self.log_level = 'ERROR'
self.user = None # Username

View File

@ -76,8 +76,8 @@ class BootactionResource(StatefulResource):
try:
ba_entry = self.state_manager.get_boot_action(action_id)
except Exception as ex:
self.logger.error(
"Error querying for boot action %s" % action_id, exc_info=ex)
self.logger.error("Error querying for boot action %s" % action_id,
exc_info=ex)
raise falcon.HTTPInternalServerError(str(ex))
if ba_entry is None:
@ -103,8 +103,8 @@ class BootactionResource(StatefulResource):
action_id)
for m in json_body.get('details', []):
rm = objects.TaskStatusMessage(
m.get('message'), m.get('error'), 'bootaction', action_id)
rm = objects.TaskStatusMessage(m.get('message'), m.get('error'),
'bootaction', action_id)
for f, v in m.items():
if f not in ['message', 'error']:
rm['extra'] = dict()
@ -124,11 +124,12 @@ class BootactionResource(StatefulResource):
resp.content_type = 'application/json'
ba_entry['task_id'] = str(ba_entry['task_id'])
ba_entry['action_id'] = ulid2.encode_ulid_base32(ba_entry['action_id'])
resp.body = json.dumps(ba_entry)
resp.text = json.dumps(ba_entry)
return
class BootactionAssetsResource(StatefulResource):
def __init__(self, orchestrator=None, **kwargs):
super().__init__(**kwargs)
self.orchestrator = orchestrator
@ -149,8 +150,8 @@ class BootactionAssetsResource(StatefulResource):
try:
ba_ctx = self.state_manager.get_boot_action_context(hostname)
except Exception as ex:
self.logger.error(
"Error locating boot action for %s" % hostname, exc_info=ex)
self.logger.error("Error locating boot action for %s" % hostname,
exc_info=ex)
raise falcon.HTTPNotFound()
if ba_ctx is None:
@ -178,19 +179,19 @@ class BootactionAssetsResource(StatefulResource):
action_id = ba_status.get('action_id')
action_key = ba_status.get('identity_key')
assets.extend(
ba.render_assets(
hostname,
site_design,
action_id,
action_key,
task.design_ref,
type_filter=asset_type_filter))
ba.render_assets(hostname,
site_design,
action_id,
action_key,
task.design_ref,
type_filter=asset_type_filter))
tarball = BootactionUtils.tarbuilder(asset_list=assets)
resp.set_header('Content-Type', 'application/gzip')
resp.set_header(
'Content-Disposition', "attachment; filename=\"%s-%s.tar.gz\""
% (hostname, asset_type))
'Content-Disposition',
"attachment; filename=\"%s-%s.tar.gz\"" %
(hostname, asset_type))
resp.data = tarball
resp.status = falcon.HTTP_200
return
@ -200,16 +201,18 @@ class BootactionAssetsResource(StatefulResource):
class BootactionUnitsResource(BootactionAssetsResource):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def on_get(self, req, resp, hostname):
self.logger.debug(
"Accessing boot action units resource for host %s." % hostname)
self.logger.debug("Accessing boot action units resource for host %s." %
hostname)
self.do_get(req, resp, hostname, 'unit')
class BootactionFilesResource(BootactionAssetsResource):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@ -233,18 +236,17 @@ class BootactionUtils(object):
identity_key = req.get_header('X-Bootaction-Key', default='')
if identity_key == '':
raise falcon.HTTPUnauthorized(
title='Unauthorized',
description='No X-Bootaction-Key',
challenges=['Bootaction-Key'])
raise falcon.HTTPUnauthorized(title='Unauthorized',
description='No X-Bootaction-Key',
challenges=['Bootaction-Key'])
if ba_ctx['identity_key'] != bytes.fromhex(identity_key):
logger.warn(
"Forbidding boot action access - node: %s, identity_key: %s, req header: %s"
% (ba_ctx['node_name'], str(ba_ctx['identity_key']),
str(bytes.fromhex(identity_key))))
raise falcon.HTTPForbidden(
title='Unauthorized', description='Invalid X-Bootaction-Key')
% (ba_ctx['node_name'], str(
ba_ctx['identity_key']), str(bytes.fromhex(identity_key))))
raise falcon.HTTPForbidden(title='Unauthorized',
description='Invalid X-Bootaction-Key')
@staticmethod
def tarbuilder(asset_list=None):
@ -259,8 +261,9 @@ class BootactionUtils(object):
:param asset_list: list of objects.BootActionAsset instances
"""
tarbytes = io.BytesIO()
tarball = tarfile.open(
mode='w:gz', fileobj=tarbytes, format=tarfile.GNU_FORMAT)
tarball = tarfile.open(mode='w:gz',
fileobj=tarbytes,
format=tarfile.GNU_FORMAT)
asset_list = [
a for a in asset_list if a.type != BootactionAssetType.PackageList
]
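
The tarbuilder helper above streams a gzip'd tar archive into memory with
tarfile. A compact runnable sketch of the same pattern, with a made-up payload
in place of the rendered boot action assets:

    import io
    import tarfile

    def build_tarball(files):
        """files: mapping of archive path -> bytes payload."""
        tarbytes = io.BytesIO()
        with tarfile.open(mode='w:gz', fileobj=tarbytes,
                          format=tarfile.GNU_FORMAT) as tarball:
            for path, payload in files.items():
                info = tarfile.TarInfo(name=path)
                info.size = len(payload)
                tarball.addfile(info, io.BytesIO(payload))
        return tarbytes.getvalue()

    blob = build_tarball({'etc/motd': b'provisioned by drydock\n'})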

View File

@ -44,15 +44,14 @@ class DesignsResource(StatefulResource):
try:
designs = list(state.designs.keys())
resp.body = json.dumps(designs)
resp.text = json.dumps(designs)
resp.status = falcon.HTTP_200
except Exception as ex:
self.error(req.context, "Exception raised: %s" % str(ex))
self.return_error(
resp,
falcon.HTTP_500,
message="Error accessing design list",
retry=True)
self.return_error(resp,
falcon.HTTP_500,
message="Error accessing design list",
retry=True)
@policy.ApiEnforcer('physical_provisioner:ingest_data')
def on_post(self, req, resp):
@ -75,19 +74,20 @@ class DesignsResource(StatefulResource):
design.assign_id()
design.create(req.context, self.state_manager)
resp.body = json.dumps(design.obj_to_simple())
resp.text = json.dumps(design.obj_to_simple())
resp.status = falcon.HTTP_201
except errors.StateError:
self.error(req.context, "Error updating persistence")
self.return_error(
resp,
falcon.HTTP_500,
message="Error updating persistence",
retry=True)
self.return_error(resp,
falcon.HTTP_500,
message="Error updating persistence",
retry=True)
except errors.InvalidFormat as fex:
self.error(req.context, str(fex))
self.return_error(
resp, falcon.HTTP_400, message=str(fex), retry=False)
self.return_error(resp,
falcon.HTTP_400,
message=str(fex),
retry=False)
class DesignResource(StatefulResource):
@ -115,17 +115,17 @@ class DesignResource(StatefulResource):
elif source == 'designed':
design = self.orchestrator.get_described_site(design_id)
resp.body = json.dumps(design.obj_to_simple())
resp.text = json.dumps(design.obj_to_simple())
except errors.DesignError:
self.error(req.context, "Design %s not found" % design_id)
self.return_error(
resp,
falcon.HTTP_404,
message="Design %s not found" % design_id,
retry=False)
self.return_error(resp,
falcon.HTTP_404,
message="Design %s not found" % design_id,
retry=False)
class DesignsPartsResource(StatefulResource):
def __init__(self, ingester=None, **kwargs):
super(DesignsPartsResource, self).__init__(**kwargs)
self.ingester = ingester
@ -146,11 +146,10 @@ class DesignsPartsResource(StatefulResource):
self.error(
None,
"DesignsPartsResource POST requires parameter 'ingester'")
self.return_error(
resp,
falcon.HTTP_400,
message="POST requires parameter 'ingester'",
retry=False)
self.return_error(resp,
falcon.HTTP_400,
message="POST requires parameter 'ingester'",
retry=False)
else:
try:
raw_body = req.stream.read(req.content_length or 0)
@ -162,37 +161,34 @@ class DesignsPartsResource(StatefulResource):
design_id=design_id,
context=req.context)
resp.status = falcon.HTTP_201
resp.body = json.dumps(
resp.text = json.dumps(
[x.obj_to_simple() for x in parsed_items])
else:
self.return_error(
resp,
falcon.HTTP_400,
message="Empty body not supported",
retry=False)
self.return_error(resp,
falcon.HTTP_400,
message="Empty body not supported",
retry=False)
except ValueError:
self.return_error(
resp,
falcon.HTTP_500,
message="Error processing input",
retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Error processing input",
retry=False)
except LookupError:
self.return_error(
resp,
falcon.HTTP_400,
message="Ingester %s not registered" % ingester_name,
retry=False)
self.return_error(resp,
falcon.HTTP_400,
message="Ingester %s not registered" %
ingester_name,
retry=False)
@policy.ApiEnforcer('physical_provisioner:ingest_data')
def on_get(self, req, resp, design_id):
try:
design = self.state_manager.get_design(design_id)
except errors.DesignError:
self.return_error(
resp,
falcon.HTTP_404,
message="Design %s nout found" % design_id,
retry=False)
self.return_error(resp,
falcon.HTTP_404,
message="Design %s nout found" % design_id,
retry=False)
part_catalog = []
@ -225,12 +221,13 @@ class DesignsPartsResource(StatefulResource):
'key': n.get_id()
} for n in design.baremetal_nodes])
resp.body = json.dumps(part_catalog)
resp.text = json.dumps(part_catalog)
resp.status = falcon.HTTP_200
return
class DesignsPartsKindsResource(StatefulResource):
def __init__(self, **kwargs):
super(DesignsPartsKindsResource, self).__init__(**kwargs)
self.authorized_roles = ['user']
@ -242,6 +239,7 @@ class DesignsPartsKindsResource(StatefulResource):
class DesignsPartResource(StatefulResource):
def __init__(self, orchestrator=None, **kwargs):
super(DesignsPartResource, self).__init__(**kwargs)
self.authorized_roles = ['user']
@ -273,19 +271,21 @@ class DesignsPartResource(StatefulResource):
part = design.get_baremetal_node(name)
else:
self.error(req.context, "Kind %s unknown" % kind)
self.return_error(
resp,
falcon.HTTP_404,
message="Kind %s unknown" % kind,
retry=False)
self.return_error(resp,
falcon.HTTP_404,
message="Kind %s unknown" % kind,
retry=False)
return
resp.body = json.dumps(part.obj_to_simple())
resp.text = json.dumps(part.obj_to_simple())
except errors.DesignError as dex:
self.error(req.context, str(dex))
self.return_error(
resp, falcon.HTTP_404, message=str(dex), retry=False)
self.return_error(resp,
falcon.HTTP_404,
message=str(dex),
retry=False)
except Exception as exc:
self.error(req.context, str(exc))
self.return_error(
resp, falcon.HTTP_500, message=str(exc), retry=False)
self.return_error(resp, falcon.HTTP_500,
message=str(exc),
retry=False)

View File

@ -40,10 +40,9 @@ class HealthResource(StatefulResource):
"""
Returns 204 on healthy, otherwise 503, without response body.
"""
hc = HealthCheckCombined(
state_manager=self.state_manager,
orchestrator=self.orchestrator,
extended=False)
hc = HealthCheckCombined(state_manager=self.state_manager,
orchestrator=self.orchestrator,
extended=False)
return hc.get(req, resp)
@ -65,10 +64,9 @@ class HealthExtendedResource(StatefulResource):
"""
Returns 200 on success, otherwise 503, with a response body.
"""
hc = HealthCheckCombined(
state_manager=self.state_manager,
orchestrator=self.orchestrator,
extended=True)
hc = HealthCheckCombined(state_manager=self.state_manager,
orchestrator=self.orchestrator,
extended=True)
return hc.get(req, resp)
@ -97,8 +95,8 @@ class HealthCheckCombined(object):
if now is None:
raise Exception('None received from database for now()')
except Exception:
hcm = HealthCheckMessage(
msg='Unable to connect to database', error=True)
hcm = HealthCheckMessage(msg='Unable to connect to database',
error=True)
health_check.add_detail_msg(msg=hcm)
# Test MaaS connection
@ -111,12 +109,12 @@ class HealthCheckCombined(object):
if maas_validation.task.get_status() == ActionResult.Failure:
raise Exception('MaaS task failure')
except Exception:
hcm = HealthCheckMessage(
msg='Unable to connect to MaaS', error=True)
hcm = HealthCheckMessage(msg='Unable to connect to MaaS',
error=True)
health_check.add_detail_msg(msg=hcm)
if self.extended:
resp.body = json.dumps(health_check.to_dict())
resp.text = json.dumps(health_check.to_dict())
if health_check.is_healthy() and self.extended:
resp.status = falcon.HTTP_200

View File

@ -22,6 +22,7 @@ from drydock_provisioner import policy
class AuthMiddleware(object):
def __init__(self):
self.logger = logging.getLogger('drydock')
@ -31,8 +32,8 @@ class AuthMiddleware(object):
ctx.set_policy_engine(policy.policy_engine)
self.logger.debug(
"Request with headers: %s" % ','.join(req.headers.keys()))
self.logger.debug("Request with headers: %s" %
','.join(req.headers.keys()))
auth_status = req.get_header('X-SERVICE-IDENTITY-STATUS')
service = True
@ -78,6 +79,7 @@ class AuthMiddleware(object):
class ContextMiddleware(object):
def __init__(self):
# Setup validation pattern for external marker
UUIDv4_pattern = '^[0-9A-F]{8}-[0-9A-F]{4}-4[0-9A-F]{3}-[89AB][0-9A-F]{3}-[0-9A-F]{12}$'
@ -101,6 +103,7 @@ class ContextMiddleware(object):
class LoggingMiddleware(object):
def __init__(self):
self.logger = logging.getLogger(cfg.CONF.logging.control_logger_name)
@ -111,9 +114,9 @@ class LoggingMiddleware(object):
'external_ctx': req.context.external_marker,
'end_user': req.context.end_user,
}
self.logger.info(
"Request: %s %s %s" % (req.method, req.uri, req.query_string),
extra=extra)
self.logger.info("Request: %s %s %s" %
(req.method, req.uri, req.query_string),
extra=extra)
def process_response(self, req, resp, resource, req_succeeded):
ctx = req.context
@ -124,6 +127,6 @@ class LoggingMiddleware(object):
'end_user': ctx.end_user,
}
resp.append_header('X-Drydock-Req', ctx.request_id)
self.logger.info(
"Response: %s %s - %s" % (req.method, req.uri, resp.status),
extra=extra)
self.logger.info("Response: %s %s - %s" %
(req.method, req.uri, resp.status),
extra=extra)

View File

@ -24,6 +24,7 @@ from .base import BaseResource, StatefulResource
class NodesResource(BaseResource):
def __init__(self):
super().__init__()
@ -41,22 +42,23 @@ class NodesResource(BaseResource):
for m in machine_list:
m.get_power_params()
node_view.append(
dict(
hostname=m.hostname,
memory=m.memory,
cpu_count=m.cpu_count,
status_name=m.status_name,
boot_mac=m.boot_mac,
power_state=m.power_state,
power_address=m.power_parameters.get('power_address'),
boot_ip=m.boot_ip))
dict(hostname=m.hostname,
memory=m.memory,
cpu_count=m.cpu_count,
status_name=m.status_name,
boot_mac=m.boot_mac,
power_state=m.power_state,
power_address=m.power_parameters.get('power_address'),
boot_ip=m.boot_ip))
resp.body = json.dumps(node_view)
resp.text = json.dumps(node_view)
resp.status = falcon.HTTP_200
except Exception as ex:
self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex)
self.return_error(
resp, falcon.HTTP_500, message="Unknown error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Unknown error",
retry=False)
class NodeBuildDataResource(StatefulResource):
@ -68,27 +70,29 @@ class NodeBuildDataResource(StatefulResource):
latest = req.params.get('latest', 'false').upper()
latest = True if latest == 'TRUE' else False
node_bd = self.state_manager.get_build_data(
node_name=hostname, latest=latest)
node_bd = self.state_manager.get_build_data(node_name=hostname,
latest=latest)
if not node_bd:
self.return_error(
resp,
falcon.HTTP_404,
message="No build data found",
retry=False)
self.return_error(resp,
falcon.HTTP_404,
message="No build data found",
retry=False)
else:
node_bd = [bd.to_dict() for bd in node_bd]
resp.status = falcon.HTTP_200
resp.body = json.dumps(node_bd)
resp.text = json.dumps(node_bd)
resp.content_type = falcon.MEDIA_JSON
except Exception as ex:
self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex)
self.return_error(
resp, falcon.HTTP_500, message="Unknown error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Unknown error",
retry=False)
class NodeFilterResource(StatefulResource):
def __init__(self, orchestrator=None, **kwargs):
"""Object initializer.
@ -117,9 +121,11 @@ class NodeFilterResource(StatefulResource):
node_filter=node_filter, site_design=site_design)
resp_list = [n.name for n in nodes if nodes]
resp.body = json.dumps(resp_list)
resp.text = json.dumps(resp_list)
resp.status = falcon.HTTP_200
except Exception as ex:
self.error(req.context, "Unknown error: %s" % str(ex), exc_info=ex)
self.return_error(
resp, falcon.HTTP_500, message="Unknown error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Unknown error",
retry=False)
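For reference, a GET against the nodes resource above returns a JSON list built from those dicts. An illustrative entry follows; every value is a placeholder, only the keys come from the code above:

# Illustrative shape of one entry in the node_view list above.
node_entry = {
    'hostname': 'node01',
    'memory': 262144,
    'cpu_count': 32,
    'status_name': 'Deployed',
    'boot_mac': '52:54:00:aa:bb:cc',
    'power_state': 'on',
    'power_address': '10.0.0.5',
    'boot_ip': '172.16.0.11',
}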

View File

@ -42,14 +42,16 @@ class TasksResource(StatefulResource):
try:
task_model_list = self.state_manager.get_tasks()
task_list = [x.to_dict() for x in task_model_list]
resp.body = json.dumps(task_list)
resp.text = json.dumps(task_list)
resp.status = falcon.HTTP_200
except Exception as ex:
self.error(
req.context,
"Unknown error: %s\n%s" % (str(ex), traceback.format_exc()))
self.return_error(
resp, falcon.HTTP_500, message="Unknown error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Unknown error",
retry=False)
@policy.ApiEnforcer('physical_provisioner:create_task')
def on_post(self, req, resp):
@ -72,19 +74,20 @@ class TasksResource(StatefulResource):
action = json_data.get('action', None)
if supported_actions.get(action, None) is None:
self.error(req.context, "Unsupported action %s" % action)
self.return_error(
resp,
falcon.HTTP_400,
message="Unsupported action %s" % action,
retry=False)
self.return_error(resp,
falcon.HTTP_400,
message="Unsupported action %s" % action,
retry=False)
else:
supported_actions.get(action)(self, req, resp, json_data)
except Exception as ex:
self.error(
req.context,
"Unknown error: %s\n%s" % (str(ex), traceback.format_exc()))
self.return_error(
resp, falcon.HTTP_500, message="Unknown error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Unknown error",
retry=False)
@policy.ApiEnforcer('physical_provisioner:delete_tasks')
def on_delete(self, req, resp):
@ -100,10 +103,10 @@ class TasksResource(StatefulResource):
if not retention_status:
resp.status = falcon.HTTP_404
return
resp.body = "Tables purged successfully."
resp.text = "Tables purged successfully."
except Exception as e:
self.error(req.context, "Unknown error: %s" % (str(e)))
resp.body = "Unexpected error."
resp.text = "Unexpected error."
resp.status = falcon.HTTP_500
return
resp.status = falcon.HTTP_200
@ -118,19 +121,23 @@ class TasksResource(StatefulResource):
req.context,
"Task body ended up in wrong handler: action %s in task_validate_design"
% action)
self.return_error(
resp, falcon.HTTP_500, message="Error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Error",
retry=False)
try:
task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict())
resp.text = json.dumps(task.to_dict())
resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex:
self.error(req.context, ex.msg)
self.return_error(
resp, falcon.HTTP_400, message=ex.msg, retry=False)
self.return_error(resp,
falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:verify_site')
def task_verify_site(self, req, resp, json_data):
@ -142,19 +149,23 @@ class TasksResource(StatefulResource):
req.context,
"Task body ended up in wrong handler: action %s in task_verify_site"
% action)
self.return_error(
resp, falcon.HTTP_500, message="Error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Error",
retry=False)
try:
task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict())
resp.text = json.dumps(task.to_dict())
resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex:
self.error(req.context, ex.msg)
self.return_error(
resp, falcon.HTTP_400, message=ex.msg, retry=False)
self.return_error(resp,
falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:prepare_site')
def task_prepare_site(self, req, resp, json_data):
@ -166,19 +177,23 @@ class TasksResource(StatefulResource):
req.context,
"Task body ended up in wrong handler: action %s in task_prepare_site"
% action)
self.return_error(
resp, falcon.HTTP_500, message="Error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Error",
retry=False)
try:
task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict())
resp.text = json.dumps(task.to_dict())
resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex:
self.error(req.context, ex.msg)
self.return_error(
resp, falcon.HTTP_400, message=ex.msg, retry=False)
self.return_error(resp,
falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:verify_nodes')
def task_verify_nodes(self, req, resp, json_data):
@ -190,19 +205,23 @@ class TasksResource(StatefulResource):
req.context,
"Task body ended up in wrong handler: action %s in task_verify_nodes"
% action)
self.return_error(
resp, falcon.HTTP_500, message="Error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Error",
retry=False)
try:
task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict())
resp.text = json.dumps(task.to_dict())
resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex:
self.error(req.context, ex.msg)
self.return_error(
resp, falcon.HTTP_400, message=ex.msg, retry=False)
self.return_error(resp,
falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:prepare_nodes')
def task_prepare_nodes(self, req, resp, json_data):
@ -214,19 +233,23 @@ class TasksResource(StatefulResource):
req.context,
"Task body ended up in wrong handler: action %s in task_prepare_nodes"
% action)
self.return_error(
resp, falcon.HTTP_500, message="Error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Error",
retry=False)
try:
task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict())
resp.text = json.dumps(task.to_dict())
resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex:
self.error(req.context, ex.msg)
self.return_error(
resp, falcon.HTTP_400, message=ex.msg, retry=False)
self.return_error(resp,
falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:deploy_nodes')
def task_deploy_nodes(self, req, resp, json_data):
@ -238,19 +261,23 @@ class TasksResource(StatefulResource):
req.context,
"Task body ended up in wrong handler: action %s in task_deploy_nodes"
% action)
self.return_error(
resp, falcon.HTTP_500, message="Error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Error",
retry=False)
try:
task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict())
resp.text = json.dumps(task.to_dict())
resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex:
self.error(req.context, ex.msg)
self.return_error(
resp, falcon.HTTP_400, message=ex.msg, retry=False)
self.return_error(resp,
falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:destroy_nodes')
def task_destroy_nodes(self, req, resp, json_data):
@ -262,19 +289,23 @@ class TasksResource(StatefulResource):
req.context,
"Task body ended up in wrong handler: action %s in task_destroy_nodes"
% action)
self.return_error(
resp, falcon.HTTP_500, message="Error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Error",
retry=False)
try:
task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict())
resp.text = json.dumps(task.to_dict())
resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex:
self.error(req.context, ex.msg)
self.return_error(
resp, falcon.HTTP_400, message=ex.msg, retry=False)
self.return_error(resp,
falcon.HTTP_400,
message=ex.msg,
retry=False)
@policy.ApiEnforcer('physical_provisioner:relabel_nodes')
def task_relabel_nodes(self, req, resp, json_data):
@ -286,19 +317,23 @@ class TasksResource(StatefulResource):
req.context,
"Task body ended up in wrong handler: action %s in task_relabel_nodes"
% action)
self.return_error(
resp, falcon.HTTP_500, message="Error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Error",
retry=False)
try:
task = self.create_task(json_data, req.context)
resp.body = json.dumps(task.to_dict())
resp.text = json.dumps(task.to_dict())
resp.append_header('Location',
"/api/v1.0/tasks/%s" % str(task.task_id))
resp.status = falcon.HTTP_201
except errors.InvalidFormat as ex:
self.error(req.context, ex.msg)
self.return_error(
resp, falcon.HTTP_400, message=ex.msg, retry=False)
self.return_error(resp,
falcon.HTTP_400,
message=ex.msg,
retry=False)
def create_task(self, task_body, req_context):
"""General task creation.
@ -320,11 +355,10 @@ class TasksResource(StatefulResource):
raise errors.InvalidFormat(
'Task creation requires fields design_ref, action')
task = self.orchestrator.create_task(
design_ref=design_ref,
action=action,
node_filter=node_filter,
context=req_context)
task = self.orchestrator.create_task(design_ref=design_ref,
action=action,
node_filter=node_filter,
context=req_context)
task.set_status(hd_fields.TaskStatus.Queued)
task.save()
@ -357,11 +391,10 @@ class TaskResource(StatefulResource):
if first_task is None:
self.info(req.context, "Task %s does not exist" % task_id)
self.return_error(
resp,
falcon.HTTP_404,
message="Task %s does not exist" % task_id,
retry=False)
self.return_error(resp,
falcon.HTTP_404,
message="Task %s does not exist" % task_id,
retry=False)
else:
# If layers is passed in then it returns a dict of tasks instead of the task dict.
if layers:
@ -380,12 +413,14 @@ class TaskResource(StatefulResource):
1, first_task)
resp_data['subtask_errors'] = errors
resp.body = json.dumps(resp_data)
resp.text = json.dumps(resp_data)
resp.status = falcon.HTTP_200
except Exception as ex:
self.error(req.context, "Unknown error: %s" % (str(ex)))
self.return_error(
resp, falcon.HTTP_500, message="Unknown error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Unknown error",
retry=False)
def get_task(self, req, resp, task_id, builddata):
try:
@ -403,8 +438,10 @@ class TaskResource(StatefulResource):
return task_dict
except Exception as ex:
self.error(req.context, "Unknown error: %s" % (str(ex)))
self.return_error(
resp, falcon.HTTP_500, message="Unknown error", retry=False)
self.return_error(resp,
falcon.HTTP_500,
message="Unknown error",
retry=False)
def handle_layers(self, req, resp, task_id, builddata, subtask_errors,
layers, first_task):
@ -450,10 +487,10 @@ class TaskBuilddataResource(StatefulResource):
if not bd_list:
resp.status = falcon.HTTP_404
return
resp.body = json.dumps([bd.to_dict() for bd in bd_list])
resp.text = json.dumps([bd.to_dict() for bd in bd_list])
except Exception as e:
self.error(req.context, "Unknown error: %s" % (str(e)))
resp.body = "Unexpected error."
resp.text = "Unexpected error."
resp.status = falcon.HTTP_500
return
resp.status = falcon.HTTP_200
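All of the task handlers above accept a JSON body carrying design_ref and action (plus an optional node_filter) and answer 201 with a Location header pointing at the new task. A hedged client-side sketch; the URL, token, and design reference are placeholders:

import requests

# Placeholder values; the real endpoint and token come from Keystone.
DRYDOCK_URL = 'http://drydock-api.example.com:9000'
TOKEN = 'gAAAA...'

task_body = {
    'action': 'verify_site',  # any supported task action
    'design_ref': 'deckhand+http://deckhand-api/revisions/1/rendered-documents',
}

resp = requests.post('%s/api/v1.0/tasks' % DRYDOCK_URL,
                     json=task_body,
                     headers={'X-Auth-Token': TOKEN})
resp.raise_for_status()
print(resp.headers.get('Location'))  # /api/v1.0/tasks/<task_id>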

View File

@ -25,8 +25,9 @@ def get_internal_api_href(ver):
if ver in supported_versions:
ks_sess = KeystoneUtils.get_session()
url = KeystoneClient.get_endpoint(
"physicalprovisioner", ks_sess=ks_sess, interface='internal')
url = KeystoneClient.get_endpoint("physicalprovisioner",
ks_sess=ks_sess,
interface='internal')
return url
else:
raise ApiError("API version %s unknown." % ver)

View File

@ -62,12 +62,12 @@ class ValidationResource(StatefulResource):
resp_message = validation.to_dict()
resp_message['code'] = 200
resp.status = falcon.HTTP_200
resp.body = json.dumps(resp_message)
resp.text = json.dumps(resp_message)
else:
resp_message = validation.to_dict()
resp_message['code'] = 400
resp.status = falcon.HTTP_400
resp.body = json.dumps(resp_message)
resp.text = json.dumps(resp_message)
except errors.InvalidFormat as e:
err_message = str(e)

View File

@ -64,6 +64,7 @@ class ProviderDriver(object):
# Execute a single task in a separate thread
class DriverActionRunner(Thread):
def __init__(self, action=None):
super().__init__()
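DriverActionRunner above is a thin Thread wrapper that executes one driver action on its own thread. A sketch of that pattern, assuming the action object exposes a start() method:

from threading import Thread


class ActionRunnerSketch(Thread):
    """Run a single driver action on a dedicated thread (sketch)."""

    def __init__(self, action=None):
        super().__init__()
        self.action = action

    def run(self):
        # Thread.start() schedules run(); delegate to the action.
        self.action.start()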

View File

@ -22,6 +22,7 @@ from drydock_provisioner.orchestrator.actions.orchestrator import BaseAction
class PromenadeAction(BaseAction):
def __init__(self, *args, prom_client=None):
super().__init__(*args)
@ -42,11 +43,10 @@ class RelabelNode(PromenadeAction):
try:
site_design = self._load_site_design()
except errors.OrchestratorError:
self.task.add_status_msg(
msg="Error loading site design.",
error=True,
ctx='NA',
ctx_type='NA')
self.task.add_status_msg(msg="Error loading site design.",
error=True,
ctx='NA',
ctx_type='NA')
self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.failure()
self.task.save()
@ -58,14 +58,16 @@ class RelabelNode(PromenadeAction):
for n in nodes:
# Relabel node through Promenade
try:
self.logger.info(
"Relabeling node %s with node label data." % n.name)
self.logger.info("Relabeling node %s with node label data." %
n.name)
labels_dict = n.get_node_labels()
msg = "Set labels %s for node %s" % (str(labels_dict), n.name)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=False,
ctx=n.name,
ctx_type='node')
# Call promenade to invoke relabel node
self.promenade_client.relabel_node(n.get_id(), labels_dict)
@ -74,8 +76,10 @@ class RelabelNode(PromenadeAction):
msg = "Error relabeling node %s with label data" % n.name
self.logger.warning(msg + ": " + str(ex))
self.task.failure(focus=n.get_id())
self.task.add_status_msg(
msg=msg, error=True, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=True,
ctx=n.name,
ctx_type='node')
continue
self.task.set_status(hd_fields.TaskStatus.Complete)

View File

@ -61,8 +61,9 @@ class PromenadeDriver(KubernetesDriver):
raise errors.DriverError("Invalid task %s" % (task_id))
if task.action not in self.supported_actions:
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
raise errors.DriverError(
"Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running)
task.save()
@ -71,11 +72,10 @@ class PromenadeDriver(KubernetesDriver):
if task.retry > 0:
msg = "Retrying task %s on previous failed entities." % str(
task.get_id())
task.add_status_msg(
msg=msg,
error=False,
ctx=str(task.get_id()),
ctx_type='task')
task.add_status_msg(msg=msg,
error=False,
ctx=str(task.get_id()),
ctx_type='task')
target_nodes = self.orchestrator.get_target_nodes(
task, failures=True)
else:
@ -108,22 +108,20 @@ class PromenadeDriver(KubernetesDriver):
for t, f in subtask_futures.items():
if not f.done():
task.add_status_msg(
"Subtask timed out before completing.",
error=True,
ctx=str(uuid.UUID(bytes=t)),
ctx_type='task')
task.add_status_msg("Subtask timed out before completing.",
error=True,
ctx=str(uuid.UUID(bytes=t)),
ctx_type='task')
task.failure()
else:
if f.exception():
msg = ("Subtask %s raised unexpected exception: %s" %
(str(uuid.UUID(bytes=t)), str(f.exception())))
self.logger.error(msg, exc_info=f.exception())
task.add_status_msg(
msg=msg,
error=True,
ctx=str(uuid.UUID(bytes=t)),
ctx_type='task')
task.add_status_msg(msg=msg,
error=True,
ctx=str(uuid.UUID(bytes=t)),
ctx_type='task')
task.failure()
task.bubble_results()
@ -138,14 +136,14 @@ class PromenadeDriver(KubernetesDriver):
prom_client=prom_client)
action.start()
except Exception as e:
msg = ("Subtask for action %s raised unexpected exception: %s"
% (task.action, str(e)))
msg = (
"Subtask for action %s raised unexpected exception: %s" %
(task.action, str(e)))
self.logger.error(msg, exc_info=e)
task.add_status_msg(
msg=msg,
error=True,
ctx=str(task.get_id()),
ctx_type='task')
task.add_status_msg(msg=msg,
error=True,
ctx=str(task.get_id()),
ctx_type='task')
task.failure()
task.set_status(hd_fields.TaskStatus.Complete)

View File

@ -79,8 +79,9 @@ class PromenadeSession(object):
url = self.base_url + route
self.logger.debug('GET ' + url)
self.logger.debug('Query Params: ' + str(query))
resp = self.__session.get(
url, params=query, timeout=self._timeout(timeout))
resp = self.__session.get(url,
params=query,
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh:
self.set_auth()
@ -109,21 +110,19 @@ class PromenadeSession(object):
self.logger.debug('PUT ' + url)
self.logger.debug('Query Params: ' + str(query))
if body is not None:
self.logger.debug(
"Sending PUT with explicit body: \n%s" % body)
resp = self.__session.put(
self.base_url + endpoint,
params=query,
data=body,
timeout=self._timeout(timeout))
self.logger.debug("Sending PUT with explicit body: \n%s" %
body)
resp = self.__session.put(self.base_url + endpoint,
params=query,
data=body,
timeout=self._timeout(timeout))
else:
self.logger.debug(
"Sending PUT with JSON body: \n%s" % str(data))
resp = self.__session.put(
self.base_url + endpoint,
params=query,
json=data,
timeout=self._timeout(timeout))
self.logger.debug("Sending PUT with JSON body: \n%s" %
str(data))
resp = self.__session.put(self.base_url + endpoint,
params=query,
json=data,
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh:
self.set_auth()
auth_refresh = True
@ -151,21 +150,19 @@ class PromenadeSession(object):
self.logger.debug('POST ' + url)
self.logger.debug('Query Params: ' + str(query))
if body is not None:
self.logger.debug(
"Sending POST with explicit body: \n%s" % body)
resp = self.__session.post(
self.base_url + endpoint,
params=query,
data=body,
timeout=self._timeout(timeout))
self.logger.debug("Sending POST with explicit body: \n%s" %
body)
resp = self.__session.post(self.base_url + endpoint,
params=query,
data=body,
timeout=self._timeout(timeout))
else:
self.logger.debug(
"Sending POST with JSON body: \n%s" % str(data))
resp = self.__session.post(
self.base_url + endpoint,
params=query,
json=data,
timeout=self._timeout(timeout))
self.logger.debug("Sending POST with JSON body: \n%s" %
str(data))
resp = self.__session.post(self.base_url + endpoint,
params=query,
json=data,
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh:
self.set_auth()
auth_refresh = True
@ -284,9 +281,9 @@ class PromenadeClient(object):
raise errors.ClientUnauthorizedError(
"Unauthorized access to %s, include valid token." % resp.url)
elif resp.status_code == 403:
raise errors.ClientForbiddenError(
"Forbidden access to %s" % resp.url)
raise errors.ClientForbiddenError("Forbidden access to %s" %
resp.url)
elif not resp.ok:
raise errors.ClientError(
"Error - received %d: %s" % (resp.status_code, resp.text),
code=resp.status_code)
raise errors.ClientError("Error - received %d: %s" %
(resp.status_code, resp.text),
code=resp.status_code)
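The get/put/post methods above share one control flow: on a 401, refresh the Keystone token once and replay the request; a second 401 goes back to the caller. A condensed sketch of that pattern, with set_auth() stubbed out:

import requests


class SessionSketch:
    """Retry-once-on-401 pattern used by PromenadeSession above."""

    def __init__(self, base_url):
        self.base_url = base_url
        self._session = requests.Session()

    def set_auth(self):
        # Stub: a real implementation fetches a fresh Keystone token.
        self._session.headers['X-Auth-Token'] = 'refreshed-token'

    def get(self, route, query=None, auth_refresh=False):
        resp = self._session.get(self.base_url + route,
                                 params=query,
                                 timeout=(5, 30))
        if resp.status_code == 401 and not auth_refresh:
            # Refresh credentials once, then replay the request.
            self.set_auth()
            return self.get(route, query=query, auth_refresh=True)
        return resp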

View File

@ -26,6 +26,7 @@ import drydock_provisioner.error as errors
class MaasOauth(req_auth.AuthBase):
def __init__(self, apikey):
self.consumer_key, self.token_key, self.token_secret = apikey.split(
':')
@ -55,18 +56,19 @@ class MaasOauth(req_auth.AuthBase):
class MaasRequestFactory(object):
def __init__(self, base_url, apikey):
# The URL in the config should end in /MAAS/, but the api is behind /MAAS/api/2.0/
self.base_url = base_url + "/api/2.0/"
self.apikey = apikey
# Adapter for maas for request retries
retry_strategy = Retry(
total=3,
status_forcelist=[429, 500, 502, 503, 504],
method_whitelist=["HEAD", "GET", "POST", "PUT", "DELETE",
"OPTIONS", "TRACE"]
)
retry_strategy = Retry(total=3,
status_forcelist=[429, 500, 502, 503, 504],
method_whitelist=[
"HEAD", "GET", "POST", "PUT", "DELETE",
"OPTIONS", "TRACE"
])
self.maas_adapter = HTTPAdapter(max_retries=retry_strategy)
self.signer = MaasOauth(apikey)
@ -109,8 +111,8 @@ class MaasRequestFactory(object):
except requests.Timeout:
raise errors.TransientDriverError("Timeout connection to MaaS")
except Exception as ex:
raise errors.PersistentDriverError(
"Error accessing MaaS: %s" % str(ex))
raise errors.PersistentDriverError("Error accessing MaaS: %s" %
str(ex))
if resp.status_code in [401, 403]:
raise errors.PersistentDriverError(
@ -149,15 +151,15 @@ class MaasRequestFactory(object):
str(i).encode('utf-8')).decode('utf-8')
content_type = 'text/plain; charset="utf-8"'
part_headers = {'Content-Transfer-Encoding': 'base64'}
files_tuples.append((k, (None, value, content_type,
part_headers)))
files_tuples.append(
(k, (None, value, content_type, part_headers)))
else:
value = base64.b64encode(
str(v).encode('utf-8')).decode('utf-8')
content_type = 'text/plain; charset="utf-8"'
part_headers = {'Content-Transfer-Encoding': 'base64'}
files_tuples.append((k, (None, value, content_type,
part_headers)))
files_tuples.append(
(k, (None, value, content_type, part_headers)))
kwargs['files'] = files_tuples
params = kwargs.pop('params', None)
@ -174,13 +176,12 @@ class MaasRequestFactory(object):
if timeout is None:
timeout = (5, 60)
request = requests.Request(
method=method,
url=self.base_url + endpoint,
auth=self.signer,
headers=headers,
params=params,
**kwargs)
request = requests.Request(method=method,
url=self.base_url + endpoint,
auth=self.signer,
headers=headers,
params=params,
**kwargs)
prepared_req = self.http_session.prepare_request(request)
@ -191,6 +192,6 @@ class MaasRequestFactory(object):
"Received error response - URL: %s %s - RESPONSE: %s" %
(prepared_req.method, prepared_req.url, resp.status_code))
self.logger.debug("Response content: %s" % resp.text)
raise errors.DriverError(
"MAAS Error: %s - %s" % (resp.status_code, resp.text))
raise errors.DriverError("MAAS Error: %s - %s" %
(resp.status_code, resp.text))
return resp
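One caveat on the retry strategy above: urllib3 deprecated Retry's method_whitelist argument in 1.26 and removed it in 2.0, where it is spelled allowed_methods. A version-tolerant sketch of the same configuration:

from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

RETRIED_METHODS = ["HEAD", "GET", "POST", "PUT", "DELETE", "OPTIONS", "TRACE"]

try:
    # urllib3 >= 1.26 spells the option allowed_methods.
    retry_strategy = Retry(total=3,
                           status_forcelist=[429, 500, 502, 503, 504],
                           allowed_methods=RETRIED_METHODS)
except TypeError:
    # Older urllib3 releases only understand method_whitelist.
    retry_strategy = Retry(total=3,
                           status_forcelist=[429, 500, 502, 503, 504],
                           method_whitelist=RETRIED_METHODS)

maas_adapter = HTTPAdapter(max_retries=retry_strategy)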

View File

@ -45,19 +45,21 @@ from .actions.node import ConfigureNodeProvisioner
class MaasNodeDriver(NodeDriver):
maasdriver_options = [
cfg.StrOpt(
'maas_api_key', help='The API key for accessing MaaS',
secret=True),
cfg.StrOpt('maas_api_key',
help='The API key for accessing MaaS',
secret=True),
cfg.StrOpt('maas_api_url', help='The URL for accessing MaaS API'),
cfg.BoolOpt(
'use_node_oob_params',
default=False,
help='Update MAAS to use the provided Node OOB params, overwriting discovered values',
help=
'Update MAAS to use the provided Node OOB params, overwriting discovered values',
),
cfg.BoolOpt(
'skip_bmc_config',
default=False,
help='Skip BMC reconfiguration during commissioning (requires MAAS 2.7+)',
help=
'Skip BMC reconfiguration during commissioning (requires MAAS 2.7+)',
),
cfg.IntOpt(
'poll_interval',
@ -105,8 +107,8 @@ class MaasNodeDriver(NodeDriver):
def __init__(self, **kwargs):
super().__init__(**kwargs)
cfg.CONF.register_opts(
MaasNodeDriver.maasdriver_options, group=MaasNodeDriver.driver_key)
cfg.CONF.register_opts(MaasNodeDriver.maasdriver_options,
group=MaasNodeDriver.driver_key)
self.logger = logging.getLogger(
cfg.CONF.logging.nodedriver_logger_name)
@ -139,8 +141,9 @@ class MaasNodeDriver(NodeDriver):
raise errors.DriverError("Invalid task %s" % (task_id))
if task.action not in self.supported_actions:
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
raise errors.DriverError(
"Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running)
task.save()
@ -149,11 +152,10 @@ class MaasNodeDriver(NodeDriver):
if task.retry > 0:
msg = "Retrying task %s on previous failed entities." % str(
task.get_id())
task.add_status_msg(
msg=msg,
error=False,
ctx=str(task.get_id()),
ctx_type='task')
task.add_status_msg(msg=msg,
error=False,
ctx=str(task.get_id()),
ctx_type='task')
target_nodes = self.orchestrator.get_target_nodes(
task, failures=True)
else:
@ -197,10 +199,9 @@ class MaasNodeDriver(NodeDriver):
task.failure()
else:
if f.exception():
self.logger.error(
"Uncaught exception in subtask %s." % str(
uuid.UUID(bytes=t)),
exc_info=f.exception())
self.logger.error("Uncaught exception in subtask %s." %
str(uuid.UUID(bytes=t)),
exc_info=f.exception())
task.failure()
task.bubble_results()
task.align_result()
@ -216,14 +217,14 @@ class MaasNodeDriver(NodeDriver):
maas_client=maas_client)
action.start()
except Exception as e:
msg = ("Subtask for action %s raised unexpected exception: %s"
% (task.action, str(e)))
msg = (
"Subtask for action %s raised unexpected exception: %s" %
(task.action, str(e)))
self.logger.error(msg, exc_info=e)
task.add_status_msg(
msg=msg,
error=True,
ctx=str(task.get_id()),
ctx_type='task')
task.add_status_msg(msg=msg,
error=True,
ctx=str(task.get_id()),
ctx_type='task')
task.failure()
task.set_status(hd_fields.TaskStatus.Complete)

View File

@ -218,8 +218,9 @@ class ResourceCollectionBase(object):
res.set_resource_id(resp_json.get('id'))
return res
raise errors.DriverError("Failed updating MAAS url %s - return code %s"
% (url, resp.status_code))
raise errors.DriverError(
"Failed updating MAAS url %s - return code %s" %
(url, resp.status_code))
"""
Append a resource instance to the list locally only

View File

@ -93,8 +93,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='format', files=data)
if not resp.ok:
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS error: %s - %s" %
(resp.status_code, resp.text))
self.refresh()
except Exception as ex:
@ -126,8 +126,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='unformat')
if not resp.ok:
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS error: %s - %s" %
(resp.status_code, resp.text))
self.refresh()
except Exception as ex:
msg = "Error: unformat of device %s on node %s failed: %s" \
@ -156,8 +156,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='mount', files=data)
if not resp.ok:
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS error: %s - %s" %
(resp.status_code, resp.text))
self.refresh()
except Exception as ex:
@ -183,8 +183,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='unmount')
if not resp.ok:
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS error: %s - %s" %
(resp.status_code, resp.text))
self.refresh()
except Exception as ex:
@ -202,8 +202,8 @@ class BlockDevice(model_base.ResourceBase):
resp = self.api_client.post(url, op='set_boot_disk')
if not resp.ok:
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS error: %s - %s" %
(resp.status_code, resp.text))
self.refresh()
except Exception as ex:

View File

@ -37,8 +37,8 @@ class Fabric(model_base.ResourceBase):
return
def refresh_vlans(self):
self.vlans = model_vlan.Vlans(
self.api_client, fabric_id=self.resource_id)
self.vlans = model_vlan.Vlans(self.api_client,
fabric_id=self.resource_id)
self.vlans.refresh()
def set_resource_id(self, res_id):

View File

@ -85,8 +85,8 @@ class Interface(model_base.ResourceBase):
fabric_vlan = fabric.vlans.singleton({'vid': 0})
if fabric_vlan is None:
self.logger.warning(
"Cannot locate untagged VLAN on fabric %s" % (fabric_id))
self.logger.warning("Cannot locate untagged VLAN on fabric %s" %
(fabric_id))
raise errors.DriverError(
"Cannot locate untagged VLAN on fabric %s" % (fabric_id))
@ -112,8 +112,8 @@ class Interface(model_base.ResourceBase):
"""Disconnect this interface from subnets and VLANs."""
url = self.interpolate_url()
self.logger.debug(
"Disconnecting interface %s from networks." % (self.name))
self.logger.debug("Disconnecting interface %s from networks." %
(self.name))
resp = self.api_client.post(url, op='disconnect')
if not resp.ok:
@ -242,7 +242,8 @@ class Interface(model_base.ResourceBase):
:return: true if this interface will respond to this MAC
"""
if mac_address.replace(':', '').upper() == self.mac_address.replace(':', '').upper():
if mac_address.replace(':', '').upper() == self.mac_address.replace(
':', '').upper():
return True
return False
@ -311,10 +312,10 @@ class Interfaces(model_base.ResourceCollectionBase):
parent_iface = self.singleton({'name': parent_name})
if parent_iface is None:
self.logger.error(
"Cannot locate parent interface %s" % (parent_name))
raise errors.DriverError(
"Cannot locate parent interface %s" % (parent_name))
self.logger.error("Cannot locate parent interface %s" %
(parent_name))
raise errors.DriverError("Cannot locate parent interface %s" %
(parent_name))
if parent_iface.vlan is None:
self.logger.error(
@ -324,8 +325,8 @@ class Interfaces(model_base.ResourceCollectionBase):
"Cannot create VLAN interface on disconnected parent %s" %
(parent_iface.resource_id))
vlans = maas_vlan.Vlans(
self.api_client, fabric_id=parent_iface.fabric_id)
vlans = maas_vlan.Vlans(self.api_client,
fabric_id=parent_iface.fabric_id)
vlans.refresh()
vlan = vlans.singleton({'vid': vlan_tag})

View File

@ -72,5 +72,6 @@ class IpRanges(model_base.ResourceCollectionBase):
res.set_resource_id(resp_json.get('id'))
return res
raise errors.DriverError("Failed updating MAAS url %s - return code %s"
% (url, resp.status_code))
raise errors.DriverError(
"Failed updating MAAS url %s - return code %s" %
(url, resp.status_code))

View File

@ -31,6 +31,7 @@ LOG = logging.getLogger(__name__)
power_lock = Lock()
power_cv = Condition(lock=power_lock)
class Machine(model_base.ResourceBase):
resource_url = 'machines/{resource_id}/'
@ -62,8 +63,8 @@ class Machine(model_base.ResourceBase):
api_client, system_id=self.resource_id)
self.volume_groups.refresh()
except Exception:
self.logger.warning(
"Failed load node %s volume groups." % (self.resource_id))
self.logger.warning("Failed load node %s volume groups." %
(self.resource_id))
else:
self.interfaces = None
self.block_devices = None
@ -123,28 +124,28 @@ class Machine(model_base.ResourceBase):
Removes all the volume groups/logical volumes and all the physical
device partitions on this machine.
"""
self.logger.info(
"Resetting storage configuration on node %s" % (self.resource_id))
self.logger.info("Resetting storage configuration on node %s" %
(self.resource_id))
if self.volume_groups is not None and self.volume_groups.len() > 0:
for vg in self.volume_groups:
self.logger.debug("Removing VG %s" % vg.name)
vg.delete()
else:
self.logger.debug(
"No VGs configured on node %s" % (self.resource_id))
self.logger.debug("No VGs configured on node %s" %
(self.resource_id))
if self.block_devices is not None:
for d in self.block_devices:
if d.partitions is not None and d.partitions.len() > 0:
self.logger.debug(
"Clearing partitions on device %s" % d.name)
self.logger.debug("Clearing partitions on device %s" %
d.name)
d.clear_partitions()
else:
self.logger.debug(
"No partitions found on device %s" % d.name)
self.logger.debug("No partitions found on device %s" %
d.name)
else:
self.logger.debug(
"No block devices found on node %s" % (self.resource_id))
self.logger.debug("No block devices found on node %s" %
(self.resource_id))
def set_storage_layout(self,
layout_type='flat',
@ -199,12 +200,13 @@ class Machine(model_base.ResourceBase):
if root_lv_name:
data['lv_name'] = root_lv_name
resp = self.api_client.post(
url, op='set_storage_layout', files=data)
resp = self.api_client.post(url,
op='set_storage_layout',
files=data)
if not resp.ok:
raise Exception(
"MAAS Error: %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS Error: %s - %s" %
(resp.status_code, resp.text))
except Exception as ex:
msg = "Error: failed configuring node %s storage layout: %s" % (
self.resource_id, str(ex))
@ -356,10 +358,9 @@ class Machine(model_base.ResourceBase):
:param str result_type: the type of results to return. One of
``all``, ``commissioning``, ``testing``, ``deploy``
"""
node_results = maas_nr.NodeResults(
self.api_client,
system_id_list=[self.resource_id],
result_type=result_type)
node_results = maas_nr.NodeResults(self.api_client,
system_id_list=[self.resource_id],
result_type=result_type)
node_results.refresh()
return node_results
@ -375,8 +376,9 @@ class Machine(model_base.ResourceBase):
"""
url = self.interpolate_url()
resp = self.api_client.post(
url, op='set_workload_annotations', files={key: value})
resp = self.api_client.post(url,
op='set_workload_annotations',
files={key: value})
if resp.status_code != 200:
self.logger.error(
@ -406,29 +408,26 @@ class Machine(model_base.ResourceBase):
if kwargs:
power_params = dict()
self.logger.debug("Setting node power type to %s." % power_type)
self.logger.debug("Setting node power type to %s." %
power_type)
self.power_type = power_type
power_params['power_type'] = power_type
for k, v in kwargs.items():
power_params['power_parameters_' + k] = v
self.logger.debug(
"Updating node %s power parameters: %s"
% (
self.hostname,
str(
{
**power_params,
**{
k: "<redacted>"
for k in power_params
if k in ["power_parameters_power_pass"]
},
}
),
)
)
self.logger.debug("Updating node %s power parameters: %s" % (
self.hostname,
str({
**power_params,
**{
k: "<redacted>"
for k in power_params if k in [
"power_parameters_power_pass"
]
},
}),
))
resp = self.api_client.put(url, files=power_params)
if resp.status_code == 200:
@ -448,8 +447,9 @@ class Machine(model_base.ResourceBase):
with power_cv:
url = self.interpolate_url()
self.logger.debug("Resetting node power type for machine {}".format(
self.resource_id))
self.logger.debug(
"Resetting node power type for machine {}".format(
self.resource_id))
self.power_type = 'manual'
power_params = {'power_type': 'manual'}
resp = self.api_client.put(url, files=power_params)
@ -482,12 +482,11 @@ class Machine(model_base.ResourceBase):
'virsh',
power_address=oob_params.get('libvirt_uri'),
power_id=n.name)
elif use_node_oob_params and (n.oob_type == "ipmi" or n.oob_type == "redfish"):
elif use_node_oob_params and (n.oob_type == "ipmi"
or n.oob_type == "redfish"):
self.logger.debug(
"Updating node {} MaaS power parameters for {}.".format(
n.name, n.oob_type
)
)
n.name, n.oob_type))
oob_params = n.oob_parameters
oob_network = oob_params.get("network")
oob_address = n.get_network_address(oob_network)
@ -585,21 +584,20 @@ class Machines(model_base.ResourceCollectionBase):
url = self.interpolate_url()
resp = self.api_client.post(
url, op='allocate', files={'system_id': node.resource_id})
resp = self.api_client.post(url,
op='allocate',
files={'system_id': node.resource_id})
if not resp.ok:
self.logger.error(
"Error acquiring node, MaaS returned %s" % resp.status_code)
self.logger.error("Error acquiring node, MaaS returned %s" %
resp.status_code)
self.logger.debug("MaaS response: %s" % resp.text)
raise errors.DriverError(
"Error acquiring node, MaaS returned %s" % resp.status_code)
raise errors.DriverError("Error acquiring node, MaaS returned %s" %
resp.status_code)
return node
def identify_baremetal_node(self,
node_model,
probably_exists=True):
def identify_baremetal_node(self, node_model, probably_exists=True):
"""Find MaaS node resource matching Drydock BaremetalNode.
Performs one or more queries to the MaaS API to find a Machine matching
@ -642,8 +640,8 @@ class Machines(model_base.ResourceCollectionBase):
maas_node = self.find_node_with_mac(node_model.boot_mac)
if maas_node is None:
self.logger.info(
"Could not locate node %s in MaaS" % node_model.name)
self.logger.info("Could not locate node %s in MaaS" %
node_model.name)
else:
self.logger.debug("Found MaaS resource %s matching Node %s" %
(maas_node.resource_id, node_model.get_id()))
@ -656,11 +654,8 @@ class Machines(model_base.ResourceCollectionBase):
# query the MaaS API for machines with a matching mac address.
# this call returns a json list, each member representing a complete
# Machine
self.logger.debug(
"Finding {} with hostname: {}".format(
self.collection_resource.__name__, hostname
)
)
self.logger.debug("Finding {} with hostname: {}".format(
self.collection_resource.__name__, hostname))
resp = self.api_client.get(url, params={"hostname": hostname})
@ -675,9 +670,9 @@ class Machines(model_base.ResourceCollectionBase):
hostname,
node.get("system_id"),
node.get("hostname"),
)
)
return self.collection_resource.from_dict(self.api_client, node)
))
return self.collection_resource.from_dict(
self.api_client, node)
return None
@ -687,11 +682,8 @@ class Machines(model_base.ResourceCollectionBase):
# query the MaaS API for all power parameters at once.
# this call returns a json dict, mapping system id to power parameters
self.logger.debug(
"Finding {} with power address: {}".format(
self.collection_resource.__name__, power_address
)
)
self.logger.debug("Finding {} with power address: {}".format(
self.collection_resource.__name__, power_address))
resp = self.api_client.get(url, op="power_parameters")
@ -700,22 +692,22 @@ class Machines(model_base.ResourceCollectionBase):
for system_id, power_params in json_dict.items():
self.logger.debug(
"Finding {} with power address: {}: Considering: {}: {}".format(
"Finding {} with power address: {}: Considering: {}: {}".
format(
self.collection_resource.__name__,
power_address,
system_id,
power_params.get("power_address"),
)
)
))
if power_params.get("power_address") == power_address:
self.logger.debug(
"Finding {} with power address: {}: Found: {}: {}".format(
"Finding {} with power address: {}: Found: {}: {}".
format(
self.collection_resource.__name__,
power_address,
system_id,
power_params.get("power_address"),
)
)
))
# the API result isn't quite enough to construct a Machine,
# so construct one with the system_id and then refresh
@ -758,8 +750,8 @@ class Machines(model_base.ResourceCollectionBase):
field = k[13:]
result = [
i for i in result if str(
getattr(i, 'power_parameters', {}).
get(field, None)) == str(v)
getattr(i, 'power_parameters', {}).get(field, None))
== str(v)
]
else:
result = [
@ -785,8 +777,9 @@ class Machines(model_base.ResourceCollectionBase):
res.set_resource_id(resp_json.get('system_id'))
return res
raise errors.DriverError("Failed updating MAAS url %s - return code %s"
% (url, resp.status_code))
raise errors.DriverError(
"Failed updating MAAS url %s - return code %s" %
(url, resp.status_code))
def empty_refresh(self):
"""Check connectivity to MAAS machines API

View File

@ -77,8 +77,8 @@ class Partition(model_base.ResourceBase):
resp = self.api_client.post(url, op='format', files=data)
if not resp.ok:
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS error: %s - %s" %
(resp.status_code, resp.text))
self.refresh()
except Exception as ex:
@ -109,8 +109,8 @@ class Partition(model_base.ResourceBase):
(self.name, self.system_id))
resp = self.api_client.post(url, op='unformat')
if not resp.ok:
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS error: %s - %s" %
(resp.status_code, resp.text))
self.refresh()
except Exception as ex:
msg = "Error: unformat of device %s on node %s failed: %s" \
@ -138,8 +138,8 @@ class Partition(model_base.ResourceBase):
(self.resource_id, self.system_id, mount_point))
resp = self.api_client.post(url, op='mount', files=data)
if not resp.ok:
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS error: %s - %s" %
(resp.status_code, resp.text))
self.refresh()
except Exception as ex:
msg = "Error: mount of device %s on node %s failed: %s" \
@ -163,8 +163,8 @@ class Partition(model_base.ResourceBase):
(self.name, self.system_id))
resp = self.api_client.post(url, op='unmount')
if not resp.ok:
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS error: %s - %s" %
(resp.status_code, resp.text))
self.refresh()
except Exception as ex:
msg = "Error: unmount of device %s on node %s failed: %s" \
@ -180,8 +180,8 @@ class Partition(model_base.ResourceBase):
(self.resource_id, self.system_id))
resp = self.api_client.post(url, op='set_boot_disk')
if not resp.ok:
raise Exception(
"MAAS error: %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS error: %s - %s" %
(resp.status_code, resp.text))
self.refresh()
except Exception as ex:
msg = "Error: setting device %s on node %s to boot failed: %s" \

View File

@ -66,8 +66,9 @@ class RackController(maas_machine.Machine):
def update_identity(self, n, domain="local"):
"""Cannot update rack controller identity."""
self.logger.debug("Cannot update rack controller identity for %s, no-op." %
self.hostname)
self.logger.debug(
"Cannot update rack controller identity for %s, no-op." %
self.hostname)
return
def is_healthy(self):
@ -82,6 +83,7 @@ class RackController(maas_machine.Machine):
healthy = False
return healthy
class RackControllers(maas_machine.Machines):
"""Model for a collection of rack controllers."""

View File

@ -44,10 +44,8 @@ class Subnet(model_base.ResourceBase):
current_ranges.refresh()
exists = current_ranges.query({
'start_ip':
addr_range.get('start', None),
'end_ip':
addr_range.get('end', None)
'start_ip': addr_range.get('start', None),
'end_ip': addr_range.get('end', None)
})
if len(exists) > 0:
@ -90,12 +88,11 @@ class Subnet(model_base.ResourceBase):
if current_route is not None:
current_route.delete()
new_route = maas_route.StaticRoute(
self.api_client,
source=self.resource_id,
destination=dest_subnet,
gateway_ip=gateway,
metric=metric)
new_route = maas_route.StaticRoute(self.api_client,
source=self.resource_id,
destination=dest_subnet,
gateway_ip=gateway,
metric=metric)
new_route = sr.add(new_route)
return new_route

View File

@ -64,13 +64,14 @@ class Tag(model_base.ResourceBase):
"""
if system_id in self.get_applied_nodes():
self.logger.debug(
"Tag %s already applied to node %s" % (self.name, system_id))
self.logger.debug("Tag %s already applied to node %s" %
(self.name, system_id))
else:
url = self.interpolate_url()
resp = self.api_client.post(
url, op='update_nodes', files={'add': system_id})
resp = self.api_client.post(url,
op='update_nodes',
files={'add': system_id})
if not resp.ok:
self.logger.error(

View File

@ -74,10 +74,13 @@ class Vlan(model_base.ResourceBase):
raise RackControllerConflict exception.
"""
if not self.primary_rack or self.primary_rack == rack_id:
self.logger.debug("Setting primary DHCP controller %s on VLAN %s", rack_id, self.resource_id)
self.logger.debug("Setting primary DHCP controller %s on VLAN %s",
rack_id, self.resource_id)
self.primary_rack = rack_id
elif not self.secondary_rack or self.secondary_rack == rack_id:
self.logger.debug("Setting secondary DHCP controller %s on VLAN %s.", rack_id, self.resource_id)
self.logger.debug(
"Setting secondary DHCP controller %s on VLAN %s.", rack_id,
self.resource_id)
self.secondary_rack = rack_id
else:
raise RackControllerConflict(
@ -92,7 +95,8 @@ class Vlan(model_base.ResourceBase):
:param bool commit: Whether to commit reset to MAAS API
"""
self.logger.debug("Resetting DHCP control on VLAN %s.", self.resource_id)
self.logger.debug("Resetting DHCP control on VLAN %s.",
self.resource_id)
self.relay_vlan = None
self.dhcp_on = False
self.primary_rack = None
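The assignment rule above admits at most two DHCP rack controllers per VLAN: the first becomes primary, the second becomes secondary, and a third raises RackControllerConflict. The rule reduced to a standalone sketch:

class RackControllerConflict(Exception):
    """Raised when a VLAN already has two DHCP rack controllers."""


class VlanSketch:
    """Reduced model of the primary/secondary assignment above."""

    def __init__(self):
        self.primary_rack = None
        self.secondary_rack = None

    def add_rack_controller(self, rack_id):
        if not self.primary_rack or self.primary_rack == rack_id:
            self.primary_rack = rack_id
        elif not self.secondary_rack or self.secondary_rack == rack_id:
            self.secondary_rack = rack_id
        else:
            raise RackControllerConflict(
                "VLAN already has two DHCP rack controllers.")


vlan = VlanSketch()
vlan.add_rack_controller('rack-1')  # primary
vlan.add_rack_controller('rack-2')  # secondary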

View File

@ -60,17 +60,19 @@ class VolumeGroup(model_base.ResourceBase):
data = {'name': name, 'uuid': uuid_str, 'size': size}
self.logger.debug("Creating logical volume %s in VG %s on node %s"
% (name, self.name, self.system_id))
self.logger.debug(
"Creating logical volume %s in VG %s on node %s" %
(name, self.name, self.system_id))
url = self.interpolate_url()
resp = self.api_client.post(
url, op='create_logical_volume', files=data)
resp = self.api_client.post(url,
op='create_logical_volume',
files=data)
if not resp.ok:
raise Exception(
"MAAS error - %s - %s" % (resp.status_code, resp.txt))
raise Exception("MAAS error - %s - %s" %
(resp.status_code, resp.text))
res = resp.json()
if 'id' in res:
@ -101,12 +103,13 @@ class VolumeGroup(model_base.ResourceBase):
url = self.interpolate_url()
resp = self.api_client.post(
url, op='delete_logical_volume', files={'id': target_lv})
resp = self.api_client.post(url,
op='delete_logical_volume',
files={'id': target_lv})
if not resp.ok:
raise Exception(
"MAAS error - %s - %s" % (resp.status_code, resp.text))
raise Exception("MAAS error - %s - %s" %
(resp.status_code, resp.text))
else:
raise Exception("VG %s has no logical volumes" % self.name)
except Exception as ex:

View File

@ -35,14 +35,14 @@ class LibvirtBaseAction(BaseAction):
:param node: instance of objects.BaremetalNode
"""
if node.oob_type != 'libvirt':
raise errors.DriverError(
"Node OOB type %s is not 'libvirt'" % node.oob_type)
raise errors.DriverError("Node OOB type %s is not 'libvirt'" %
node.oob_type)
virsh_url = node.oob_parameters.get('libvirt_uri', None)
if not virsh_url:
raise errors.DriverError(
"Node %s has no 'libvirt_url' defined" % (node.name))
raise errors.DriverError("Node %s has no 'libvirt_url' defined" %
(node.name))
url_parts = urlparse(virsh_url)
@ -51,8 +51,8 @@ class LibvirtBaseAction(BaseAction):
"Node %s has invalid libvirt URL scheme %s. "
"Only 'qemu+ssh' supported." % (node.name, url_parts.scheme))
self.logger.debug(
"Starting libvirt session to hypervisor %s " % (virsh_url))
self.logger.debug("Starting libvirt session to hypervisor %s " %
(virsh_url))
virsh_ses = libvirt.open(virsh_url)
if not virsh_ses:
@ -148,11 +148,10 @@ class ValidateOobServices(LibvirtBaseAction):
"""Action to validation OOB services are available."""
def start(self):
self.task.add_status_msg(
msg="OOB does not require services.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.add_status_msg(msg="OOB does not require services.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.success()
self.task.save()
@ -198,11 +197,10 @@ class SetNodeBoot(LibvirtBaseAction):
for n in node_list:
self.logger.debug("Setting bootdev to PXE for %s" % n.name)
self.task.add_status_msg(
msg="Setting node to PXE boot.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Setting node to PXE boot.",
error=False,
ctx=n.name,
ctx_type='node')
try:
self.set_node_pxe(n)
@ -213,14 +211,13 @@ class SetNodeBoot(LibvirtBaseAction):
ctx=n.name,
ctx_type='node')
self.task.failure(focus=n.name)
self.logger.warning(
"Unable to set node %s to PXE boot." % (n.name))
self.logger.warning("Unable to set node %s to PXE boot." %
(n.name))
else:
self.task.add_status_msg(
msg="Set bootdev to PXE.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Set bootdev to PXE.",
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug("%s reports bootdev of network" % n.name)
self.task.success(focus=n.name)
@ -244,21 +241,27 @@ class PowerOffNode(LibvirtBaseAction):
for n in node_list:
msg = "Shutting down domain %s" % n.name
self.logger.debug(msg)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=False,
ctx=n.name,
ctx_type='node')
try:
self.poweroff_node(n)
except Exception as ex:
msg = "Node failed to power off: %s" % str(ex)
self.task.add_status_msg(
msg=msg, error=True, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=True,
ctx=n.name,
ctx_type='node')
self.logger.error(msg)
self.task.failure(focus=n.name)
else:
msg = "Node %s powered off." % n.name
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(msg)
self.task.success(focus=n.name)
@ -282,21 +285,27 @@ class PowerOnNode(LibvirtBaseAction):
for n in node_list:
msg = "Starting domain %s" % n.name
self.logger.debug(msg)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=False,
ctx=n.name,
ctx_type='node')
try:
self.poweron_node(n)
except Exception as ex:
msg = "Node failed to power on: %s" % str(ex)
self.task.add_status_msg(
msg=msg, error=True, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=True,
ctx=n.name,
ctx_type='node')
self.logger.error(msg)
self.task.failure(focus=n.name)
else:
msg = "Node %s powered on." % n.name
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(msg)
self.task.success(focus=n.name)
@ -320,22 +329,28 @@ class PowerCycleNode(LibvirtBaseAction):
for n in node_list:
msg = ("Power cycling domain for node %s" % n.name)
self.logger.debug(msg)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=False,
ctx=n.name,
ctx_type='node')
try:
self.poweroff_node(n)
self.poweron_node(n)
except Exception as ex:
msg = "Node failed to power cycle: %s" % str(ex)
self.task.add_status_msg(
msg=msg, error=True, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=True,
ctx=n.name,
ctx_type='node')
self.logger.error(msg)
self.task.failure(focus=n.name)
else:
msg = "Node %s power cycled." % n.name
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(msg)
self.task.success(focus=n.name)
@ -361,14 +376,18 @@ class InterrogateOob(LibvirtBaseAction):
node_status = self.get_node_status(n)
except Exception as ex:
msg = "Node failed tatus check: %s" % str(ex)
self.task.add_status_msg(
msg=msg, error=True, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=True,
ctx=n.name,
ctx_type='node')
self.logger.error(msg)
self.task.failure(focus=n.name)
else:
msg = "Node %s status is %s." % (n.name, node_status)
self.task.add_status_msg(
msg=msg, error=False, ctx=n.name, ctx_type='node')
self.task.add_status_msg(msg=msg,
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(msg)
self.task.success(focus=n.name)
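The libvirt session setup above accepts only qemu+ssh URIs and rejects anything else before libvirt.open() is attempted. The validation step in isolation (the URI is a placeholder; the libvirt import is commented out so the sketch runs without the bindings installed):

from urllib.parse import urlparse

# import libvirt  # requires the libvirt-python bindings


def validate_virsh_url(virsh_url):
    """Mirror the scheme check used above; return the parsed URL."""
    url_parts = urlparse(virsh_url)
    if url_parts.scheme != 'qemu+ssh':
        raise ValueError(
            "Invalid libvirt URL scheme %s. Only 'qemu+ssh' supported." %
            url_parts.scheme)
    return url_parts


validate_virsh_url('qemu+ssh://drydock@hv01.example.com/system')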

View File

@ -65,9 +65,8 @@ class LibvirtDriver(oob_driver.OobDriver):
def __init__(self, **kwargs):
super().__init__(**kwargs)
cfg.CONF.register_opts(
LibvirtDriver.libvirt_driver_options,
group=LibvirtDriver.driver_key)
cfg.CONF.register_opts(LibvirtDriver.libvirt_driver_options,
group=LibvirtDriver.driver_key)
self.logger = logging.getLogger(
config.config_mgr.conf.logging.oobdriver_logger_name)
@ -82,8 +81,9 @@ class LibvirtDriver(oob_driver.OobDriver):
if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
raise errors.DriverError(
"Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running)
task.save()
@ -129,10 +129,9 @@ class LibvirtDriver(oob_driver.OobDriver):
task.failure()
else:
if f.exception():
self.logger.error(
"Uncaught exception in subtask %s" % str(
uuid.UUID(bytes=t)),
exc_info=f.exception())
self.logger.error("Uncaught exception in subtask %s" %
str(uuid.UUID(bytes=t)),
exc_info=f.exception())
task.align_result()
task.bubble_results()
task.set_status(hd_fields.TaskStatus.Complete)

View File

@ -48,20 +48,22 @@ class ManualDriver(oob.OobDriver):
if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
raise errors.DriverError(
"Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
design_ref = task.design_ref
if design_ref is None:
raise errors.DriverError(
"No design ID specified in task %s" % (task_id))
raise errors.DriverError("No design ID specified in task %s" %
(task_id))
self.orchestrator.task_field_update(
task.get_id(), status=hd_fields.TaskStatus.Running)
self.logger.info("Sleeping 60s to allow time for manual OOB %s action"
% task.action)
self.logger.info(
"Sleeping 60s to allow time for manual OOB %s action" %
task.action)
time.sleep(60)

View File

@ -44,16 +44,17 @@ class PyghmiBaseAction(BaseAction):
ipmi_address = node.get_network_address(ipmi_network)
if ipmi_address is None:
raise errors.DriverError(
"Node %s has no IPMI address" % (node.name))
raise errors.DriverError("Node %s has no IPMI address" %
(node.name))
ipmi_account = node.oob_parameters['account']
ipmi_credential = node.oob_parameters['credential']
self.logger.debug("Starting IPMI session to %s with %s/%s" %
(ipmi_address, ipmi_account, ipmi_credential[:1]))
ipmi_session = Command(
bmc=ipmi_address, userid=ipmi_account, password=ipmi_credential)
ipmi_session = Command(bmc=ipmi_address,
userid=ipmi_account,
password=ipmi_credential)
return ipmi_session
@ -99,11 +100,10 @@ class ValidateOobServices(PyghmiBaseAction):
"""Action to validation OOB services are available."""
def start(self):
self.task.add_status_msg(
msg="OOB does not require services.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.add_status_msg(msg="OOB does not require services.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.success()
self.task.save()
@ -149,35 +149,32 @@ class SetNodeBoot(PyghmiBaseAction):
for n in node_list:
self.logger.debug("Setting bootdev to PXE for %s" % n.name)
self.task.add_status_msg(
msg="Setting node to PXE boot.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Setting node to PXE boot.",
error=False,
ctx=n.name,
ctx_type='node')
self.exec_ipmi_command(n, Command.set_bootdev, 'pxe')
time.sleep(3)
bootdev = self.exec_ipmi_command(n, Command.get_bootdev)
if bootdev is not None and (bootdev.get('bootdev',
'') == 'network'):
self.task.add_status_msg(
msg="Set bootdev to PXE.",
error=False,
ctx=n.name,
ctx_type='node')
if bootdev is not None and (bootdev.get('bootdev', '')
== 'network'):
self.task.add_status_msg(msg="Set bootdev to PXE.",
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug("%s reports bootdev of network" % n.name)
self.task.success(focus=n.name)
else:
self.task.add_status_msg(
msg="Unable to set bootdev to PXE.",
error=True,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Unable to set bootdev to PXE.",
error=True,
ctx=n.name,
ctx_type='node')
self.task.failure(focus=n.name)
self.logger.warning(
"Unable to set node %s to PXE boot." % (n.name))
self.logger.warning("Unable to set node %s to PXE boot." %
(n.name))
self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save()
@ -198,11 +195,10 @@ class PowerOffNode(PyghmiBaseAction):
for n in node_list:
self.logger.debug("Sending set_power = off command to %s" % n.name)
self.task.add_status_msg(
msg="Sending set_power = off command.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Sending set_power = off command.",
error=False,
ctx=n.name,
ctx_type='node')
self.exec_ipmi_command(n, Command.set_power, 'off')
i = 18
@ -212,13 +208,12 @@ class PowerOffNode(PyghmiBaseAction):
power_state = self.exec_ipmi_command(n, Command.get_power)
if power_state is not None and (power_state.get(
'powerstate', '') == 'off'):
self.task.add_status_msg(
msg="Node reports power off.",
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(
"Node %s reports powerstate of off" % n.name)
self.task.add_status_msg(msg="Node reports power off.",
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug("Node %s reports powerstate of off" %
n.name)
self.task.success(focus=n.name)
break
time.sleep(10)
@ -226,11 +221,10 @@ class PowerOffNode(PyghmiBaseAction):
if power_state is not None and (power_state.get('powerstate', '')
!= 'off'):
self.task.add_status_msg(
msg="Node failed to power off.",
error=True,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Node failed to power off.",
error=True,
ctx=n.name,
ctx_type='node')
self.logger.error("Giving up on IPMI command to %s" % n.name)
self.task.failure(focus=n.name)
@ -253,11 +247,10 @@ class PowerOnNode(PyghmiBaseAction):
for n in node_list:
self.logger.debug("Sending set_power = off command to %s" % n.name)
self.task.add_status_msg(
msg="Sending set_power = on command.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Sending set_power = on command.",
error=False,
ctx=n.name,
ctx_type='node')
self.exec_ipmi_command(n, Command.set_power, 'on')
i = 18
@ -267,13 +260,12 @@ class PowerOnNode(PyghmiBaseAction):
power_state = self.exec_ipmi_command(n, Command.get_power)
if power_state is not None and (power_state.get(
'powerstate', '') == 'on'):
self.logger.debug(
"Node %s reports powerstate of on" % n.name)
self.task.add_status_msg(
msg="Node reports power on.",
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug("Node %s reports powerstate of on" %
n.name)
self.task.add_status_msg(msg="Node reports power on.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.success(focus=n.name)
break
time.sleep(10)
@ -281,11 +273,10 @@ class PowerOnNode(PyghmiBaseAction):
if power_state is not None and (power_state.get('powerstate', '')
!= 'on'):
self.task.add_status_msg(
msg="Node failed to power on.",
error=True,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Node failed to power on.",
error=True,
ctx=n.name,
ctx_type='node')
self.logger.error("Giving up on IPMI command to %s" % n.name)
self.task.failure(focus=n.name)
@ -308,11 +299,10 @@ class PowerCycleNode(PyghmiBaseAction):
for n in node_list:
self.logger.debug("Sending set_power = off command to %s" % n.name)
self.task.add_status_msg(
msg="Power cycling node via IPMI.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Power cycling node via IPMI.",
error=False,
ctx=n.name,
ctx_type='node')
self.exec_ipmi_command(n, Command.set_power, 'off')
# Wait for power state of off before booting back up
@ -326,8 +316,8 @@ class PowerCycleNode(PyghmiBaseAction):
self.logger.debug("%s reports powerstate of off" % n.name)
break
elif power_state is None:
self.logger.debug(
"No response on IPMI power query to %s" % n.name)
self.logger.debug("No response on IPMI power query to %s" %
n.name)
time.sleep(10)
i = i - 1
@ -355,18 +345,17 @@ class PowerCycleNode(PyghmiBaseAction):
self.logger.debug("%s reports powerstate of on" % n.name)
break
elif power_state is None:
self.logger.debug(
"No response on IPMI power query to %s" % n.name)
self.logger.debug("No response on IPMI power query to %s" %
n.name)
time.sleep(10)
i = i - 1
if power_state is not None and (power_state.get('powerstate',
'') == 'on'):
self.task.add_status_msg(
msg="Node power cycle complete.",
error=False,
ctx=n.name,
ctx_type='node')
if power_state is not None and (power_state.get('powerstate', '')
== 'on'):
self.task.add_status_msg(msg="Node power cycle complete.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.success(focus=n.name)
else:
self.task.add_status_msg(
@ -398,8 +387,8 @@ class InterrogateOob(PyghmiBaseAction):
for n in node_list:
try:
self.logger.debug(
"Interrogating node %s IPMI interface." % n.name)
self.logger.debug("Interrogating node %s IPMI interface." %
n.name)
powerstate = self.exec_ipmi_command(n, Command.get_power)
if powerstate is None:
raise errors.DriverError()

View File

@ -70,8 +70,8 @@ class PyghmiDriver(oob_driver.OobDriver):
def __init__(self, **kwargs):
super().__init__(**kwargs)
cfg.CONF.register_opts(
PyghmiDriver.pyghmi_driver_options, group=PyghmiDriver.driver_key)
cfg.CONF.register_opts(PyghmiDriver.pyghmi_driver_options,
group=PyghmiDriver.driver_key)
self.logger = logging.getLogger(
config.config_mgr.conf.logging.oobdriver_logger_name)
@ -86,8 +86,9 @@ class PyghmiDriver(oob_driver.OobDriver):
if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
raise errors.DriverError(
"Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running)
task.save()
@ -133,10 +134,9 @@ class PyghmiDriver(oob_driver.OobDriver):
task.failure()
else:
if f.exception():
self.logger.error(
"Uncaught exception in subtask %s" % str(
uuid.UUID(bytes=t)),
exc_info=f.exception())
self.logger.error("Uncaught exception in subtask %s" %
str(uuid.UUID(bytes=t)),
exc_info=f.exception())
task.align_result()
task.bubble_results()
task.set_status(hd_fields.TaskStatus.Complete)
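execute_task fans each node out as a subtask future, then inspects every completed future so one node's uncaught exception is logged rather than aborting the whole task. The fan-out and collection shape in isolation, with a hypothetical per-node work function:

import concurrent.futures

def run_per_node(nodes, work):
    # work(node) is a hypothetical per-node action, e.g. one IPMI command.
    with concurrent.futures.ThreadPoolExecutor(max_workers=16) as pool:
        futures = {n: pool.submit(work, n) for n in nodes}
    # Leaving the with-block joins the pool, so every future is done here.
    results = {}
    for n, fut in futures.items():
        if fut.exception():
            print("Uncaught exception in subtask %s: %s" % (n, fut.exception()))
            results[n] = None
        else:
            results[n] = fut.result()
    return results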

View File

@ -29,6 +29,7 @@ import drydock_provisioner.objects.fields as hd_fields
REDFISH_MAX_ATTEMPTS = 3
class RedfishBaseAction(BaseAction):
"""Base action for Redfish executed actions."""
@ -44,8 +45,8 @@ class RedfishBaseAction(BaseAction):
oob_network = node.oob_parameters['network']
oob_address = node.get_network_address(oob_network)
if oob_address is None:
raise errors.DriverError(
"Node %s has no OOB Redfish address" % (node.name))
raise errors.DriverError("Node %s has no OOB Redfish address" %
(node.name))
oob_account = node.oob_parameters['account']
oob_credential = node.oob_parameters['credential']
@ -53,11 +54,12 @@ class RedfishBaseAction(BaseAction):
self.logger.debug("Starting Redfish session to %s with %s" %
(oob_address, oob_account))
try:
redfish_obj = RedfishSession(host=oob_address,
account=oob_account,
password=oob_credential,
use_ssl=cfg.CONF.redfish_driver.use_ssl,
connection_retries=cfg.CONF.redfish_driver.max_retries)
redfish_obj = RedfishSession(
host=oob_address,
account=oob_account,
password=oob_credential,
use_ssl=cfg.CONF.redfish_driver.use_ssl,
connection_retries=cfg.CONF.redfish_driver.max_retries)
except (RedfishException, errors.DriverError) as iex:
self.logger.error(
"Error initializing Redfish session for node %s" % node.name)
@ -81,7 +83,8 @@ class RedfishBaseAction(BaseAction):
return response
except RedfishException as iex:
self.logger.error(
"Error executing Redfish command %s for node %s" % (func.__name__, node.name))
"Error executing Redfish command %s for node %s" %
(func.__name__, node.name))
self.logger.error("Redfish Exception: %s" % str(iex))
raise errors.DriverError("Redfish command failed.")
@ -91,11 +94,10 @@ class ValidateOobServices(RedfishBaseAction):
"""Action to validate OOB services are available."""
def start(self):
self.task.add_status_msg(
msg="OOB does not require services.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.add_status_msg(msg="OOB does not require services.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.success()
self.task.save()
@ -134,34 +136,38 @@ class SetNodeBoot(RedfishBaseAction):
node_list = self.orchestrator.get_target_nodes(self.task)
for n in node_list:
self.task.add_status_msg(
msg="Setting node to PXE boot.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Setting node to PXE boot.",
error=False,
ctx=n.name,
ctx_type='node')
for i in range(REDFISH_MAX_ATTEMPTS):
bootdev = None
self.logger.debug("Setting bootdev to PXE for %s attempt #%s" % (n.name, i + 1))
self.logger.debug("Setting bootdev to PXE for %s attempt #%s" %
(n.name, i + 1))
try:
session = self.get_redfish_session(n)
bootdev = self.exec_redfish_command(n, session, RedfishSession.get_bootdev)
bootdev = self.exec_redfish_command(
n, session, RedfishSession.get_bootdev)
if bootdev.get('bootdev', '') != 'Pxe':
self.exec_redfish_command(n, session, RedfishSession.set_bootdev, 'Pxe')
self.exec_redfish_command(n, session,
RedfishSession.set_bootdev,
'Pxe')
time.sleep(1)
bootdev = self.exec_redfish_command(n, session, RedfishSession.get_bootdev)
bootdev = self.exec_redfish_command(
n, session, RedfishSession.get_bootdev)
session.close_session()
except errors.DriverError as e:
self.logger.warning(
"An exception '%s' occurred while attempting to set boot device on %s" % (e, n.name))
"An exception '%s' occurred while attempting to set boot device on %s"
% (e, n.name))
if bootdev is not None and (bootdev.get('bootdev',
'') == 'Pxe'):
self.task.add_status_msg(
msg="Set bootdev to PXE.",
error=False,
ctx=n.name,
ctx_type='node')
if bootdev is not None and (bootdev.get('bootdev', '')
== 'Pxe'):
self.task.add_status_msg(msg="Set bootdev to PXE.",
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug("%s reports bootdev of network" % n.name)
self.task.success(focus=n.name)
break
@ -173,8 +179,8 @@ class SetNodeBoot(RedfishBaseAction):
ctx=n.name,
ctx_type='node')
self.task.failure(focus=n.name)
self.logger.warning(
"Unable to set node %s to PXE boot." % (n.name))
self.logger.warning("Unable to set node %s to PXE boot." %
(n.name))
self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save()
@ -192,56 +198,58 @@ class PowerOffNode(RedfishBaseAction):
for n in node_list:
self.logger.debug("Sending set_power = off command to %s" % n.name)
self.task.add_status_msg(
msg="Sending set_power = off command.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Sending set_power = off command.",
error=False,
ctx=n.name,
ctx_type='node')
session = self.get_redfish_session(n)
# If power is already off, continue with the next node
power_state = self.exec_redfish_command(n, RedfishSession.get_power)
if power_state is not None and (power_state.get(
'powerstate', '') == 'Off'):
self.task.add_status_msg(
msg="Node reports power off.",
error=False,
ctx=n.name,
ctx_type='node')
power_state = self.exec_redfish_command(n,
RedfishSession.get_power)
if power_state is not None and (power_state.get('powerstate', '')
== 'Off'):
self.task.add_status_msg(msg="Node reports power off.",
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(
"Node %s reports powerstate already off. No action required" % n.name)
"Node %s reports powerstate already off. No action required"
% n.name)
self.task.success(focus=n.name)
continue
self.exec_redfish_command(n, session, RedfishSession.set_power, 'ForceOff')
self.exec_redfish_command(n, session, RedfishSession.set_power,
'ForceOff')
attempts = cfg.CONF.redfish_driver.power_state_change_max_retries
while attempts > 0:
self.logger.debug("Polling powerstate waiting for success.")
power_state = self.exec_redfish_command(n, RedfishSession.get_power)
power_state = self.exec_redfish_command(
n, RedfishSession.get_power)
if power_state is not None and (power_state.get(
'powerstate', '') == 'Off'):
self.task.add_status_msg(
msg="Node reports power off.",
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(
"Node %s reports powerstate of off" % n.name)
self.task.add_status_msg(msg="Node reports power off.",
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug("Node %s reports powerstate of off" %
n.name)
self.task.success(focus=n.name)
break
time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval)
time.sleep(
cfg.CONF.redfish_driver.power_state_change_retry_interval)
attempts = attempts - 1
if power_state is not None and (power_state.get('powerstate', '')
!= 'Off'):
self.task.add_status_msg(
msg="Node failed to power off.",
error=True,
ctx=n.name,
ctx_type='node')
self.logger.error("Giving up on Redfish command to %s" % n.name)
self.task.add_status_msg(msg="Node failed to power off.",
error=True,
ctx=n.name,
ctx_type='node')
self.logger.error("Giving up on Redfish command to %s" %
n.name)
self.task.failure(focus=n.name)
session.close_session()
@ -262,56 +270,58 @@ class PowerOnNode(RedfishBaseAction):
for n in node_list:
self.logger.debug("Sending set_power = on command to %s" % n.name)
self.task.add_status_msg(
msg="Sending set_power = on command.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Sending set_power = on command.",
error=False,
ctx=n.name,
ctx_type='node')
session = self.get_redfish_session(n)
# If power is already on, continue with the next node
power_state = self.exec_redfish_command(n, RedfishSession.get_power)
if power_state is not None and (power_state.get(
'powerstate', '') == 'On'):
self.task.add_status_msg(
msg="Node reports power on.",
error=False,
ctx=n.name,
ctx_type='node')
power_state = self.exec_redfish_command(n,
RedfishSession.get_power)
if power_state is not None and (power_state.get('powerstate', '')
== 'On'):
self.task.add_status_msg(msg="Node reports power on.",
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug(
"Node %s reports powerstate already on. No action required" % n.name)
"Node %s reports powerstate already on. No action required"
% n.name)
self.task.success(focus=n.name)
continue
self.exec_redfish_command(n, session, RedfishSession.set_power, 'On')
self.exec_redfish_command(n, session, RedfishSession.set_power,
'On')
attempts = cfg.CONF.redfish_driver.power_state_change_max_retries
while attempts > 0:
self.logger.debug("Polling powerstate waiting for success.")
power_state = self.exec_redfish_command(n, session, RedfishSession.get_power)
power_state = self.exec_redfish_command(
n, session, RedfishSession.get_power)
if power_state is not None and (power_state.get(
'powerstate', '') == 'On'):
self.logger.debug(
"Node %s reports powerstate of on" % n.name)
self.task.add_status_msg(
msg="Node reports power on.",
error=False,
ctx=n.name,
ctx_type='node')
self.logger.debug("Node %s reports powerstate of on" %
n.name)
self.task.add_status_msg(msg="Node reports power on.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.success(focus=n.name)
break
time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval)
time.sleep(
cfg.CONF.redfish_driver.power_state_change_retry_interval)
attempts = attempts - 1
if power_state is not None and (power_state.get('powerstate', '')
!= 'On'):
self.task.add_status_msg(
msg="Node failed to power on.",
error=True,
ctx=n.name,
ctx_type='node')
self.logger.error("Giving up on Redfish command to %s" % n.name)
self.task.add_status_msg(msg="Node failed to power on.",
error=True,
ctx=n.name,
ctx_type='node')
self.logger.error("Giving up on Redfish command to %s" %
n.name)
self.task.failure(focus=n.name)
session.close_session()
@ -332,19 +342,20 @@ class PowerCycleNode(RedfishBaseAction):
for n in node_list:
self.logger.debug("Sending set_power = off command to %s" % n.name)
self.task.add_status_msg(
msg="Power cycling node via Redfish.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.add_status_msg(msg="Power cycling node via Redfish.",
error=False,
ctx=n.name,
ctx_type='node')
session = self.get_redfish_session(n)
self.exec_redfish_command(n, session, RedfishSession.set_power, 'ForceOff')
self.exec_redfish_command(n, session, RedfishSession.set_power,
'ForceOff')
# Wait for power state of off before booting back up
attempts = cfg.CONF.redfish_driver.power_state_change_max_retries
while attempts > 0:
power_state = self.exec_redfish_command(n, session, RedfishSession.get_power)
power_state = self.exec_redfish_command(
n, session, RedfishSession.get_power)
if power_state is not None and power_state.get(
'powerstate', '') == 'Off':
self.logger.debug("%s reports powerstate of off" % n.name)
@ -352,7 +363,8 @@ class PowerCycleNode(RedfishBaseAction):
elif power_state is None:
self.logger.debug(
"No response on Redfish power query to %s" % n.name)
time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval)
time.sleep(
cfg.CONF.redfish_driver.power_state_change_retry_interval)
attempts = attempts - 1
if power_state is None or power_state.get('powerstate', '') != 'Off':
@ -368,12 +380,14 @@ class PowerCycleNode(RedfishBaseAction):
break
self.logger.debug("Sending set_power = on command to %s" % n.name)
self.exec_redfish_command(n, session, RedfishSession.set_power, 'On')
self.exec_redfish_command(n, session, RedfishSession.set_power,
'On')
attempts = cfg.CONF.redfish_driver.power_state_change_max_retries
while attempts > 0:
power_state = self.exec_redfish_command(n, session, RedfishSession.get_power)
power_state = self.exec_redfish_command(
n, session, RedfishSession.get_power)
if power_state is not None and power_state.get(
'powerstate', '') == 'On':
self.logger.debug("%s reports powerstate of on" % n.name)
@ -381,16 +395,16 @@ class PowerCycleNode(RedfishBaseAction):
elif power_state is None:
self.logger.debug(
"No response on Redfish power query to %s" % n.name)
time.sleep(cfg.CONF.redfish_driver.power_state_change_retry_interval)
time.sleep(
cfg.CONF.redfish_driver.power_state_change_retry_interval)
attempts = attempts - 1
if power_state is not None and (power_state.get('powerstate',
'') == 'On'):
self.task.add_status_msg(
msg="Node power cycle complete.",
error=False,
ctx=n.name,
ctx_type='node')
if power_state is not None and (power_state.get('powerstate', '')
== 'On'):
self.task.add_status_msg(msg="Node power cycle complete.",
error=False,
ctx=n.name,
ctx_type='node')
self.task.success(focus=n.name)
else:
self.task.add_status_msg(
@ -421,16 +435,17 @@ class InterrogateOob(RedfishBaseAction):
for n in node_list:
try:
self.logger.debug(
"Interrogating node %s Redfish interface." % n.name)
self.logger.debug("Interrogating node %s Redfish interface." %
n.name)
session = self.get_redfish_session(n)
powerstate = self.exec_redfish_command(n, session, RedfishSession.get_power)
powerstate = self.exec_redfish_command(
n, session, RedfishSession.get_power)
session.close_session()
if powerstate is None:
raise errors.DriverError()
self.task.add_status_msg(
msg="Redfish interface interrogation yielded powerstate %s" %
powerstate.get('powerstate'),
msg="Redfish interface interrogation yielded powerstate %s"
% powerstate.get('powerstate'),
error=False,
ctx=n.name,
ctx_type='node')

View File

@ -21,10 +21,16 @@ from redfish.rest.v1 import ServerDownOrUnreachableError
from redfish.rest.v1 import InvalidCredentialsError
from redfish.rest.v1 import RetriesExhaustedError
class RedfishSession(object):
"""Redfish Client to provide OOB commands"""
def __init__(self, host, account, password, use_ssl=True, connection_retries=10):
def __init__(self,
host,
account,
password,
use_ssl=True,
connection_retries=10):
try:
if use_ssl:
redfish_url = 'https://' + host
@ -57,7 +63,8 @@ class RedfishSession(object):
# Assumption that only one system is available on Node
if response.dict["Members@odata.count"] != 1:
raise RedfishException("Number of systems are more than one in the node")
raise RedfishException(
"Number of systems are more than one in the node")
instance = response.dict["Members"][0]["@odata.id"]
return instance
@ -152,7 +159,9 @@ class RedfishSession(object):
"""
instance = self.get_system_instance()
if powerstate not in ["On", "ForceOff", "PushPowerButton", "GracefulRestart"]:
if powerstate not in [
"On", "ForceOff", "PushPowerButton", "GracefulRestart"
]:
raise RedfishException("Unsupported powerstate")
current_state = self.get_power()
@ -160,9 +169,7 @@ class RedfishSession(object):
(powerstate == "ForceOff" and current_state["powerstate"] == "Off"):
return {'powerstate': powerstate}
payload = {
"ResetType": powerstate
}
payload = {"ResetType": powerstate}
url = instance + "/Actions/ComputerSystem.Reset"
response = self.redfish_client.post(path=url, body=payload)
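set_power validates the requested state, short-circuits when the node is already there, and only then posts the ResetType payload. That idempotency check, sketched with a hypothetical client object:

ALLOWED = {"On", "ForceOff", "PushPowerButton", "GracefulRestart"}

def set_power(client, instance, powerstate, current):
    # client and instance stand in for the live Redfish session and system.
    if powerstate not in ALLOWED:
        raise ValueError("Unsupported powerstate")
    if (powerstate, current) in (("On", "On"), ("ForceOff", "Off")):
        return {'powerstate': powerstate}   # already satisfied, skip the POST
    url = instance + "/Actions/ComputerSystem.Reset"
    return client.post(path=url, body={"ResetType": powerstate})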

View File

@ -48,19 +48,19 @@ class RedfishDriver(oob_driver.OobDriver):
default=10,
min=1,
help='Maximum number of connection retries to Redfish server'),
cfg.IntOpt(
'power_state_change_max_retries',
default=18,
min=1,
help='Maximum retries to wait for power state change'),
cfg.IntOpt('power_state_change_max_retries',
default=18,
min=1,
help='Maximum retries to wait for power state change'),
cfg.IntOpt(
'power_state_change_retry_interval',
default=10,
help='Polling interval in seconds between retries for power state change'),
cfg.BoolOpt(
'use_ssl',
default=True,
help='Use SSL to communicate with Redfish API server'),
help=
'Polling interval in seconds between retries for power state change'
),
cfg.BoolOpt('use_ssl',
default=True,
help='Use SSL to communicate with Redfish API server'),
]
oob_types_supported = ['redfish']
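These tunables are plain oslo.config options registered under the driver's own group, so operators can override defaults like the retry budget from drydock.conf. A minimal round-trip, assuming the 'redfish_driver' group name used above:

from oslo_config import cfg

opts = [
    cfg.IntOpt('power_state_change_max_retries', default=18, min=1,
               help='Maximum retries to wait for power state change'),
    cfg.BoolOpt('use_ssl', default=True,
                help='Use SSL to communicate with Redfish API server'),
]
cfg.CONF.register_opts(opts, group='redfish_driver')
cfg.CONF([])  # parse an empty command line so defaults take effect

print(cfg.CONF.redfish_driver.power_state_change_max_retries)  # 18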
@ -82,8 +82,8 @@ class RedfishDriver(oob_driver.OobDriver):
def __init__(self, **kwargs):
super().__init__(**kwargs)
cfg.CONF.register_opts(
RedfishDriver.redfish_driver_options, group=RedfishDriver.driver_key)
cfg.CONF.register_opts(RedfishDriver.redfish_driver_options,
group=RedfishDriver.driver_key)
self.logger = logging.getLogger(
config.config_mgr.conf.logging.oobdriver_logger_name)
@ -98,8 +98,9 @@ class RedfishDriver(oob_driver.OobDriver):
if task.action not in self.supported_actions:
self.logger.error("Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
raise errors.DriverError("Driver %s doesn't support task action %s"
% (self.driver_desc, task.action))
raise errors.DriverError(
"Driver %s doesn't support task action %s" %
(self.driver_desc, task.action))
task.set_status(hd_fields.TaskStatus.Running)
task.save()
@ -145,10 +146,9 @@ class RedfishDriver(oob_driver.OobDriver):
task.failure()
else:
if f.exception():
self.logger.error(
"Uncaught exception in subtask %s" % str(
uuid.UUID(bytes=t)),
exc_info=f.exception())
self.logger.error("Uncaught exception in subtask %s" %
str(uuid.UUID(bytes=t)),
exc_info=f.exception())
task.align_result()
task.bubble_results()
task.set_status(hd_fields.TaskStatus.Complete)

View File

@ -34,8 +34,10 @@ def start_drydock(enable_keystone=True):
# Setup configuration parsing
cli_options = [
cfg.BoolOpt(
'debug', short='d', default=False, help='Enable debug logging'),
cfg.BoolOpt('debug',
short='d',
default=False,
help='Enable debug logging'),
]
config.config_mgr.conf.register_cli_opts(cli_options)
@ -43,8 +45,9 @@ def start_drydock(enable_keystone=True):
config.config_mgr.conf(sys.argv[1:])
if config.config_mgr.conf.debug:
config.config_mgr.conf.set_override(
name='log_level', override='DEBUG', group='logging')
config.config_mgr.conf.set_override(name='log_level',
override='DEBUG',
group='logging')
# Setup root logger
logger = logging.getLogger(
@ -64,8 +67,7 @@ def start_drydock(enable_keystone=True):
logger.propagate = False
formatter = logging.Formatter(
"%(asctime)s - %(levelname)s - %(user)s - %(req_id)s"
" - %(external_ctx)s - %(end_user)s - %(message)s"
)
" - %(external_ctx)s - %(end_user)s - %(message)s")
ch = logging.StreamHandler()
ch.setFormatter(formatter)
@ -77,10 +79,9 @@ def start_drydock(enable_keystone=True):
input_ingester = Ingester()
input_ingester.enable_plugin(config.config_mgr.conf.plugins.ingester)
orchestrator = Orchestrator(
enabled_drivers=config.config_mgr.conf.plugins,
state_manager=state,
ingester=input_ingester)
orchestrator = Orchestrator(enabled_drivers=config.config_mgr.conf.plugins,
state_manager=state,
ingester=input_ingester)
orch_thread = threading.Thread(target=orchestrator.watch_for_tasks)
orch_thread.start()
@ -98,10 +99,9 @@ def start_drydock(enable_keystone=True):
policy.policy_engine.register_policy()
# Ensure that the policy_engine is initialized before starting the API
wsgi_callable = api.start_api(
state_manager=state,
ingester=input_ingester,
orchestrator=orchestrator)
wsgi_callable = api.start_api(state_manager=state,
ingester=input_ingester,
orchestrator=orchestrator)
# Now that loggers are configured, log the effective config
config.config_mgr.conf.log_opt_values(

View File

@ -192,9 +192,9 @@ class DrydockClient(object):
raise errors.ClientUnauthorizedError(
"Unauthorized access to %s, include valid token." % resp.url)
elif resp.status_code == 403:
raise errors.ClientForbiddenError(
"Forbidden access to %s" % resp.url)
raise errors.ClientForbiddenError("Forbidden access to %s" %
resp.url)
elif not resp.ok:
raise errors.ClientError(
"Error - received %d: %s" % (resp.status_code, resp.text),
code=resp.status_code)
raise errors.ClientError("Error - received %d: %s" %
(resp.status_code, resp.text),
code=resp.status_code)
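The client turns each HTTP status family into a distinct exception type before any caller sees the response. The same ladder condensed (exception names mirror the ones above but are redefined locally for the sketch):

class ClientUnauthorizedError(Exception): pass
class ClientForbiddenError(Exception): pass
class ClientError(Exception): pass

def check(resp):
    """Map a requests-style response onto typed client errors."""
    if resp.status_code == 401:
        raise ClientUnauthorizedError("Unauthorized access to %s" % resp.url)
    if resp.status_code == 403:
        raise ClientForbiddenError("Forbidden access to %s" % resp.url)
    if not resp.ok:
        raise ClientError("Error - received %d: %s"
                          % (resp.status_code, resp.text))
    return resp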

View File

@ -91,8 +91,9 @@ class DrydockSession(object):
url = self.base_url + endpoint
self.logger.debug('GET ' + url)
self.logger.debug('Query Params: ' + str(query))
resp = self.__session.get(
url, params=query, timeout=self._timeout(timeout))
resp = self.__session.get(url,
params=query,
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh:
self.set_auth()
@ -121,21 +122,19 @@ class DrydockSession(object):
self.logger.debug('POST ' + url)
self.logger.debug('Query Params: ' + str(query))
if body is not None:
self.logger.debug(
"Sending POST with explicit body: \n%s" % body)
resp = self.__session.post(
self.base_url + endpoint,
params=query,
data=body,
timeout=self._timeout(timeout))
self.logger.debug("Sending POST with explicit body: \n%s" %
body)
resp = self.__session.post(self.base_url + endpoint,
params=query,
data=body,
timeout=self._timeout(timeout))
else:
self.logger.debug(
"Sending POST with JSON body: \n%s" % str(data))
resp = self.__session.post(
self.base_url + endpoint,
params=query,
json=data,
timeout=self._timeout(timeout))
self.logger.debug("Sending POST with JSON body: \n%s" %
str(data))
resp = self.__session.post(self.base_url + endpoint,
params=query,
json=data,
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh:
self.set_auth()
auth_refresh = True
@ -161,8 +160,9 @@ class DrydockSession(object):
url = self.base_url + endpoint
self.logger.debug('DELETE ' + url)
self.logger.debug('Query Params: ' + str(query))
resp = self.__session.delete(
url, params=query, timeout=self._timeout(timeout))
resp = self.__session.delete(url,
params=query,
timeout=self._timeout(timeout))
if resp.status_code == 401 and not auth_refresh:
self.set_auth()
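Each HTTP verb retries exactly once after refreshing credentials on a 401; the auth_refresh flag guarantees a second 401 is surfaced instead of looping. The control flow reduced to a sketch:

def get_with_reauth(session, url, set_auth, **kwargs):
    # set_auth() is assumed to install fresh credentials on the session.
    auth_refresh = False
    while True:
        resp = session.get(url, **kwargs)
        if resp.status_code == 401 and not auth_refresh:
            set_auth()            # e.g. obtain a new keystone token
            auth_refresh = True   # a second 401 falls through to the caller
            continue
        return resp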
@ -212,6 +212,7 @@ class DrydockSession(object):
class KeystoneClient(object):
@staticmethod
def get_endpoint(endpoint,
ks_sess=None,

View File

@ -381,6 +381,7 @@ class InvalidSizeFormat(DriverError):
class ApiError(Exception):
def __init__(self, msg, code=500):
super().__init__(msg)
self.message = msg

View File

@ -29,6 +29,7 @@ import drydock_provisioner.objects.bootaction as bootaction
class Ingester(object):
def __init__(self):
self.logger = logging.getLogger("drydock.ingester")
self.registered_plugin = None
@ -50,8 +51,8 @@ class Ingester(object):
klass = getattr(mod, classname)
self.registered_plugin = klass()
except Exception as ex:
self.logger.error(
"Could not enable plugin %s - %s" % (plugin, str(ex)))
self.logger.error("Could not enable plugin %s - %s" %
(plugin, str(ex)))
if self.registered_plugin is None:
self.logger.error("Could not enable at least one plugin")
@ -89,8 +90,8 @@ class Ingester(object):
"Ingester:ingest_data ingesting design parts for design %s" %
design_ref)
design_blob = design_state.get_design_documents(design_ref)
self.logger.debug(
"Ingesting design data of %d bytes." % len(design_blob))
self.logger.debug("Ingesting design data of %d bytes." %
len(design_blob))
try:
status, design_items = self.registered_plugin.ingest_data(
@ -100,8 +101,8 @@ class Ingester(object):
"Ingester:ingest_data - Unexpected error processing data - %s"
% (str(vex)))
return None, None
self.logger.debug("Ingester:ingest_data parsed %s design parts" % str(
len(design_items)))
self.logger.debug("Ingester:ingest_data parsed %s design parts" %
str(len(design_items)))
design_data = objects.SiteDesign()
for m in design_items:
if context is not None:

View File

@ -19,6 +19,7 @@ import logging
class IngesterPlugin(object):
def __init__(self):
self.log = logging.Logger('ingester')
return

View File

@ -37,7 +37,9 @@ cache_opts = {
cache = CacheManager(**parse_cache_config_options(cache_opts))
class DeckhandIngester(IngesterPlugin):
def __init__(self):
super().__init__()
self.logger = logging.getLogger('drydock.ingester.deckhand')
@ -54,6 +56,7 @@ class DeckhandIngester(IngesterPlugin):
:returns: a tuple of a status response and a list of parsed objects from drydock_provisioner.objects
"""
def local_parse():
return self.parse_docs(kwargs.get('content'))
@ -66,7 +69,9 @@ class DeckhandIngester(IngesterPlugin):
results = local_cache.get(key=hv, createfunc=local_parse)
parse_status, models = results
except Exception as ex:
self.logger.debug("Error parsing design - hash %s", hv, exc_info=ex)
self.logger.debug("Error parsing design - hash %s",
hv,
exc_info=ex)
raise ex
else:
raise ValueError('Missing parameter "content"')
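Document parsing is memoized with beaker: the design blob is hashed, and the hash keys a cache whose createfunc runs the real parser only on a miss. Mirroring the get(key=..., createfunc=...) call above with a trivial parser:

import hashlib

from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options

cache = CacheManager(**parse_cache_config_options({'cache.type': 'memory'}))
local_cache = cache.get_cache('parsed_docs')

def parse(content):
    hv = hashlib.sha1(content).hexdigest()
    # createfunc runs only when hv is not already in the cache.
    return local_cache.get(key=hv, createfunc=lambda: content.split(b'---'))

parse(b"doc1---doc2")   # parses and caches
parse(b"doc1---doc2")   # served from the cache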
@ -103,8 +108,8 @@ class DeckhandIngester(IngesterPlugin):
(schema_ns, doc_kind, doc_version) = d.get('schema',
'').split('/')
except ValueError as ex:
self.logger.error(
"Error with document structure.", exc_info=ex)
self.logger.error("Error with document structure.",
exc_info=ex)
self.logger.debug("Error document\n%s" % yaml.dump(d))
continue
if schema_ns == 'drydock':
@ -230,9 +235,9 @@ class DeckhandIngester(IngesterPlugin):
tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError(
'Unknown definition_type in '
'tag_definition instance: %s' % (t.definition_type))
raise errors.IngesterError('Unknown definition_type in '
'tag_definition instance: %s' %
(t.definition_type))
model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', [])
@ -419,8 +424,9 @@ class DeckhandIngester(IngesterPlugin):
model.hugepages_confs = objects.HugepagesConfList()
for c, d in data.get('hugepages', {}).items():
conf = objects.HugepagesConf(
name=c, size=d.get('size'), count=d.get('count'))
conf = objects.HugepagesConf(name=c,
size=d.get('size'),
count=d.get('count'))
model.hugepages_confs.append(conf)
return model
@ -589,8 +595,8 @@ class DeckhandIngester(IngesterPlugin):
if 'sriov' in v:
int_model.sriov = True
int_model.vf_count = v.get('sriov', {}).get('vf_count', 0)
int_model.trustedmode = v.get('sriov', {}).get(
'trustedmode', False)
int_model.trustedmode = v.get('sriov',
{}).get('trustedmode', False)
model.interfaces.append(int_model)
@ -705,8 +711,8 @@ class DeckhandIngester(IngesterPlugin):
self.logger.warning(
"Duplicate document schemas found for document kind %s."
% schema_for)
self.logger.debug(
"Loaded schema for document kind %s." % schema_for)
self.logger.debug("Loaded schema for document kind %s." %
schema_for)
self.v1_doc_schemas[schema_for] = schema.get('data')
f.close()

View File

@ -28,6 +28,7 @@ from drydock_provisioner.ingester.plugins import IngesterPlugin
class YamlIngester(IngesterPlugin):
def __init__(self):
super().__init__()
self.logger = logging.getLogger('drydock.ingester.yaml')
@ -96,8 +97,10 @@ class YamlIngester(IngesterPlugin):
ctx = d.get('metadata').get('name')
else:
ctx = 'Unknown'
ps.add_status_msg(
msg=msg, error=True, ctx_type='document', ctx=ctx)
ps.add_status_msg(msg=msg,
error=True,
ctx_type='document',
ctx=ctx)
ps.set_status(hd_fields.ValidationResult.Failure)
except Exception as ex:
msg = "Unexpected error processing document: %s" % str(ex)
@ -106,8 +109,10 @@ class YamlIngester(IngesterPlugin):
ctx = d.get('metadata').get('name')
else:
ctx = 'Unknown'
ps.add_status_msg(
msg=msg, error=True, ctx_type='document', ctx=ctx)
ps.add_status_msg(msg=msg,
error=True,
ctx_type='document',
ctx=ctx)
ps.set_status(hd_fields.ValidationResult.Failure)
elif api.startswith('promenade/'):
(foo, api_version) = api.split('/')
@ -193,9 +198,9 @@ class YamlIngester(IngesterPlugin):
tag_model.definition = t.get('definition', '')
if tag_model.type not in ['lshw_xpath']:
raise errors.IngesterError(
'Unknown definition_type in '
'tag_definition instance: %s' % (t.definition_type))
raise errors.IngesterError('Unknown definition_type in '
'tag_definition instance: %s' %
(t.definition_type))
model.tag_definitions.append(tag_model)
auth_keys = data.get('authorized_keys', [])
@ -637,8 +642,8 @@ class YamlIngester(IngesterPlugin):
self.logger.warning(
"Duplicate document schemas found for document kind %s."
% schema_for)
self.logger.debug(
"Loaded schema for document kind %s." % schema_for)
self.logger.debug("Loaded schema for document kind %s." %
schema_for)
self.v1_doc_schemas[schema_for] = schema
f.close()

View File

@ -104,10 +104,10 @@ class Utils(object):
effective_list.extend(
[x for x in child_list if not x.startswith("!")])
effective_list.extend(
[x for x in parent_list
if ("!" + x) not in child_list
and x not in effective_list])
effective_list.extend([
x for x in parent_list
if ("!" + x) not in child_list and x not in effective_list
])
except TypeError:
raise TypeError("Error iterating list argument")

View File

@ -91,6 +91,7 @@ class DrydockPersistentObject(base.VersionedObject):
class DrydockObjectListBase(base.ObjectListBase):
def __init__(self, **kwargs):
super(DrydockObjectListBase, self).__init__(**kwargs)

View File

@ -34,18 +34,13 @@ class BootAction(base.DrydockPersistentObject, base.DrydockObject):
VERSION = '1.0'
fields = {
'name':
ovo_fields.StringField(),
'source':
hd_fields.ModelSourceField(nullable=False),
'asset_list':
ovo_fields.ObjectField('BootActionAssetList', nullable=False),
'node_filter':
ovo_fields.ObjectField('NodeFilterSet', nullable=True),
'target_nodes':
ovo_fields.ListOfStringsField(nullable=True),
'signaling':
ovo_fields.BooleanField(default=True),
'name': ovo_fields.StringField(),
'source': hd_fields.ModelSourceField(nullable=False),
'asset_list': ovo_fields.ObjectField('BootActionAssetList',
nullable=False),
'node_filter': ovo_fields.ObjectField('NodeFilterSet', nullable=True),
'target_nodes': ovo_fields.ListOfStringsField(nullable=True),
'signaling': ovo_fields.BooleanField(default=True),
}
def __init__(self, **kwargs):
@ -160,8 +155,9 @@ class BootActionAsset(base.DrydockObject):
action_key, design_ref)
if self.location is not None:
rendered_location = self.execute_pipeline(
self.location, self.location_pipeline, tpl_ctx=tpl_ctx)
rendered_location = self.execute_pipeline(self.location,
self.location_pipeline,
tpl_ctx=tpl_ctx)
data_block = self.resolve_asset_location(rendered_location)
if self.type == hd_fields.BootactionAssetType.PackageList:
self._parse_package_list(data_block)
@ -169,8 +165,9 @@ class BootActionAsset(base.DrydockObject):
data_block = self.data.encode('utf-8')
if self.type != hd_fields.BootactionAssetType.PackageList:
value = self.execute_pipeline(
data_block, self.data_pipeline, tpl_ctx=tpl_ctx)
value = self.execute_pipeline(data_block,
self.data_pipeline,
tpl_ctx=tpl_ctx)
if isinstance(value, str):
value = value.encode('utf-8')
@ -222,9 +219,9 @@ class BootActionAsset(base.DrydockObject):
:param design_ref: The design reference representing ``site_design``
"""
return dict(
node=self._get_node_context(nodename, site_design),
action=self._get_action_context(action_id, action_key, design_ref))
return dict(node=self._get_node_context(nodename, site_design),
action=self._get_action_context(action_id, action_key,
design_ref))
def _get_action_context(self, action_id, action_key, design_ref):
"""Create the action-specific context items for template rendering.
@ -233,11 +230,10 @@ class BootActionAsset(base.DrydockObject):
:param action_key: random key of this boot action
:param design_ref: Design reference representing the site design
"""
return dict(
action_id=ulid2.ulid_to_base32(action_id),
action_key=action_key.hex(),
report_url=config.config_mgr.conf.bootactions.report_url,
design_ref=design_ref)
return dict(action_id=ulid2.ulid_to_base32(action_id),
action_key=action_key.hex(),
report_url=config.config_mgr.conf.bootactions.report_url,
design_ref=design_ref)
def _get_node_context(self, nodename, site_design):
"""Create the node-specific context items for template rendering.
@ -246,14 +242,13 @@ class BootActionAsset(base.DrydockObject):
:param site_design: full site design
"""
node = site_design.get_baremetal_node(nodename)
return dict(
hostname=nodename,
domain=node.get_domain(site_design),
tags=[t for t in node.tags],
labels={k: v
for (k, v) in node.owner_data.items()},
network=self._get_node_network_context(node, site_design),
interfaces=self._get_node_interface_context(node))
return dict(hostname=nodename,
domain=node.get_domain(site_design),
tags=[t for t in node.tags],
labels={k: v
for (k, v) in node.owner_data.items()},
network=self._get_node_network_context(node, site_design),
interfaces=self._get_node_interface_context(node))
def _get_node_network_context(self, node, site_design):
"""Create a node's network configuration context.
@ -298,8 +293,8 @@ class BootActionAsset(base.DrydockObject):
return ReferenceResolver.resolve_reference(asset_url)
except Exception as ex:
raise errors.InvalidAssetLocation(
"Unable to resolve asset reference %s: %s" % (asset_url,
str(ex)))
"Unable to resolve asset reference %s: %s" %
(asset_url, str(ex)))
def execute_pipeline(self, data, pipeline, tpl_ctx=None):
"""Execute a pipeline against a data element.

View File

@ -16,6 +16,7 @@ from oslo_versionedobjects import fields
class BaseDrydockEnum(fields.Enum):
def __init__(self):
super(BaseDrydockEnum, self).__init__(valid_values=self.__class__.ALL)

View File

@ -116,8 +116,9 @@ class HostProfile(base.DrydockPersistentObject, base.DrydockObject):
for f in inheritable_field_list:
setattr(
self, f,
objects.Utils.apply_field_inheritance(
getattr(self, f, None), getattr(parent, f, None)))
objects.Utils.apply_field_inheritance(getattr(self, f, None),
getattr(parent, f,
None)))
# Now compute inheritance for complex types
self.oob_parameters = objects.Utils.merge_dicts(
@ -310,8 +311,8 @@ class HostVolumeGroup(base.DrydockObject):
fields = {
'name': obj_fields.StringField(),
'vg_uuid': obj_fields.StringField(nullable=True),
'logical_volumes': obj_fields.ObjectField(
'HostVolumeList', nullable=True),
'logical_volumes': obj_fields.ObjectField('HostVolumeList',
nullable=True),
}
def __init__(self, **kwargs):
@ -431,8 +432,8 @@ class HostStorageDevice(base.DrydockObject):
'name': obj_fields.StringField(),
'volume_group': obj_fields.StringField(nullable=True),
'labels': obj_fields.DictOfStringsField(nullable=True),
'partitions': obj_fields.ObjectField(
'HostPartitionList', nullable=True),
'partitions': obj_fields.ObjectField('HostPartitionList',
nullable=True),
}
def __init__(self, **kwargs):
@ -535,28 +536,18 @@ class HostPartition(base.DrydockObject):
VERSION = '1.0'
fields = {
'name':
obj_fields.StringField(),
'source':
hd_fields.ModelSourceField(),
'bootable':
obj_fields.BooleanField(default=False),
'volume_group':
obj_fields.StringField(nullable=True),
'part_uuid':
obj_fields.UUIDField(nullable=True),
'size':
obj_fields.StringField(nullable=True),
'mountpoint':
obj_fields.StringField(nullable=True),
'fstype':
obj_fields.StringField(nullable=True, default='ext4'),
'mount_options':
obj_fields.StringField(nullable=True, default='defaults'),
'fs_uuid':
obj_fields.UUIDField(nullable=True),
'fs_label':
obj_fields.StringField(nullable=True),
'name': obj_fields.StringField(),
'source': hd_fields.ModelSourceField(),
'bootable': obj_fields.BooleanField(default=False),
'volume_group': obj_fields.StringField(nullable=True),
'part_uuid': obj_fields.UUIDField(nullable=True),
'size': obj_fields.StringField(nullable=True),
'mountpoint': obj_fields.StringField(nullable=True),
'fstype': obj_fields.StringField(nullable=True, default='ext4'),
'mount_options': obj_fields.StringField(nullable=True,
default='defaults'),
'fs_uuid': obj_fields.UUIDField(nullable=True),
'fs_label': obj_fields.StringField(nullable=True),
}
def __init__(self, **kwargs):
@ -672,24 +663,16 @@ class HostVolume(base.DrydockObject):
VERSION = '1.0'
fields = {
'name':
obj_fields.StringField(),
'source':
hd_fields.ModelSourceField(),
'lv_uuid':
obj_fields.UUIDField(nullable=True),
'size':
obj_fields.StringField(nullable=True),
'mountpoint':
obj_fields.StringField(nullable=True),
'fstype':
obj_fields.StringField(nullable=True, default='ext4'),
'mount_options':
obj_fields.StringField(nullable=True, default='defaults'),
'fs_uuid':
obj_fields.UUIDField(nullable=True),
'fs_label':
obj_fields.StringField(nullable=True),
'name': obj_fields.StringField(),
'source': hd_fields.ModelSourceField(),
'lv_uuid': obj_fields.UUIDField(nullable=True),
'size': obj_fields.StringField(nullable=True),
'mountpoint': obj_fields.StringField(nullable=True),
'fstype': obj_fields.StringField(nullable=True, default='ext4'),
'mount_options': obj_fields.StringField(nullable=True,
default='defaults'),
'fs_uuid': obj_fields.UUIDField(nullable=True),
'fs_label': obj_fields.StringField(nullable=True),
}
def __init__(self, **kwargs):

View File

@ -60,8 +60,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
self.source = hd_fields.ModelSource.Compiled
self.resolve_kernel_params(site_design)
if resolve_aliases:
self.logger.debug(
"Resolving device aliases on node %s" % self.name)
self.logger.debug("Resolving device aliases on node %s" %
self.name)
self.apply_logicalnames(site_design, state_manager)
return
@ -261,8 +261,7 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
"""
if "regexp:" in address:
self.logger.info(
"Regexp: prefix has been detected in address: %s" %
(address))
"Regexp: prefix has been detected in address: %s" % (address))
address_regexp = address.replace("regexp:", "")
nodes = xml_root.findall(".//node")
logicalnames = []
@ -272,8 +271,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
if node.get('class') == "network":
address = node.find('businfo').text.replace("pci@", "")
self.logger.debug(
"A network device PCI address found. Address=%s. Checking for regexp %s match..." %
(address, address_regexp))
"A network device PCI address found. Address=%s. Checking for regexp %s match..."
% (address, address_regexp))
if re.match(address_regexp, address):
logicalnames.append(node.find('logicalname').text)
addresses.append(address)
@ -282,26 +281,25 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
(address, address_regexp))
else:
self.logger.debug(
"A network device with PCI address=%s does not match the regex %s." %
(address, address_regexp))
"A network device with PCI address=%s does not match the regex %s."
% (address, address_regexp))
if len(logicalnames) >= 1 and logicalnames[0]:
if len(logicalnames) > 1:
self.logger.info(
"Multiple nodes found for businfo=%s@%s" %
(bus_type, address_regexp))
self.logger.info("Multiple nodes found for businfo=%s@%s" %
(bus_type, address_regexp))
for logicalname in reversed(logicalnames[0].split("/")):
address = addresses[0]
self.logger.info(
"Logicalname build dict: node_name = %s, alias_name = %s, "
"bus_type = %s, address = %s, to logicalname = %s" %
(self.get_name(), alias_name, bus_type, address,
logicalname))
logicalname))
return logicalname
else:
self.logger.info(
"No prefix has been detected in address: %s" %
(address))
nodes = xml_root.findall(".//node[businfo='" + bus_type + "@" + address + "'].logicalname")
self.logger.info("No prefix has been detected in address: %s" %
(address))
nodes = xml_root.findall(".//node[businfo='" + bus_type + "@"
+ address + "'].logicalname")
if len(nodes) >= 1 and nodes[0].text:
if (len(nodes) > 1):
self.logger.info("Multiple nodes found for businfo=%s@%s" %
@ -311,7 +309,7 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
"Logicalname build dict: node_name = %s, alias_name = %s, "
"bus_type = %s, address = %s, to logicalname = %s" %
(self.get_name(), alias_name, bus_type, address,
logicalname))
logicalname))
return logicalname
self.logger.debug(
"Logicalname build dict: alias_name = %s, bus_type = %s, address = %s, not found"
@ -327,8 +325,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
"""
logicalnames = {}
results = state_manager.get_build_data(
node_name=self.get_name(), latest=True)
results = state_manager.get_build_data(node_name=self.get_name(),
latest=True)
xml_data = None
for result in results:
if result.generator == "lshw":
@ -351,8 +349,8 @@ class BaremetalNode(drydock_provisioner.objects.hostprofile.HostProfile):
"resolving logical names for node %s", self.get_name())
raise
else:
self.logger.info(
"No Build Data found for node_name %s" % (self.get_name()))
self.logger.info("No Build Data found for node_name %s" %
(self.get_name()))
self.logicalnames = logicalnames

View File

@ -29,8 +29,8 @@ class Rack(base.DrydockPersistentObject, base.DrydockObject):
'name': obj_fields.StringField(nullable=False),
'site': obj_fields.StringField(nullable=False),
'source': hd_fields.ModelSourceField(nullable=False),
'tor_switches': obj_fields.ObjectField(
'TorSwitchList', nullable=False),
'tor_switches': obj_fields.ObjectField('TorSwitchList',
nullable=False),
'location': obj_fields.DictOfStringsField(nullable=False),
'local_networks': obj_fields.ListOfStringsField(nullable=True),
}

View File

@ -208,8 +208,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
if n.get_id() == network_key:
return n
raise errors.DesignError(
"Network %s not found in design state" % network_key)
raise errors.DesignError("Network %s not found in design state" %
network_key)
def add_network_link(self, new_network_link):
if new_network_link is None:
@ -226,8 +226,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
if network_link.get_id() == link_key:
return network_link
raise errors.DesignError(
"NetworkLink %s not found in design state" % link_key)
raise errors.DesignError("NetworkLink %s not found in design state" %
link_key)
def add_rack(self, new_rack):
if new_rack is None:
@ -243,8 +243,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
for r in self.racks:
if r.get_id() == rack_key:
return r
raise errors.DesignError(
"Rack %s not found in design state" % rack_key)
raise errors.DesignError("Rack %s not found in design state" %
rack_key)
def add_bootaction(self, new_ba):
"""Add a bootaction definition to this site design.
@ -265,8 +265,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
for ba in self.bootactions:
if ba.get_id() == ba_key:
return ba
raise errors.DesignError(
"BootAction %s not found in design state" % ba_key)
raise errors.DesignError("BootAction %s not found in design state" %
ba_key)
def add_host_profile(self, new_host_profile):
if new_host_profile is None:
@ -283,8 +283,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
if p.get_id() == profile_key:
return p
raise errors.DesignError(
"HostProfile %s not found in design state" % profile_key)
raise errors.DesignError("HostProfile %s not found in design state" %
profile_key)
def add_hardware_profile(self, new_hardware_profile):
if new_hardware_profile is None:
@ -319,8 +319,8 @@ class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
if n.get_id() == node_key:
return n
raise errors.DesignError(
"BaremetalNode %s not found in design state" % node_key)
raise errors.DesignError("BaremetalNode %s not found in design state" %
node_key)
def add_promenade_config(self, prom_conf):
if self.prom_configs is None:

View File

@ -91,11 +91,10 @@ class Task(object):
self.result.failures) > 0):
if not max_attempts or (max_attempts
and self.retry < max_attempts):
self.add_status_msg(
msg="Retrying task for failed entities.",
error=False,
ctx='NA',
ctx_type='NA')
self.add_status_msg(msg="Retrying task for failed entities.",
error=False,
ctx='NA',
ctx_type='NA')
self.retry = self.retry + 1
if len(self.result.successes) > 0:
self.result.status = hd_fields.ActionResult.Success
@ -104,11 +103,10 @@ class Task(object):
self.save()
return True
else:
self.add_status_msg(
msg="Retry requested, out of attempts.",
error=False,
ctx='NA',
ctx_type='NA')
self.add_status_msg(msg="Retry requested, out of attempts.",
error=False,
ctx='NA',
ctx_type='NA')
raise errors.MaxRetriesReached("Retries reached max attempts.")
else:
return False
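retry() re-arms a task only while failed entities remain and the attempt counter is under max_attempts; exhausting the budget raises instead of looping silently. The gate reduced to its arithmetic:

class MaxRetriesReached(Exception):
    pass

def should_retry(failures, retry_count, max_attempts):
    if not failures:
        return False                      # nothing left to redo
    if not max_attempts or retry_count < max_attempts:
        return True                       # budget remains, go again
    raise MaxRetriesReached("Retries reached max attempts.")

assert should_retry(['node1'], 0, 3)
assert not should_retry([], 0, 3)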
@ -182,12 +180,11 @@ class Task(object):
raise errors.OrchestratorError("Cannot add subtask for parent"
" marked for termination")
if self.statemgr.add_subtask(self.task_id, subtask.task_id):
self.add_status_msg(
msg="Started subtask %s for action %s" % (str(
subtask.get_id()), subtask.action),
error=False,
ctx=str(self.get_id()),
ctx_type='task')
self.add_status_msg(msg="Started subtask %s for action %s" %
(str(subtask.get_id()), subtask.action),
error=False,
ctx=str(self.get_id()),
ctx_type='task')
self.subtask_id_list.append(subtask.task_id)
subtask.parent_task_id = self.task_id
subtask.save()
@ -261,8 +258,8 @@ class Task(object):
:param action_filter: string action name to filter subtasks on
"""
self.logger.debug(
"Bubbling subtask results up to task %s." % str(self.task_id))
self.logger.debug("Bubbling subtask results up to task %s." %
str(self.task_id))
self.result.successes = []
self.result.failures = []
for st in self.statemgr.get_complete_subtasks(self.task_id):
@ -340,13 +337,12 @@ class Task(object):
msg_list = task_result.message_list
for m in msg_list:
self.add_status_msg(
msg=m.msg,
error=m.error,
ctx_type=m.ctx_type,
ctx=m.ctx,
ts=m.ts,
**m.extra)
self.add_status_msg(msg=m.msg,
error=m.error,
ctx_type=m.ctx_type,
ctx=m.ctx,
ts=m.ts,
**m.extra)
def to_db(self, include_id=True):
"""Convert this instance to a dictionary for use persisting to a db.
@ -666,9 +662,8 @@ class TaskStatusMessage(object):
:param d: dictionary of values
"""
i = TaskStatusMessage(
d.get('message', None), d.get('error'), d.get('context_type'),
d.get('context'))
i = TaskStatusMessage(d.get('message', None), d.get('error'),
d.get('context_type'), d.get('context'))
if 'extra' in d:
i.extra = d.get('extra')
i.ts = d.get('ts', None)

View File

@ -123,8 +123,8 @@ class DocumentReference(base.DrydockObject):
def __hash__(self):
"""Override default hashing function."""
return hash(
(str(self.doc_type), str(self.doc_schema), str(self.doc_name)))
return hash((str(self.doc_type), str(self.doc_schema),
str(self.doc_name)))
def to_dict(self):
"""Serialize to a dictionary for further serialization."""

View File

@ -72,8 +72,8 @@ class BaseAction(object):
if len(target_nodes) > 1:
self.logger.info(
"Found multiple target nodes in task %s, splitting..." % str(
self.task.get_id()))
"Found multiple target nodes in task %s, splitting..." %
str(self.task.get_id()))
split_tasks = dict()
with concurrent.futures.ThreadPoolExecutor(max_workers=64) as te:
@ -101,8 +101,8 @@ class BaseAction(object):
:param timeout: The number of seconds to wait for all Futures to complete
:param bubble: Whether to bubble results from collected subtasks
"""
finished, timed_out = concurrent.futures.wait(
subtask_futures.values(), timeout=timeout)
finished, timed_out = concurrent.futures.wait(subtask_futures.values(),
timeout=timeout)
for k, v in subtask_futures.items():
if not v.done():
@ -116,8 +116,8 @@ class BaseAction(object):
else:
if v.exception():
self.logger.error(
"Uncaught excetion in subtask %s future:" % str(
uuid.UUID(bytes=k)),
"Uncaught excetion in subtask %s future:" %
str(uuid.UUID(bytes=k)),
exc_info=v.exception())
st = self.state_manager.get_task(uuid.UUID(bytes=k))
st.bubble_results()
@ -184,16 +184,20 @@ class Noop(BaseAction):
self.logger.debug("Terminating action.")
self.task.set_status(hd_fields.TaskStatus.Terminated)
self.task.failure()
self.task.add_status_msg(
msg="Action terminated.", ctx_type='NA', ctx='NA', error=False)
self.task.add_status_msg(msg="Action terminated.",
ctx_type='NA',
ctx='NA',
error=False)
else:
self.logger.debug("Marked task as successful.")
self.task.set_status(hd_fields.TaskStatus.Complete)
target_nodes = self.orchestrator.get_target_nodes(self.task)
for n in target_nodes:
self.task.success(focus=n.name)
self.task.add_status_msg(
msg="Noop action.", ctx_type='NA', ctx='NA', error=False)
self.task.add_status_msg(msg="Noop action.",
ctx_type='NA',
ctx='NA',
error=False)
self.task.save()
self.logger.debug("Saved task state.")
self.logger.debug("Finished Noop Action.")
@ -226,11 +230,10 @@ class DestroyNodes(BaseAction):
target_nodes = self.orchestrator.get_target_nodes(self.task)
if not target_nodes:
self.task.add_status_msg(
msg="No nodes in scope, no work to do.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.add_status_msg(msg="No nodes in scope, no work to do.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.success()
self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save()
@ -325,11 +328,11 @@ class VerifySite(BaseAction):
node_driver_task = self.state_manager.get_task(
node_driver_task.get_id())
self.task.add_status_msg(
msg="Collected subtask %s" % str(node_driver_task.get_id()),
error=False,
ctx=str(node_driver_task.get_id()),
ctx_type='task')
self.task.add_status_msg(msg="Collected subtask %s" %
str(node_driver_task.get_id()),
error=False,
ctx=str(node_driver_task.get_id()),
ctx_type='task')
self.task = self.state_manager.get_task(self.task.get_id())
self.task.set_status(hd_fields.TaskStatus.Complete)
@ -386,11 +389,11 @@ class PrepareSite(BaseAction):
driver.execute_task(config_prov_task.get_id())
self.task.add_status_msg(
msg="Collected subtask %s" % str(config_prov_task.get_id()),
error=False,
ctx=str(config_prov_task.get_id()),
ctx_type='task')
self.task.add_status_msg(msg="Collected subtask %s" %
str(config_prov_task.get_id()),
error=False,
ctx=str(config_prov_task.get_id()),
ctx_type='task')
self.logger.info("Node driver task %s:%s is complete." %
(config_prov_task.get_id(), config_prov_task.action))
@ -410,13 +413,13 @@ class PrepareSite(BaseAction):
driver.execute_task(site_network_task.get_id())
self.task.add_status_msg(
msg="Collected subtask %s" % str(site_network_task.get_id()),
error=False,
ctx=str(site_network_task.get_id()),
ctx_type='task')
self.logger.info(
"Node driver task %s complete" % (site_network_task.get_id()))
self.task.add_status_msg(msg="Collected subtask %s" %
str(site_network_task.get_id()),
error=False,
ctx=str(site_network_task.get_id()),
ctx_type='task')
self.logger.info("Node driver task %s complete" %
(site_network_task.get_id()))
def step_usercredentials(self, driver):
"""Run the ConfigureUserCredentials step of this action.
@ -434,13 +437,13 @@ class PrepareSite(BaseAction):
driver.execute_task(user_creds_task.get_id())
self.task.add_status_msg(
msg="Collected subtask %s" % str(user_creds_task.get_id()),
error=False,
ctx=str(user_creds_task.get_id()),
ctx_type='task')
self.logger.info(
"Node driver task %s complete" % (user_creds_task.get_id()))
self.task.add_status_msg(msg="Collected subtask %s" %
str(user_creds_task.get_id()),
error=False,
ctx=str(user_creds_task.get_id()),
ctx_type='task')
self.logger.info("Node driver task %s complete" %
(user_creds_task.get_id()))
class VerifyNodes(BaseAction):
@ -504,19 +507,18 @@ class VerifyNodes(BaseAction):
try:
self._collect_subtask_futures(
task_futures,
timeout=(
config.config_mgr.conf.timeouts.drydock_timeout * 60))
self.logger.debug(
"Collected subtasks for task %s" % str(self.task.get_id()))
timeout=(config.config_mgr.conf.timeouts.drydock_timeout
* 60))
self.logger.debug("Collected subtasks for task %s" %
str(self.task.get_id()))
except errors.CollectSubtaskTimeout as ex:
self.logger.warning(str(ex))
else:
# no target nodes
self.task.add_status_msg(
msg="No nodes in scope, no work to do.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.add_status_msg(msg="No nodes in scope, no work to do.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.success()
# Set task complete and persist that info.
@ -554,11 +556,10 @@ class PrepareNodes(BaseAction):
target_nodes = self.orchestrator.get_target_nodes(self.task)
if not target_nodes:
self.task.add_status_msg(
msg="No nodes in scope, no work to do.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.add_status_msg(msg="No nodes in scope, no work to do.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.success()
self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save()
@ -701,8 +702,9 @@ class PrepareNodes(BaseAction):
create_nodefilter_from_nodelist(node_list))
self.task.register_subtask(node_identify_task)
self.logger.info("Starting node driver task %s to identify nodes."
% (node_identify_task.get_id()))
self.logger.info(
"Starting node driver task %s to identify nodes." %
(node_identify_task.get_id()))
node_driver.execute_task(node_identify_task.get_id())
@ -742,8 +744,8 @@ class PrepareNodes(BaseAction):
oob_driver = self._get_driver('oob', oob_type)
if oob_driver is None:
self.logger.warning(
"Node OOB type %s has no enabled driver." % oob_type)
self.logger.warning("Node OOB type %s has no enabled driver." %
oob_type)
self.task.failure()
for n in oob_nodes:
self.task.add_status_msg(
@ -772,8 +774,8 @@ class PrepareNodes(BaseAction):
self._collect_subtask_futures(
task_futures,
timeout=(config.config_mgr.conf.timeouts.drydock_timeout * 60))
self.logger.debug(
"Collected subtasks for task %s" % str(self.task.get_id()))
self.logger.debug("Collected subtasks for task %s" %
str(self.task.get_id()))
except errors.CollectSubtaskTimeout as ex:
self.logger.warning(str(ex))
@ -799,8 +801,8 @@ class PrepareNodes(BaseAction):
oob_driver = self._get_driver('oob', oob_type)
if oob_driver is None:
self.logger.warning(
"Node OOB type %s has no enabled driver." % oob_type)
self.logger.warning("Node OOB type %s has no enabled driver." %
oob_type)
self.task.failure()
for n in oob_nodes:
self.task.add_status_msg(
@ -830,8 +832,8 @@ class PrepareNodes(BaseAction):
self._collect_subtask_futures(
task_futures,
timeout=(config.config_mgr.conf.timeouts.drydock_timeout * 60))
self.logger.debug(
"Collected subtasks for task %s" % str(self.task.get_id()))
self.logger.debug("Collected subtasks for task %s" %
str(self.task.get_id()))
except errors.CollectSubtaskTimeout as ex:
self.logger.warning(str(ex))
@ -897,11 +899,10 @@ class DeployNodes(BaseAction):
target_nodes = self.orchestrator.get_target_nodes(self.task)
if not target_nodes:
self.task.add_status_msg(
msg="No nodes in scope, no work to do.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.add_status_msg(msg="No nodes in scope, no work to do.",
error=False,
ctx='NA',
ctx_type='NA')
self.task.success()
self.task.set_status(hd_fields.TaskStatus.Complete)
self.task.save()
@ -952,8 +953,8 @@ class DeployNodes(BaseAction):
if (node_storage_task is not None
and len(node_storage_task.result.successes) > 0):
self.logger.info(
"Configured storage on %s nodes, configuring platform." % (len(
node_storage_task.result.successes)))
"Configured storage on %s nodes, configuring platform." %
(len(node_storage_task.result.successes)))
node_platform_task = self.orchestrator.create_task(
design_ref=self.task.design_ref,
@ -979,8 +980,8 @@ class DeployNodes(BaseAction):
if node_platform_task is not None and len(
node_platform_task.result.successes) > 0:
self.logger.info(
"Configured platform on %s nodes, starting deployment." % (len(
node_platform_task.result.successes)))
"Configured platform on %s nodes, starting deployment." %
(len(node_platform_task.result.successes)))
while True:
if node_deploy_task is None:
@ -1078,8 +1079,9 @@ class RelabelNodes(BaseAction):
node_filter=nf)
self.task.register_subtask(relabel_node_task)
self.logger.info("Starting kubernetes driver task %s to relabel nodes."
% (relabel_node_task.get_id()))
self.logger.info(
"Starting kubernetes driver task %s to relabel nodes." %
(relabel_node_task.get_id()))
kubernetes_driver.execute_task(relabel_node_task.get_id())
relabel_node_task = self.state_manager.get_task(
@ -1118,8 +1120,8 @@ class BootactionReport(BaseAction):
bas = self.state_manager.get_boot_actions_for_node(n)
running_bas = {
k: v
for (k, v) in bas.items() if v.
get('action_status') == hd_fields.ActionResult.Incomplete
for (k, v) in bas.items() if v.get('action_status')
== hd_fields.ActionResult.Incomplete
}
if len(running_bas) > 0:
still_running = True
@ -1166,11 +1168,11 @@ class BootactionReport(BaseAction):
ctx=n,
ctx_type='node')
for ba in running_bas.values():
self.task.add_status_msg(
msg="Boot action %s timed out." % (ba['action_name']),
error=True,
ctx=n,
ctx_type='node')
self.task.add_status_msg(msg="Boot action %s timed out." %
(ba['action_name']),
error=True,
ctx=n,
ctx_type='node')
if len(failure_bas) == 0 and len(running_bas) == 0:
self.task.success(focus=n)
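
For context on the comprehension being re-wrapped in the BootactionReport hunk above: it selects the boot actions whose status is still Incomplete, and the report loop keeps polling while any remain. A standalone sketch of that selection, with invented status strings standing in for hd_fields.ActionResult:

# Invented stand-ins for hd_fields.ActionResult values; illustration only.
INCOMPLETE, SUCCESS = 'incomplete', 'success'

bas = {
    'ba-1': {'action_status': INCOMPLETE},
    'ba-2': {'action_status': SUCCESS},
}
# Same shape as the comprehension in BootactionReport above.
running_bas = {
    k: v
    for (k, v) in bas.items() if v.get('action_status') == INCOMPLETE
}
still_running = len(running_bas) > 0
print(sorted(running_bas))  # ['ba-1']
print(still_running)        # True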

View File

@ -41,7 +41,9 @@ from .validations.validator import Validator
class Orchestrator(object):
"""Defines functionality for task execution workflow."""
def __init__(self, enabled_drivers=None, state_manager=None,
def __init__(self,
enabled_drivers=None,
state_manager=None,
ingester=None):
"""Initialize the orchestrator. A single instance should be executing at a time.
@ -81,9 +83,8 @@ class Orchestrator(object):
if self.enabled_drivers.get('oob', None) is None:
self.enabled_drivers['oob'] = []
self.enabled_drivers['oob'].append(
oob_driver_class(
state_manager=state_manager,
orchestrator=self))
oob_driver_class(state_manager=state_manager,
orchestrator=self))
node_driver_name = enabled_drivers.node_driver
if node_driver_name is not None:
@ -97,8 +98,8 @@ class Orchestrator(object):
network_driver_name = enabled_drivers.network_driver
if network_driver_name is not None:
m, c = network_driver_name.rsplit('.', 1)
network_driver_class = getattr(
importlib.import_module(m), c, None)
network_driver_class = getattr(importlib.import_module(m), c,
None)
if network_driver_class is not None:
self.enabled_drivers['network'] = network_driver_class(
state_manager=state_manager, orchestrator=self)
@ -106,8 +107,8 @@ class Orchestrator(object):
kubernetes_driver_name = enabled_drivers.kubernetes_driver
if kubernetes_driver_name is not None:
m, c = kubernetes_driver_name.rsplit('.', 1)
kubernetes_driver_class = getattr(
importlib.import_module(m), c, None)
kubernetes_driver_class = getattr(importlib.import_module(m),
c, None)
if kubernetes_driver_class is not None:
self.enabled_drivers[
'kubernetes'] = kubernetes_driver_class(
@ -191,8 +192,8 @@ class Orchestrator(object):
else:
self.logger.warning(
"Task %s has unsupported action %s, ending execution."
% (str(next_task.get_id()),
next_task.action))
% (str(
next_task.get_id()), next_task.action))
next_task.add_status_msg(
msg="Unsupported action %s." %
next_task.action,
@ -230,8 +231,8 @@ class Orchestrator(object):
:param propagate: whether the termination should propagate to subtasks
"""
if task is None:
raise errors.OrchestratorError(
"Could not find task %s" % str(task.get_id()))
raise errors.OrchestratorError("Could not find task %s" %
str(task.get_id()))
else:
# Terminate initial task first to prevent add'l subtasks
self.logger.debug("Terminating task %s." % str(task.get_id()))
@ -243,8 +244,9 @@ class Orchestrator(object):
for st_id in subtasks:
st = self.state_manager.get_task(st_id)
self.terminate_task(
st, propagate=True, terminated_by=terminated_by)
self.terminate_task(st,
propagate=True,
terminated_by=terminated_by)
def create_task(self, **kwargs):
"""Create a new task and persist it."""
@ -263,13 +265,14 @@ class Orchestrator(object):
nodes = site_design.baremetal_nodes
for n in nodes or []:
try:
n.compile_applied_model(
site_design,
state_manager=self.state_manager,
resolve_aliases=resolve_aliases)
n.compile_applied_model(site_design,
state_manager=self.state_manager,
resolve_aliases=resolve_aliases)
except Exception as ex:
self.logger.debug(
"Failed to build applied model for node %s.", n.name, exc_info=ex)
"Failed to build applied model for node %s.",
n.name,
exc_info=ex)
raise ex
except AttributeError:
self.logger.debug(
@ -305,21 +308,21 @@ class Orchestrator(object):
try:
status, site_design = self.get_described_site(design_ref)
if status.status == hd_fields.ValidationResult.Success:
self.compute_model_inheritance(
site_design, resolve_aliases=resolve_aliases)
self.compute_model_inheritance(site_design,
resolve_aliases=resolve_aliases)
self.compute_bootaction_targets(site_design)
self.render_route_domains(site_design)
status = val.validate_design(site_design, result_status=status)
except Exception as ex:
if status is not None:
status.add_status_msg(
"Error loading effective site: %s" % str(ex),
error=True,
ctx='NA',
ctx_type='NA')
status.add_status_msg("Error loading effective site: %s" %
str(ex),
error=True,
ctx='NA',
ctx_type='NA')
status.set_status(hd_fields.ActionResult.Failure)
self.logger.error(
"Error getting site definition: %s" % str(ex), exc_info=ex)
self.logger.error("Error getting site definition: %s" % str(ex),
exc_info=ex)
return status, site_design
@ -368,9 +371,8 @@ class Orchestrator(object):
nf['filter_set_type'] = 'intersection'
nf['filter_set'] = [
dict(
node_names=[x.get_id() for x in node_list],
filter_type='union')
dict(node_names=[x.get_id() for x in node_list],
filter_type='union')
]
return nf
@ -418,8 +420,8 @@ class Orchestrator(object):
for f in node_filter.get('filter_set', []):
result_sets.append(self.process_filter(target_nodes, f))
return self.join_filter_sets(
node_filter.get('filter_set_type'), result_sets)
return self.join_filter_sets(node_filter.get('filter_set_type'),
result_sets)
elif isinstance(node_filter, objects.NodeFilterSet):
for f in node_filter.filter_set:
@ -434,8 +436,8 @@ class Orchestrator(object):
elif filter_set_type == 'intersection':
return self.list_intersection(*result_sets)
else:
raise errors.OrchestratorError(
"Unknown filter set type %s" % filter_set_type)
raise errors.OrchestratorError("Unknown filter set type %s" %
filter_set_type)
def process_filter(self, node_set, filter_set):
"""Take a filter and apply it to the node_set.
@ -500,11 +502,10 @@ class Orchestrator(object):
target_nodes['rack_labels'] = node_set
if set_type == 'union':
return self.list_union(
target_nodes.get('node_names', []),
target_nodes.get('node_tags', []),
target_nodes.get('rack_names', []),
target_nodes.get('node_labels', []))
return self.list_union(target_nodes.get('node_names', []),
target_nodes.get('node_tags', []),
target_nodes.get('rack_names', []),
target_nodes.get('node_labels', []))
elif set_type == 'intersection':
return self.list_intersection(
target_nodes.get('node_names', None),
@ -514,8 +515,8 @@ class Orchestrator(object):
except Exception as ex:
self.logger.error("Error processing node filter.", exc_info=ex)
raise errors.OrchestratorError(
"Error processing node filter: %s" % str(ex))
raise errors.OrchestratorError("Error processing node filter: %s" %
str(ex))
def list_intersection(self, a, *rest):
"""Take the intersection of a with the intersection of all the rest.
@ -569,12 +570,12 @@ class Orchestrator(object):
identity_key = None
self.logger.debug(
"Creating boot action context for node %s" % nodename)
self.logger.debug("Creating boot action context for node %s" %
nodename)
for ba in site_design.bootactions:
self.logger.debug(
"Boot actions target nodes: %s" % ba.target_nodes)
self.logger.debug("Boot actions target nodes: %s" %
ba.target_nodes)
if nodename in ba.target_nodes:
if identity_key is None:
identity_key = os.urandom(32)
@ -591,13 +592,12 @@ class Orchestrator(object):
"Boot action %s has disabled signaling, marking unreported."
% ba.name)
action_id = ulid2.generate_binary_ulid()
self.state_manager.post_boot_action(
nodename,
task.get_id(),
identity_key,
action_id,
ba.name,
action_status=init_status)
self.state_manager.post_boot_action(nodename,
task.get_id(),
identity_key,
action_id,
ba.name,
action_status=init_status)
return identity_key
def find_node_package_lists(self, nodename, task):
@ -611,8 +611,8 @@ class Orchestrator(object):
if site_design.bootactions is None:
return None
self.logger.debug(
"Extracting package install list for node %s" % nodename)
self.logger.debug("Extracting package install list for node %s" %
nodename)
pkg_list = dict()
@ -668,22 +668,22 @@ class Orchestrator(object):
metric = None
if 'routes' in n and n.routes is not None:
for r in n.routes:
if 'routedomain' in r and r.get('routedomain',
None) == rd:
if 'routedomain' in r and r.get(
'routedomain', None) == rd:
gw = r.get('gateway')
metric = r.get('metric')
self.logger.debug(
"Use gateway %s for routedomain %s "
"on network %s." % (gw, rd,
n.get_name()))
"on network %s." %
(gw, rd, n.get_name()))
break
if gw is not None and metric is not None:
for cidr in rd_cidrs:
if cidr != n.cidr:
n.routes.append(
dict(
subnet=cidr, gateway=gw,
metric=metric))
dict(subnet=cidr,
gateway=gw,
metric=metric))
else:
msg = "Invalid network model: {}. Cannot find " \
"routes field in network with routedomain: " \

View File

@ -18,6 +18,7 @@ import drydock_provisioner.error as errors
class SimpleBytes():
def calculate_bytes(size_str):
"""
Calculate the size in bytes of a size_str.
@ -34,8 +35,8 @@ class SimpleBytes():
match = regex.match(size_str)
if not match:
raise errors.InvalidSizeFormat(
"Invalid size string format: %s" % size_str)
raise errors.InvalidSizeFormat("Invalid size string format: %s" %
size_str)
base_size = int(match.group(1))
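
The calculate_bytes helper in the hunk above matches a size string against a regex and scales the numeric part by its unit. A self-contained sketch of the same idea; the regex, the unit table, and the use of ValueError in place of errors.InvalidSizeFormat are assumptions for illustration, since the full pattern is not shown in this hunk:

import re

# Assumed unit table and pattern; the project's actual definitions differ.
UNITS = {'': 1, 'KB': 10**3, 'MB': 10**6, 'GB': 10**9, 'TB': 10**12}

def calculate_bytes(size_str):
    match = re.match(r'^(\d+)(KB|MB|GB|TB)?$', size_str)
    if not match:
        raise ValueError("Invalid size string format: %s" % size_str)
    base_size = int(match.group(1))
    return base_size * UNITS[match.group(2) or '']

print(calculate_bytes('10GB'))  # 10000000000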

View File

@ -18,6 +18,7 @@ from drydock_provisioner.orchestrator.util import SimpleBytes
class BootStorageRational(Validators):
def __init__(self):
super().__init__('Rational Boot Storage', 'DD1001')

View File

@ -17,6 +17,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class CidrValidity(Validators):
def __init__(self):
super().__init__('CIDR Validity', 'DD2006')
@ -40,8 +41,10 @@ class CidrValidity(Validators):
except ValueError as e:
if str(e) == (net.cidr + " has host bits set"):
msg = 'The provided CIDR %s has host bits set' % net.cidr
valid_cidr = ipaddress.ip_network(net.cidr, strict=False)
valid_cidr = ipaddress.ip_network(net.cidr,
strict=False)
self.report_error(
msg, [net.doc_ref],
"Provide a CIDR acceptable by MAAS: %s" % str(valid_cidr))
msg, [net.doc_ref],
"Provide a CIDR acceptable by MAAS: %s" %
str(valid_cidr))
return

View File

@ -17,6 +17,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class HostnameValidity(Validators):
def __init__(self):
super().__init__('Hostname Validity', 'DD3003')

View File

@ -2,6 +2,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class HugepagesValidity(Validators):
def __init__(self):
super().__init__('Hugepages', 'DD1008')

View File

@ -17,6 +17,7 @@ from netaddr import IPNetwork, IPAddress
class IpLocalityCheck(Validators):
def __init__(self):
super().__init__('IP Locality Check', "DD2002")

View File

@ -37,8 +37,9 @@ class MtuRational(Validators):
mtu = network_link.mtu
if mtu and (mtu < MtuRational.MIN_MTU_SIZE
or mtu > MtuRational.MAX_MTU_SIZE):
msg = ("MTU must be between %d and %d, value is %d" % (
MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu))
msg = (
"MTU must be between %d and %d, value is %d" %
(MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu))
self.report_error(
msg, [network_link.doc_ref],
"Define a valid MTU. Standard is 1500, Jumbo is 9100.")
@ -52,8 +53,9 @@ class MtuRational(Validators):
if network_mtu and (network_mtu < MtuRational.MIN_MTU_SIZE
or network_mtu > MtuRational.MAX_MTU_SIZE):
msg = ("MTU must be between %d and %d, value is %d" % (
MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, mtu))
msg = (
"MTU must be between %d and %d, value is %d" %
(MtuRational.MIN_MTU_SIZE, MtuRational.MAX_MTU_SIZE, network_mtu))
self.report_error(
msg, [network.doc_ref],
"Define a valid MTU. Standard is 1500, Jumbo is 9100.")

View File

@ -17,6 +17,7 @@ import drydock_provisioner.objects.fields as hd_fields
class NetworkTrunkingRational(Validators):
def __init__(self):
super().__init__('Network Trunking Rationality', "DD2004")
@ -30,8 +31,8 @@ class NetworkTrunkingRational(Validators):
for network_link in network_link_list:
allowed_networks = network_link.allowed_networks
# if allowed networks > 1 trunking must be enabled
if (len(allowed_networks) > 1 and network_link.
trunk_mode == hd_fields.NetworkLinkTrunkingMode.Disabled):
if (len(allowed_networks) > 1 and network_link.trunk_mode
== hd_fields.NetworkLinkTrunkingMode.Disabled):
msg = ('If there is more than 1 allowed network, '
'trunking mode must be enabled')
self.report_error(
@ -40,15 +41,17 @@ class NetworkTrunkingRational(Validators):
)
# trunking mode is disabled, default_network must be defined
if (network_link.trunk_mode == hd_fields.NetworkLinkTrunkingMode.
Disabled and network_link.native_network is None):
if (network_link.trunk_mode
== hd_fields.NetworkLinkTrunkingMode.Disabled
and network_link.native_network is None):
msg = 'Trunking mode is disabled, a trunking default_network must be defined'
self.report_error(
msg, [network_link.doc_ref],
"Non-trunked links must have a native network defined.")
elif (network_link.trunk_mode == hd_fields.NetworkLinkTrunkingMode.
Disabled and network_link.native_network is not None):
elif (network_link.trunk_mode
== hd_fields.NetworkLinkTrunkingMode.Disabled
and network_link.native_network is not None):
network = site_design.get_network(network_link.native_network)
if network and network.vlan_id:
msg = "Network link native network has a defined VLAN tag."

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class NoDuplicateIpsCheck(Validators):
def __init__(self):
super().__init__('Duplicated IP Check', "DD2005")

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class IpmiValidity(Validators):
def __init__(self):
super().__init__('Valid IPMI Configuration', 'DD4001')
@ -32,9 +33,8 @@ class IpmiValidity(Validators):
if baremetal_node.oob_type == 'ipmi':
for p in required_params:
if not baremetal_node.oob_parameters.get(p, None):
msg = (
'OOB parameter %s for IPMI node %s missing.' % p,
baremetal_node.name)
msg = ('OOB parameter %s for IPMI node %s missing.' %
(p, baremetal_node.name))
self.report_error(msg, [baremetal_node.doc_ref],
"Define OOB parameter %s" % p)
oob_addr = None

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class LibvirtValidity(Validators):
def __init__(self):
super().__init__('Valid Libvirt Configuration', 'DD4002')

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class PlatformSelection(Validators):
def __init__(self):
super().__init__('Platform Selection', 'DD3001')
@ -39,8 +40,9 @@ class PlatformSelection(Validators):
try:
valid_images = node_driver.get_available_images()
except Exception:
msg = ("Platform validation: Could not load images from driver, skipping"
"image and kernel selection validation.")
msg = (
"Platform validation: Could not load images from driver, "
"skipping image and kernel selection validation.")
self.report_warn(
msg, [],
"Cannot validate platform selection without accessing the node provisioner."
@ -53,8 +55,9 @@ class PlatformSelection(Validators):
try:
valid_kernels[i] = node_driver.get_available_kernels(i)
except Exception:
msg = ("Platform validation: Could not load kernels from driver, skipping"
"image and kernel selection validation.")
msg = (
"Platform validation: Could not load kernels from driver, "
"skipping image and kernel selection validation.")
self.report_warn(
msg, [],
"Cannot validate platform selection without accessing the node provisioner."

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class RationalNetworkBond(Validators):
def __init__(self):
super().__init__('Network Bond Rationality', 'DD1006')

View File

@ -14,7 +14,9 @@
from drydock_provisioner.orchestrator.validations.validators import Validators
class StorageMountpoints(Validators):
def __init__(self):
super().__init__('Storage Mountpoint', "DD2004")
@ -43,11 +45,10 @@ class StorageMountpoints(Validators):
if mountpoint is None:
continue
if mountpoint in mountpoint_list:
msg = ('Mountpoint "{}" already exists'
.format(mountpoint))
self.report_error(
msg, [baremetal_node.doc_ref],
'Please use unique mountpoints.')
msg = ('Mountpoint "{}" already exists'.format(
mountpoint))
self.report_error(msg, [baremetal_node.doc_ref],
'Please use unique mountpoints.')
return
else:
mountpoint_list.append(mountpoint)
@ -66,8 +67,8 @@ class StorageMountpoints(Validators):
if mountpoint is None:
continue
if mountpoint in mountpoint_list:
msg = ('Mountpoint "{}" already exists'
.format(mountpoint))
msg = ('Mountpoint "{}" already exists'.
format(mountpoint))
self.report_error(
msg, [baremetal_node.doc_ref],
'Please use unique mountpoints.')
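
The two StorageMountpoints hunks above both implement the same duplicate scan over mountpoints. A standalone sketch of that scan with invented data:

# Data and variable names invented; mirrors the duplicate scan above.
mountpoints = ['/boot', '/var', '/boot']

seen, duplicates = set(), set()
for mp in mountpoints:
    if mp is None:
        continue
    if mp in seen:
        duplicates.add(mp)
    seen.add(mp)
print(sorted(duplicates))  # ['/boot']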

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class StoragePartitioning(Validators):
def __init__(self):
super().__init__('Storage Partitioning', "DD2002")
@ -70,8 +71,9 @@ class StoragePartitioning(Validators):
all_volume_groups = baremetal_node.volume_groups or []
for volume_group in all_volume_groups:
if volume_group.name not in volume_group_check_list:
msg = ('Volume group %s not assigned any physical volumes'
% (volume_group.name))
msg = (
'Volume group %s not assigned any physical volumes' %
(volume_group.name))
self.report_error(
msg, [baremetal_node.doc_ref],
"Each volume group should be assigned at least one storage device "

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class StorageSizing(Validators):
def __init__(self):
super().__init__('Storage Sizing', 'DD2003')

View File

@ -15,6 +15,7 @@ from drydock_provisioner.orchestrator.validations.validators import Validators
class UniqueNetworkCheck(Validators):
def __init__(self):
super().__init__('Allowed Network Check', 'DD1007')
@ -53,8 +54,8 @@ class UniqueNetworkCheck(Validators):
for name in duplicated_names:
msg = (
'Allowed network %s duplicated on NetworkLink %s and NetworkLink '
'%s' % (name, network_link_name,
network_link_name_2))
'%s' %
(name, network_link_name, network_link_name_2))
self.report_error(
msg, [],
"Each network is only allowed to cross a single network link."

View File

@ -38,6 +38,7 @@ from drydock_provisioner.orchestrator.validations.storage_mountpoints import Sto
class Validator():
def __init__(self, orchestrator):
"""Create a validator with a reference to the orchestrator.
@ -63,8 +64,8 @@ class Validator():
validation_error = False
for rule in rule_set:
message_list = rule.execute(
site_design=site_design, orchestrator=self.orchestrator)
message_list = rule.execute(site_design=site_design,
orchestrator=self.orchestrator)
result_status.message_list.extend(message_list)
error_msg = [m for m in message_list if m.error]
result_status.error_count = result_status.error_count + len(

View File

@ -20,7 +20,9 @@ from drydock_provisioner.objects import fields as hd_fields
import drydock_provisioner.config as config
class Validators:
def __init__(self, long_name, name):
self.name = name
self.long_name = long_name
@ -42,13 +44,12 @@ class Validators:
:param level: String - More detailed of the severity level of this message
"""
fmt_msg = "%s: %s" % (self.long_name, msg)
msg_obj = objects.ValidationMessage(
fmt_msg,
self.name,
error=error,
level=level,
docs=docs,
diagnostic=diagnostic)
msg_obj = objects.ValidationMessage(fmt_msg,
self.name,
error=error,
level=level,
docs=docs,
diagnostic=diagnostic)
self.messages.append(msg_obj)
def report_error(self, msg, docs, diagnostic):

View File

@ -30,35 +30,35 @@ class DrydockPolicy(object):
# Base Policy
base_rules = [
policy.RuleDefault(
'admin_required',
'role:admin or is_admin:1',
description='Actions requiring admin authority'),
policy.RuleDefault('admin_required',
'role:admin or is_admin:1',
description='Actions requiring admin authority'),
]
# Orchestrator Policy
task_rules = [
policy.DocumentedRuleDefault(
'physical_provisioner:read_task', 'role:admin', 'Get task status',
[{
'path': '/api/v1.0/tasks',
'method': 'GET'
}, {
'path': '/api/v1.0/tasks/{task_id}',
'method': 'GET'
}]),
policy.DocumentedRuleDefault('physical_provisioner:read_task',
'role:admin', 'Get task status',
[{
'path': '/api/v1.0/tasks',
'method': 'GET'
}, {
'path': '/api/v1.0/tasks/{task_id}',
'method': 'GET'
}]),
policy.DocumentedRuleDefault('physical_provisioner:create_task',
'role:admin', 'Create a task',
[{
'path': '/api/v1.0/tasks',
'method': 'POST'
}]),
policy.DocumentedRuleDefault(
'physical_provisioner:validate_design', 'role:admin',
'Create validate_design task', [{
'path': '/api/v1.0/tasks',
'method': 'POST'
}]),
policy.DocumentedRuleDefault('physical_provisioner:validate_design',
'role:admin',
'Create validate_design task',
[{
'path': '/api/v1.0/tasks',
'method': 'POST'
}]),
policy.DocumentedRuleDefault('physical_provisioner:verify_site',
'role:admin', 'Create verify_site task',
[{
@ -95,12 +95,12 @@ class DrydockPolicy(object):
'path': '/api/v1.0/tasks',
'method': 'POST'
}]),
policy.DocumentedRuleDefault('physical_provisioner:delete_tasks',
'role:admin', 'Deletes tasks by age',
[{
'path': '/api/v1.0/tasks',
'method': 'DELETE'
}]),
policy.DocumentedRuleDefault(
'physical_provisioner:delete_tasks', 'role:admin',
'Deletes tasks by age', [{
'path': '/api/v1.0/tasks',
'method': 'DELETE'
}]),
policy.DocumentedRuleDefault('physical_provisioner:relabel_nodes',
'role:admin', 'Create relabel_nodes task',
[{
@ -110,10 +110,8 @@ class DrydockPolicy(object):
policy.DocumentedRuleDefault(
'physical_provisioner:read_build_data', 'role:admin',
'Read build data for a node', [{
'path':
'/api/v1.0/nodes/{nodename}/builddata',
'method':
'GET',
'path': '/api/v1.0/nodes/{nodename}/builddata',
'method': 'GET',
}]),
]
@ -121,8 +119,7 @@ class DrydockPolicy(object):
data_rules = [
policy.DocumentedRuleDefault(
'physical_provisioner:read_data', 'role:admin',
'Read loaded design data',
[{
'Read loaded design data', [{
'path': '/api/v1.0/designs',
'method': 'GET'
}, {
@ -131,8 +128,7 @@ class DrydockPolicy(object):
}]),
policy.DocumentedRuleDefault(
'physical_provisioner:ingest_data', 'role:admin',
'Load design data',
[{
'Load design data', [{
'path': '/api/v1.0/designs',
'method': 'POST'
}, {
@ -182,6 +178,7 @@ class ApiEnforcer(object):
self.logger = logging.getLogger('drydock.policy')
def __call__(self, f):
@functools.wraps(f)
def secure_handler(slf, req, resp, *args, **kwargs):
ctx = req.context
@ -199,18 +196,16 @@ class ApiEnforcer(object):
slf.info(
ctx,
"Error - Forbidden access - action: %s" % self.action)
slf.return_error(
resp,
falcon.HTTP_403,
message="Forbidden",
retry=False)
slf.return_error(resp,
falcon.HTTP_403,
message="Forbidden",
retry=False)
else:
slf.info(ctx, "Error - Unauthenticated access")
slf.return_error(
resp,
falcon.HTTP_401,
message="Unauthenticated",
retry=False)
slf.return_error(resp,
falcon.HTTP_401,
message="Unauthenticated",
retry=False)
return secure_handler
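
The policy hunks above re-wrap oslo.policy rule registrations. For reference, a minimal sketch of registering and enforcing one such rule outside Drydock, assuming oslo.policy and oslo.config are installed; the creds dict is invented, since a real request would carry keystone context:

from oslo_config import cfg
from oslo_policy import policy

enforcer = policy.Enforcer(cfg.CONF)
enforcer.register_default(
    policy.DocumentedRuleDefault(
        'physical_provisioner:read_task', 'role:admin', 'Get task status',
        [{'path': '/api/v1.0/tasks', 'method': 'GET'}]))

# Invented credentials for the example.
creds = {'roles': ['admin']}
print(enforcer.enforce('physical_provisioner:read_task', {}, creds))  # True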

View File

@ -20,6 +20,7 @@ from sqlalchemy.dialects import postgresql as pg
class ExtendTable(Table):
def __new__(cls, metadata):
self = super().__new__(cls, cls.__tablename__, metadata,
*cls.__schema__)

Some files were not shown because too many files have changed in this diff