diff --git a/bin/iotronic-conductor b/bin/iotronic-conductor
new file mode 100755
index 0000000..e94230d
--- /dev/null
+++ b/bin/iotronic-conductor
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Iotronic Conductor
+"""
+
+import sys
+
+from oslo_config import cfg
+
+from iotronic.common import service as iotronic_service
+from iotronic.openstack.common import service
+
+
+
+CONF = cfg.CONF
+if __name__ == '__main__':
+
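+    # Load configuration and logging, then build an RPC service hosting
+    # ConductorManager (from iotronic.conductor.manager) on this host; the
+    # launcher below blocks until the service is stopped.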
+ iotronic_service.prepare_service(sys.argv)
+ mgr = iotronic_service.RPCService(CONF.host,
+ 'iotronic.conductor.manager',
+ 'ConductorManager')
+
+ launcher = service.launch(mgr)
+ launcher.wait()
diff --git a/build.sh b/build.sh
new file mode 100755
index 0000000..22b2c33
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,4 @@
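+# Dev helper: rebuild and reinstall the package, then restart Apache, which
+# serves the API through mod_wsgi; build artifacts are removed afterwards.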
+python setup.py build && python setup.py install && systemctl restart httpd
+rm -rf build
+rm -rf iotronic.egg-info
+rm -rf dist
diff --git a/etc/apache2/iotronic.conf b/etc/apache2/iotronic.conf
new file mode 100644
index 0000000..e3b8b26
--- /dev/null
+++ b/etc/apache2/iotronic.conf
@@ -0,0 +1,38 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is an example Apache2 configuration file for using the
+# Iotronic API through mod_wsgi. This version assumes you are
+# running devstack to configure the software.
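+#
+# On RDO/CentOS (note the /var/log/httpd paths below) this file would
+# typically be copied into /etc/httpd/conf.d/.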
+
+Listen 1288
+
+<VirtualHost *:1288>
+ WSGIDaemonProcess iotronic
+#user=root group=root threads=10 display-name=%{GROUP}
+ WSGIScriptAlias / /etc/iotronic/app.wsgi
+
+ #SetEnv APACHE_RUN_USER stack
+ #SetEnv APACHE_RUN_GROUP stack
+ WSGIProcessGroup iotronic
+
+ ErrorLog /var/log/httpd/iotronic_error.log
+ LogLevel debug
+ CustomLog /var/log/httpd/iotronic_access.log combined
+
+ <Directory /etc/iotronic>
+ WSGIProcessGroup iotronic
+ WSGIApplicationGroup %{GLOBAL}
+ AllowOverride All
+ Require all granted
+ </Directory>
+</VirtualHost>
diff --git a/etc/iotronic/app.wsgi b/etc/iotronic/app.wsgi
new file mode 100644
index 0000000..3401ca7
--- /dev/null
+++ b/etc/iotronic/app.wsgi
@@ -0,0 +1,29 @@
+# -*- mode: python -*-
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from iotronic.api import app
+from iotronic.common import service
+
+from oslo import i18n
+#from oslo_config import cfg
+#cfg.CONF(project='iotronic')
+
+
+i18n.install('iotronic')
+service.prepare_service([])
+
+application = app.VersionSelectorApplication()
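+
+# Quick sanity check once Apache has been reloaded (assuming the Listen
+# port from etc/apache2/iotronic.conf):
+#   curl http://localhost:1288/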
diff --git a/etc/iotronic/iotronic.conf b/etc/iotronic/iotronic.conf
new file mode 100644
index 0000000..41f616c
--- /dev/null
+++ b/etc/iotronic/iotronic.conf
@@ -0,0 +1,25 @@
+[DEFAULT]
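+# Message bus used for RPC; format: rabbit://USER:PASSWORD@HOST:PORT/VHOST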
+transport_url=rabbit://root:0penstack@iotctrl:5672/
+debug=True
+verbose=False
+
+#
+# Options defined in ironic.api.app
+#
+
+# Authentication strategy used by ironic-api: one of
+# "keystone" or "noauth". "noauth" should not be used in a
+# production environment because all authentication will be
+# disabled. (string value)
+auth_strategy=noauth
+
+# Enable pecan debug mode. WARNING: this is insecure and
+# should not be used in a production environment. (boolean
+# value)
+#pecan_debug=false
+
+
+[database]
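+# SQLAlchemy URL; format: mysql://USER:PASSWORD@HOST/DBNAME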
+connection = mysql://iotronic:0penstack@localhost/iotronic
+
+
diff --git a/etc/iotronic/iotronic.conf_old b/etc/iotronic/iotronic.conf_old
new file mode 100644
index 0000000..753e60d
--- /dev/null
+++ b/etc/iotronic/iotronic.conf_old
@@ -0,0 +1,1616 @@
+[DEFAULT]
+
+#transport_url=rabbit://root:0penstack@iotctrl:5672/virtual_host
+
+#rpc_backend = rabbit
+#rabbit_host = iotctrl
+#rabbit_password = 0penstack
+
+#
+# Options defined in oslo.messaging
+#
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
+
+# MatchMaker driver. (string value)
+#rpc_zmq_matchmaker=local
+
+# ZeroMQ receiver listening port. (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=ironic
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency. (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Size of RPC thread pool. (integer value)
+#rpc_thread_pool_size=64
+
+# The driver(s) to handle sending notifications. Possible
+# values are messaging, messagingv2, routing, log, test, noop
+# (multi valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=rabbit://guest:0penstack@iotctrl:5672/virtual_host
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in oslo.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+debug=True
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+verbose=True
+
+# The name of a logging configuration file. This file is
+# appended to any existing logging configuration files. For
+# details about logging configuration files, see the Python
+# logging module documentation. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append=
+
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s . (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=
+
+# (Optional) The base directory used for relative --log-file
+# paths. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=
+
+# Use syslog for logging. Existing syslog format is DEPRECATED
+# during I, and will change in J to honor RFC5424. (boolean
+# value)
+#use_syslog=false
+
+# (Optional) Enables or disables syslog rfc5424 format for
+# logging. If enabled, prefixes the MSG part of the syslog
+# message with APP-NAME (RFC5424). The format without the APP-
+# NAME is deprecated in K, and will be removed in L, along
+# with this option. (boolean value)
+#use_syslog_rfc_format=true
+
+# Syslog facility to receive log lines. (string value)
+#syslog_log_facility=LOG_USER
+
+# Log output to standard error. (boolean value)
+#use_stderr=true
+
+# Format string to use for log messages with context. (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages without context.
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Data to append to log format when level is DEBUG. (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format.
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
+
+# List of logger=LEVEL pairs. (list value)
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN
+
+# Enables or disables publication of error events. (boolean
+# value)
+#publish_errors=false
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+# The format for an instance that is passed with the log
+# message. (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log
+# message. (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
+
+
+#
+# Options defined in ironic.netconf
+#
+
+# IP address of this host. If unset, will determine the IP
+# programmatically. If unable to do so, will use "127.0.0.1".
+# (string value)
+#my_ip=10.0.0.1
+
+
+#
+# Options defined in ironic.api.app
+#
+
+# Authentication strategy used by ironic-api: one of
+# "keystone" or "noauth". "noauth" should not be used in a
+# production environment because all authentication will be
+# disabled. (string value)
+auth_strategy=noauth
+
+# Enable pecan debug mode. WARNING: this is insecure and
+# should not be used in a production environment. (boolean
+# value)
+#pecan_debug=false
+
+
+#
+# Options defined in ironic.common.driver_factory
+#
+
+# Specify the list of drivers to load during service
+# initialization. Missing drivers, or drivers which fail to
+# initialize, will prevent the conductor service from
+# starting. The option default is a recommended set of
+# production-oriented drivers. A complete list of drivers
+# present on your system may be found by enumerating the
+# "ironic.drivers" entrypoint. An example may be found in the
+# developer documentation online. (list value)
+#enabled_drivers=pxe_ipmitool
+
+
+#
+# Options defined in ironic.common.exception
+#
+
+# Used if there is a formatting error when generating an
+# exception message (a programming error). If True, raise an
+# exception; if False, use the unformatted message. (boolean
+# value)
+#fatal_exception_format_errors=false
+
+
+#
+# Options defined in ironic.common.hash_ring
+#
+
+# Exponent to determine number of hash partitions to use when
+# distributing load across conductors. Larger values will
+# result in more even distribution of load and less load when
+# rebalancing the ring, but more memory usage. Number of
+# partitions per conductor is (2^hash_partition_exponent).
+# This determines the granularity of rebalancing: given 10
+# hosts and an exponent of 2, there are 40 partitions in
+# the ring. A few thousand partitions should make rebalancing
+# smooth in most cases. The default is suitable for up to a
+# few hundred conductors. Too many partitions has a CPU
+# impact. (integer value)
+#hash_partition_exponent=5
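+#
+# Worked example: with the default exponent of 5, each conductor claims
+# 2^5 = 32 partitions, so a ring of 10 conductors has 320 partitions.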
+
+# [Experimental Feature] Number of hosts to map onto each hash
+# partition. Setting this to more than one will cause
+# additional conductor services to prepare deployment
+# environments and potentially allow the Ironic cluster to
+# recover more quickly if a conductor instance is terminated.
+# (integer value)
+#hash_distribution_replicas=1
+
+
+#
+# Options defined in ironic.common.images
+#
+
+# If True, convert backing images to "raw" disk image format.
+# (boolean value)
+#force_raw_images=true
+
+# Path to isolinux binary file. (string value)
+#isolinux_bin=/usr/lib/syslinux/isolinux.bin
+
+# Template file for isolinux configuration file. (string
+# value)
+#isolinux_config_template=$pybasedir/common/isolinux_config.template
+
+# Template file for grub configuration file. (string value)
+#grub_config_template=$pybasedir/common/grub_conf.template
+
+
+#
+# Options defined in ironic.common.paths
+#
+
+# Directory where the ironic python module is installed.
+# (string value)
+#pybasedir=/usr/lib/python/site-packages/ironic/ironic
+
+# Directory where ironic binaries are installed. (string
+# value)
+#bindir=$pybasedir/bin
+
+# Top-level directory for maintaining ironic's state. (string
+# value)
+#state_path=$pybasedir
+
+
+#
+# Options defined in ironic.common.service
+#
+
+# Seconds between running periodic tasks. (integer value)
+#periodic_interval=60
+
+# Name of this node. This can be an opaque identifier. It is
+# not necessarily a hostname, FQDN, or IP address. However,
+# the node name must be valid within an AMQP key, and if using
+# ZeroMQ, a valid hostname, FQDN, or IP address. (string
+# value)
+#host=ironic
+
+
+#
+# Options defined in ironic.common.utils
+#
+
+# Path to the rootwrap configuration file to use for running
+# commands as root. (string value)
+#rootwrap_config=/etc/ironic/rootwrap.conf
+
+# Explicitly specify the temporary working directory. (string
+# value)
+#tempdir=
+
+
+#
+# Options defined in ironic.drivers.modules.image_cache
+#
+
+# Run image downloads and raw format conversions in parallel.
+# (boolean value)
+#parallel_image_downloads=false
+
+
+#
+# Options defined in ironic.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=
+
+
+#
+# Options defined in ironic.openstack.common.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should
+# we run them here? (boolean value)
+#run_external_periodic_tasks=true
+
+
+#
+# Options defined in ironic.openstack.common.versionutils
+#
+
+# Enables or disables fatal status of deprecations. (boolean
+# value)
+#fatal_deprecations=false
+
+
+[agent]
+
+#
+# Options defined in ironic.drivers.modules.agent
+#
+
+# Additional append parameters for baremetal PXE boot. (string
+# value)
+#agent_pxe_append_params=nofb nomodeset vga=normal
+
+# Template file for PXE configuration. (string value)
+#agent_pxe_config_template=$pybasedir/drivers/modules/agent_config.template
+
+# Neutron bootfile DHCP parameter. (string value)
+#agent_pxe_bootfile_name=pxelinux.0
+
+# Priority to run in-band erase devices via the Ironic Python
+# Agent ramdisk. If unset, will use the priority set in the
+# ramdisk (defaults to 10 for the GenericHardwareManager). If
+# set to 0, will not run during cleaning. (integer value)
+#agent_erase_devices_priority=
+
+# Whether Ironic will manage TFTP files for the deploy
+# ramdisks. If set to False, you will need to configure your
+# own TFTP server that allows booting the deploy ramdisks.
+# (boolean value)
+#manage_tftp=true
+
+
+#
+# Options defined in ironic.drivers.modules.agent_base_vendor
+#
+
+# Maximum interval (in seconds) for agent heartbeats. (integer
+# value)
+#heartbeat_timeout=300
+
+
+#
+# Options defined in ironic.drivers.modules.agent_client
+#
+
+# API version to use for communicating with the ramdisk agent.
+# (string value)
+#agent_api_version=v1
+
+
+[amt]
+
+#
+# Options defined in ironic.drivers.modules.amt.common
+#
+
+# Protocol used for AMT endpoint, support http/https (string
+# value)
+#protocol=http
+
+
+#
+# Options defined in ironic.drivers.modules.amt.power
+#
+
+# Maximum number of times to attempt an AMT operation, before
+# failing (integer value)
+#max_attempts=3
+
+# Amount of time (in seconds) to wait, before retrying an AMT
+# operation (integer value)
+#action_wait=10
+
+
+[api]
+
+#
+# Options defined in ironic.api
+#
+
+# The IP address on which ironic-api listens. (string value)
+#host_ip=0.0.0.0
+
+# The TCP port on which ironic-api listens. (integer value)
+#port=6385
+
+# The maximum number of items returned in a single response
+# from a collection resource. (integer value)
+#max_limit=1000
+
+
+[conductor]
+
+#
+# Options defined in ironic.conductor.manager
+#
+
+# URL of Ironic API service. If not set ironic can get the
+# current value from the keystone service catalog. (string
+# value)
+#api_url=
+
+# Seconds between conductor heart beats. (integer value)
+#heartbeat_interval=10
+
+# Maximum time (in seconds) since the last check-in of a
+# conductor. A conductor is considered inactive when this time
+# has been exceeded. (integer value)
+#heartbeat_timeout=60
+
+# Interval between syncing the node power state to the
+# database, in seconds. (integer value)
+#sync_power_state_interval=60
+
+# Interval between checks of provision timeouts, in seconds.
+# (integer value)
+#check_provision_state_interval=60
+
+# Timeout (seconds) to wait for a callback from a deploy
+# ramdisk. Set to 0 to disable timeout. (integer value)
+#deploy_callback_timeout=1800
+
+# During sync_power_state, should the hardware power state be
+# set to the state recorded in the database (True) or should
+# the database be updated based on the hardware state (False).
+# (boolean value)
+#force_power_state_during_sync=true
+
+# During sync_power_state failures, limit the number of times
+# Ironic should try syncing the hardware node power state with
+# the node power state in DB (integer value)
+#power_state_sync_max_retries=3
+
+# Maximum number of worker threads that can be started
+# simultaneously by a periodic task. Should be less than RPC
+# thread pool size. (integer value)
+#periodic_max_workers=8
+
+# The size of the workers greenthread pool. (integer value)
+#workers_pool_size=100
+
+# Number of attempts to grab a node lock. (integer value)
+#node_locked_retry_attempts=3
+
+# Seconds to sleep between node lock attempts. (integer value)
+#node_locked_retry_interval=1
+
+# Enable sending sensor data message via the notification bus
+# (boolean value)
+#send_sensor_data=false
+
+# Seconds between conductor sending sensor data message to
+# ceilometer via the notification bus. (integer value)
+#send_sensor_data_interval=600
+
+# List of comma separated meter types which need to be sent to
+# Ceilometer. The default value, "ALL", is a special value
+# meaning send all the sensor data. (list value)
+#send_sensor_data_types=ALL
+
+# When conductors join or leave the cluster, existing
+# conductors may need to update any persistent local state as
+# nodes are moved around the cluster. This option controls how
+# often, in seconds, each conductor will check for nodes that
+# it should "take over". Set it to a negative value to disable
+# the check entirely. (integer value)
+#sync_local_state_interval=180
+
+# Whether to upload the config drive to Swift. (boolean value)
+#configdrive_use_swift=false
+
+# Name of the Swift container to store config drive data. Used
+# when configdrive_use_swift is True. (string value)
+#configdrive_swift_container=ironic_configdrive_container
+
+# Timeout (seconds) for waiting for node inspection. 0 -
+# unlimited. (integer value)
+#inspect_timeout=1800
+
+# Cleaning is a configurable set of steps, such as erasing
+# disk drives, that are performed on the node to ensure it is
+# in a baseline state and ready to be deployed to. This is
+# done after instance deletion, and during the transition from
+# a "managed" to "available" state. When enabled, the
+# particular steps performed to clean a node depend on which
+# driver that node is managed by; see the individual driver's
+# documentation for details. NOTE: The introduction of the
+# cleaning operation causes instance deletion to take
+# significantly longer. In an environment where all tenants
+# are trusted (e.g., because there is only one tenant), this
+# option could be safely disabled. (boolean value)
+#clean_nodes=true
+
+
+[console]
+
+#
+# Options defined in ironic.drivers.modules.console_utils
+#
+
+# Path to serial console terminal program (string value)
+#terminal=shellinaboxd
+
+# Directory containing the terminal SSL cert(PEM) for serial
+# console access (string value)
+#terminal_cert_dir=
+
+# Directory for holding terminal pid files. If not specified,
+# the temporary directory will be used. (string value)
+#terminal_pid_dir=
+
+# Time interval (in seconds) for checking the status of
+# console subprocess. (integer value)
+#subprocess_checking_interval=1
+
+# Time (in seconds) to wait for the console subprocess to
+# start. (integer value)
+#subprocess_timeout=10
+
+
+[database]
+connection = mysql://iotronic:0penstack@localhost/iotronic
+
+#
+# Options defined in oslo.db
+#
+
+# The file name to use with SQLite. (string value)
+#sqlite_db=oslo.sqlite
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous=true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the
+# database. (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=mysql://iotronic:0penstack@iotctrl/iotronic
+
+# The SQLAlchemy connection string to use to connect to the
+# slave database. (string value)
+#slave_connection=
+
+# The SQL mode to be used for MySQL sessions. This option,
+# including the default, overrides any server-set SQL mode. To
+# use whatever SQL mode is set by the server configuration,
+# set this to no value. Example: mysql_sql_mode= (string
+# value)
+#mysql_sql_mode=TRADITIONAL
+
+# Timeout before idle SQL connections are reaped. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=
+
+# Maximum number of database connection retries during
+# startup. Set to -1 to specify an infinite retry count.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a SQL connection.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
+
+# If set, use this value for max_overflow with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=
+
+# Verbosity of SQL debugging information: 0=None,
+# 100=Everything. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
+
+# Add Python stack traces to SQL as comment strings. (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
+
+# If set, use this value for pool_timeout with SQLAlchemy.
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=
+
+# Enable the experimental use of database reconnect on
+# connection lost. (boolean value)
+#use_db_reconnect=false
+
+# Seconds between retries of a database transaction. (integer
+# value)
+#db_retry_interval=1
+
+# If True, increases the interval between retries of a
+# database operation up to db_max_retry_interval. (boolean
+# value)
+#db_inc_retry_interval=true
+
+# If db_inc_retry_interval is set, the maximum seconds between
+# retries of a database operation. (integer value)
+#db_max_retry_interval=10
+
+# Maximum retries in case of connection error or deadlock
+# error before error is raised. Set to -1 to specify an
+# infinite retry count. (integer value)
+#db_max_retries=20
+
+
+#
+# Options defined in ironic.db.sqlalchemy.models
+#
+
+# MySQL engine to use. (string value)
+#mysql_engine=InnoDB
+
+
+[deploy]
+
+#
+# Options defined in ironic.drivers.modules.deploy_utils
+#
+
+# Size of EFI system partition in MiB when configuring UEFI
+# systems for local boot. (integer value)
+#efi_system_partition_size=200
+
+# Block size to use when writing to the nodes disk. (string
+# value)
+#dd_block_size=1M
+
+# Maximum attempts to verify an iSCSI connection is active,
+# sleeping 1 second between attempts. (integer value)
+#iscsi_verify_attempts=3
+
+
+[dhcp]
+
+#
+# Options defined in ironic.common.dhcp_factory
+#
+
+# DHCP provider to use. "neutron" uses Neutron, and "none"
+# uses a no-op provider. (string value)
+#dhcp_provider=neutron
+
+
+[discoverd]
+
+#
+# Options defined in ironic.drivers.modules.discoverd
+#
+
+# whether to enable inspection using ironic-discoverd (boolean
+# value)
+#enabled=false
+
+# ironic-discoverd HTTP endpoint. If this is not set, the
+# ironic-discoverd client default (http://127.0.0.1:5050) will
+# be used. (string value)
+#service_url=
+
+# period (in seconds) to check status of nodes on inspection
+# (integer value)
+#status_check_period=60
+
+
+[disk_partitioner]
+
+#
+# Options defined in ironic.common.disk_partitioner
+#
+
+# After Ironic has completed creating the partition table, it
+# continues to check for activity on the attached iSCSI device
+# status at this interval prior to copying the image to the
+# node, in seconds (integer value)
+#check_device_interval=1
+
+# The maximum number of times to check that the device is not
+# accessed by another process. If the device is still busy
+# after that, the disk partitioning will be treated as having
+# failed. (integer value)
+#check_device_max_retries=20
+
+
+[drac]
+
+#
+# Options defined in ironic.drivers.modules.drac.client
+#
+
+# In case there is a communication failure, the DRAC client is
+# going to resend the request as many times as defined in this
+# setting. (integer value)
+#client_retry_count=5
+
+# In case there is a communication failure, the DRAC client is
+# going to wait for as many seconds as defined in this setting
+# before resending the request. (integer value)
+#client_retry_delay=5
+
+
+[glance]
+
+#
+# Options defined in ironic.common.glance_service.v2.image_service
+#
+
+# A list of URL schemes that can be downloaded directly via
+# the direct_url. Currently supported schemes: [file]. (list
+# value)
+#allowed_direct_url_schemes=
+
+# The secret token given to Swift to allow temporary URL
+# downloads. Required for temporary URLs. (string value)
+#swift_temp_url_key=
+
+# The length of time in seconds that the temporary URL will be
+# valid for. Defaults to 20 minutes. If some deploys get a 401
+# response code when trying to download from the temporary
+# URL, try raising this duration. (integer value)
+#swift_temp_url_duration=1200
+
+# The "endpoint" (scheme, hostname, optional port) for the
+# Swift URL of the form
+# "endpoint_url/api_version/account/container/object_id". Do
+# not include trailing "/". For example, use
+# "https://swift.example.com". Required for temporary URLs.
+# (string value)
+#swift_endpoint_url=
+
+# The Swift API version to create a temporary URL for.
+# Defaults to "v1". Swift temporary URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_api_version=v1
+
+# The account that Glance uses to communicate with Swift. The
+# format is "AUTH_uuid". "uuid" is the UUID for the account
+# configured in the glance-api.conf. Required for temporary
+# URLs. For example:
+# "AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". Swift temporary
+# URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_account=
+
+# The Swift container Glance is configured to store its images
+# in. Defaults to "glance", which is the default in glance-
+# api.conf. Swift temporary URL format:
+# "endpoint_url/api_version/account/container/object_id"
+# (string value)
+#swift_container=glance
+
+# This should match a config by the same name in the Glance
+# configuration file. When set to 0, a single-tenant store
+# will only use one container to store all images. When set to
+# an integer value between 1 and 32, a single-tenant store
+# will use multiple containers to store images, and this value
+# will determine how many containers are created. (integer
+# value)
+#swift_store_multiple_containers_seed=0
+
+
+#
+# Options defined in ironic.common.image_service
+#
+
+# Default glance hostname or IP address. (string value)
+#glance_host=$my_ip
+
+# Default glance port. (integer value)
+#glance_port=9292
+
+# Default protocol to use when connecting to glance. Set to
+# https for SSL. (string value)
+#glance_protocol=http
+
+# A list of the glance api servers available to ironic. Prefix
+# with https:// for SSL-based glance API servers. Format is
+# [hostname|IP]:port. (list value)
+#glance_api_servers=
+
+# Allow to perform insecure SSL (https) requests to glance.
+# (boolean value)
+#glance_api_insecure=false
+
+# Number of retries when downloading an image from glance.
+# (integer value)
+#glance_num_retries=0
+
+# Authentication strategy to use when connecting to glance.
+# Only "keystone" and "noauth" are currently supported by
+# ironic. (string value)
+#auth_strategy=keystone
+
+
+[ilo]
+
+#
+# Options defined in ironic.drivers.modules.ilo.common
+#
+
+# Timeout (in seconds) for iLO operations (integer value)
+#client_timeout=60
+
+# Port to be used for iLO operations (integer value)
+#client_port=443
+
+# The Swift iLO container to store data. (string value)
+#swift_ilo_container=ironic_ilo_container
+
+# Amount of time in seconds for Swift objects to auto-expire.
+# (integer value)
+#swift_object_expiry_timeout=900
+
+
+#
+# Options defined in ironic.drivers.modules.ilo.deploy
+#
+
+# Priority for erase devices clean step. If unset, it defaults
+# to 10. If set to 0, the step will be disabled and will not
+# run during cleaning. (integer value)
+#clean_priority_erase_devices=
+
+
+#
+# Options defined in ironic.drivers.modules.ilo.management
+#
+
+# Priority for reset_ilo clean step. (integer value)
+#clean_priority_reset_ilo=1
+
+# Priority for reset_bios_to_default clean step. (integer
+# value)
+#clean_priority_reset_bios_to_default=10
+
+# Priority for reset_secure_boot_keys clean step. This step
+# will reset the secure boot keys to manufacturing defaults.
+# (integer value)
+#clean_priority_reset_secure_boot_keys_to_default=20
+
+# Priority for clear_secure_boot_keys clean step. This step is
+# not enabled by default. It can be enabled to clear all
+# secure boot keys enrolled with iLO. (integer value)
+#clean_priority_clear_secure_boot_keys=0
+
+# Priority for reset_ilo_credential clean step. This step
+# requires "ilo_change_password" parameter to be updated in
+# node's driver_info with the new password. (integer value)
+#clean_priority_reset_ilo_credential=30
+
+
+#
+# Options defined in ironic.drivers.modules.ilo.power
+#
+
+# Number of times a power operation needs to be retried
+# (integer value)
+#power_retry=6
+
+# Amount of time in seconds to wait in between power
+# operations (integer value)
+#power_wait=2
+
+
+[ipmi]
+
+#
+# Options defined in ironic.drivers.modules.ipminative
+#
+
+# Maximum time in seconds to retry IPMI operations. There is a
+# tradeoff when setting this value. Setting this too low may
+# cause older BMCs to crash and require a hard reset. However,
+# setting too high can cause the sync power state periodic
+# task to hang when there are slow or unresponsive BMCs.
+# (integer value)
+#retry_timeout=60
+
+# Minimum time, in seconds, between IPMI operations sent to a
+# server. There is a risk with some hardware that setting this
+# too low may cause the BMC to crash. Recommended setting is 5
+# seconds. (integer value)
+#min_command_interval=5
+
+
+[irmc]
+
+#
+# Options defined in ironic.drivers.modules.irmc.common
+#
+
+# Port to be used for iRMC operations, either 80 or 443
+# (integer value)
+#port=443
+
+# Authentication method to be used for iRMC operations, either
+# "basic" or "digest" (string value)
+#auth_method=basic
+
+# Timeout (in seconds) for iRMC operations (integer value)
+#client_timeout=60
+
+# Sensor data retrieval method, either "ipmitool" or "scci"
+# (string value)
+#sensor_method=ipmitool
+
+
+[keystone]
+
+#
+# Options defined in ironic.common.keystone
+#
+
+# The region used for getting endpoints of OpenStack services.
+# (string value)
+#region_name=
+
+[keystone_authtoken]
+auth_uri = http://iotctrl:5000/v2.0
+identity_uri = http://iotctrl:35357
+admin_tenant_name = service
+admin_user = glance
+admin_password = 0penstack
+
+#
+# Options defined in keystonemiddleware.auth_token
+#
+
+# Complete public Identity API endpoint. (string value)
+#auth_uri=
+
+# API version of the admin Identity API endpoint. (string
+# value)
+#auth_version=
+
+# Do not handle authorization requests within the middleware,
+# but delegate the authorization decision to downstream WSGI
+# components. (boolean value)
+#delay_auth_decision=false
+
+# Request timeout value for communicating with Identity API
+# server. (integer value)
+#http_connect_timeout=
+
+# How many times are we trying to reconnect when communicating
+# with Identity API Server. (integer value)
+#http_request_max_retries=3
+
+# Env key for the swift cache. (string value)
+#cache=
+
+# Required if identity server requires client certificate
+# (string value)
+#certfile=
+
+# Required if identity server requires client certificate
+# (string value)
+#keyfile=
+
+# A PEM encoded Certificate Authority to use when verifying
+# HTTPs connections. Defaults to system CAs. (string value)
+#cafile=
+
+# Verify HTTPS connections. (boolean value)
+#insecure=false
+
+# Directory used to cache files related to PKI tokens. (string
+# value)
+#signing_dir=
+
+# Optionally specify a list of memcached server(s) to use for
+# caching. If left undefined, tokens will instead be cached
+# in-process. (list value)
+# Deprecated group/name - [DEFAULT]/memcache_servers
+#memcached_servers=
+
+# In order to prevent excessive effort spent validating
+# tokens, the middleware caches previously-seen tokens for a
+# configurable duration (in seconds). Set to -1 to disable
+# caching completely. (integer value)
+#token_cache_time=300
+
+# Determines the frequency at which the list of revoked tokens
+# is retrieved from the Identity service (in seconds). A high
+# number of revocation events combined with a low cache
+# duration may significantly reduce performance. (integer
+# value)
+#revocation_cache_time=10
+
+# (Optional) If defined, indicate whether token data should be
+# authenticated or authenticated and encrypted. Acceptable
+# values are MAC or ENCRYPT. If MAC, token data is
+# authenticated (with HMAC) in the cache. If ENCRYPT, token
+# data is encrypted and authenticated in the cache. If the
+# value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+#memcache_security_strategy=
+
+# (Optional, mandatory if memcache_security_strategy is
+# defined) This string is used for key derivation. (string
+# value)
+#memcache_secret_key=
+
+# (Optional) Number of seconds memcached server is considered
+# dead before it is tried again. (integer value)
+#memcache_pool_dead_retry=300
+
+# (Optional) Maximum total number of open connections to every
+# memcached server. (integer value)
+#memcache_pool_maxsize=10
+
+# (Optional) Socket timeout in seconds for communicating with
+# a memcached server. (integer value)
+#memcache_pool_socket_timeout=3
+
+# (Optional) Number of seconds a connection to memcached is
+# held unused in the pool before it is closed. (integer value)
+#memcache_pool_unused_timeout=60
+
+# (Optional) Number of seconds that an operation will wait to
+# get a memcached client connection from the pool. (integer
+# value)
+#memcache_pool_conn_get_timeout=10
+
+# (Optional) Use the advanced (eventlet safe) memcached client
+# pool. The advanced pool will only work under python 2.x.
+# (boolean value)
+#memcache_use_advanced_pool=false
+
+# (Optional) Indicate whether to set the X-Service-Catalog
+# header. If False, middleware will not ask for service
+# catalog on token validation and will not set the X-Service-
+# Catalog header. (boolean value)
+#include_service_catalog=true
+
+# Used to control the use and type of token binding. Can be
+# set to: "disabled" to not check token binding. "permissive"
+# (default) to validate binding information if the bind type
+# is of a form known to the server and ignore it if not.
+# "strict" like "permissive" but if the bind type is unknown
+# the token will be rejected. "required" any form of token
+# binding is needed to be allowed. Finally the name of a
+# binding method that must be present in tokens. (string
+# value)
+#enforce_token_bind=permissive
+
+# If true, the revocation list will be checked for cached
+# tokens. This requires that PKI tokens are configured on the
+# identity server. (boolean value)
+#check_revocations_for_cached=false
+
+# Hash algorithms to use for hashing PKI tokens. This may be a
+# single algorithm or multiple. The algorithms are those
+# supported by Python standard hashlib.new(). The hashes will
+# be tried in the order given, so put the preferred one first
+# for performance. The result of the first hash will be stored
+# in the cache. This will typically be set to multiple values
+# only while migrating from a less secure algorithm to a more
+# secure one. Once all the old tokens are expired this option
+# should be set to a single value for better performance.
+# (list value)
+#hash_algorithms=md5
+
+# Prefix to prepend at the beginning of the path. Deprecated,
+# use identity_uri. (string value)
+#auth_admin_prefix=
+
+# Host providing the admin Identity API endpoint. Deprecated,
+# use identity_uri. (string value)
+#auth_host=127.0.0.1
+
+# Port of the admin Identity API endpoint. Deprecated, use
+# identity_uri. (integer value)
+#auth_port=35357
+
+# Protocol of the admin Identity API endpoint (http or https).
+# Deprecated, use identity_uri. (string value)
+#auth_protocol=https
+
+# Complete admin Identity API endpoint. This should specify
+# the unversioned root endpoint e.g. https://localhost:35357/
+# (string value)
+#identity_uri=
+
+# This option is deprecated and may be removed in a future
+# release. Single shared secret with the Keystone
+# configuration used for bootstrapping a Keystone
+# installation, or otherwise bypassing the normal
+# authentication process. This option should not be used, use
+# `admin_user` and `admin_password` instead. (string value)
+#admin_token=
+
+# Service username. (string value)
+#admin_user=
+
+# Service user password. (string value)
+#admin_password=
+
+# Service tenant name. (string value)
+#admin_tenant_name=admin
+
+
+[matchmaker_redis]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Host to locate redis. (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server (optional). (string value)
+#password=
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON). (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
+
+
+[neutron]
+
+#
+# Options defined in ironic.dhcp.neutron
+#
+
+# URL for connecting to neutron. (string value)
+#url=http://$my_ip:9696
+
+# Timeout value for connecting to neutron in seconds. (integer
+# value)
+#url_timeout=30
+
+# Client retries in the case of a failed request. (integer
+# value)
+#retries=3
+
+# Default authentication strategy to use when connecting to
+# neutron. Can be either "keystone" or "noauth". Running
+# neutron in noauth mode (related to but not affected by this
+# setting) is insecure and should only be used for testing.
+# (string value)
+#auth_strategy=keystone
+
+# UUID of the network to create Neutron ports on when booting
+# to a ramdisk for cleaning/zapping using Neutron DHCP (string
+# value)
+#cleaning_network_uuid=
+
+
+[oslo_concurrency]
+
+#
+# Options defined in oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. For security, the
+# specified directory should only be writable by the user
+# running the processes that need locking. Defaults to
+# environment variable OSLO_LOCK_PATH. If external locks are
+# used, a lock path must be set. (string value)
+#lock_path=
+
+
+[oslo_messaging_amqp]
+
+#
+# Options defined in oslo.messaging
+#
+
+# address prefix used when sending to a specific server
+# (string value)
+#server_request_prefix=exclusive
+
+# address prefix used when broadcasting to all servers (string
+# value)
+#broadcast_prefix=broadcast
+
+# address prefix when sending to any server in group (string
+# value)
+#group_request_prefix=unicast
+
+# Name for the AMQP container (string value)
+#container_name=
+
+# Timeout for inactive connections (in seconds) (integer
+# value)
+#idle_timeout=0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+#trace=false
+
+# CA certificate PEM file to verify server certificate (string
+# value)
+#ssl_ca_file=
+
+# Identifying certificate PEM file to present to clients
+# (string value)
+#ssl_cert_file=
+
+# Private key PEM file used to sign cert_file certificate
+# (string value)
+#ssl_key_file=
+
+# Password for decrypting ssl_key_file (if encrypted) (string
+# value)
+#ssl_key_password=
+
+# Accept clients using either SSL or plain TCP (boolean value)
+#allow_insecure_clients=false
+
+
+[oslo_messaging_qpid]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in AMQP. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# Qpid broker hostname. (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port. (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs. (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection. (string value)
+#qpid_username=
+
+# Password for Qpid connection. (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth.
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats. (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl'. (string value)
+#qpid_protocol=tcp
+
+# Whether to disable the Nagle algorithm. (boolean value)
+#qpid_tcp_nodelay=true
+
+# The number of prefetched messages held by receiver. (integer
+# value)
+#qpid_receiver_capacity=1
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+
+[oslo_messaging_rabbit]
+backend = rabbit
+rabbit_host = iotctrl
+rabbit_password = 0penstack
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in AMQP. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size=30
+
+# SSL version to use (valid only if SSL enabled). Valid values
+# are TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may
+# be available on some distributions. (string value)
+#kombu_ssl_version=
+
+# SSL key file (valid only if SSL enabled). (string value)
+#kombu_ssl_keyfile=
+
+# SSL cert file (valid only if SSL enabled). (string value)
+#kombu_ssl_certfile=
+
+# SSL certification authority file (valid only if SSL
+# enabled). (string value)
+#kombu_ssl_ca_certs=
+
+# How long to wait before reconnecting in response to an AMQP
+# consumer cancel notification. (floating point value)
+#kombu_reconnect_delay=1.0
+
+# The RabbitMQ broker address where a single node is used.
+# (string value)
+#rabbit_host=localhost
+
+# The RabbitMQ broker port where a single node is used.
+# (integer value)
+#rabbit_port=5672
+
+# RabbitMQ HA cluster host:port pairs. (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
+
+# Connect over SSL for RabbitMQ. (boolean value)
+#rabbit_use_ssl=false
+
+# The RabbitMQ userid. (string value)
+#rabbit_userid=guest
+
+# The RabbitMQ password. (string value)
+#rabbit_password=guest
+
+# The RabbitMQ login method. (string value)
+#rabbit_login_method=AMQPLAIN
+
+# The RabbitMQ virtual host. (string value)
+#rabbit_virtual_host=/
+
+# How frequently to retry connecting with RabbitMQ. (integer
+# value)
+#rabbit_retry_interval=1
+
+# How long to backoff for between retries when connecting to
+# RabbitMQ. (integer value)
+#rabbit_retry_backoff=2
+
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+#rabbit_max_retries=0
+
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
+
+# Number of seconds after which the Rabbit broker is
+# considered down if heartbeat's keep-alive fails (0 disable
+# the heartbeat). (integer value)
+#heartbeat_timeout_threshold=60
+
+# How many times during the heartbeat_timeout_threshold we
+# check the heartbeat. (integer value)
+#heartbeat_rate=2
+
+# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake
+# (boolean value)
+#fake_rabbit=false
+
+
+[oslo_policy]
+
+#
+# Options defined in oslo.policy
+#
+
+# The JSON file that defines policies. (string value)
+#policy_file=policy.json
+
+# Default rule. Enforced when a requested rule is not found.
+# (string value)
+#policy_default_rule=default
+
+# Directories where policy configuration files are stored.
+# They can be relative to any directory in the search path
+# defined by the config_dir option, or absolute paths. The
+# file defined by policy_file must exist for these directories
+# to be searched. Missing or empty directories are ignored.
+# (multi valued)
+#policy_dirs=policy.d
+
+
+[pxe]
+
+#
+# Options defined in ironic.drivers.modules.iscsi_deploy
+#
+
+# Additional append parameters for baremetal PXE boot. (string
+# value)
+#pxe_append_params=nofb nomodeset vga=normal
+
+# Default file system format for ephemeral partition, if one
+# is created. (string value)
+#default_ephemeral_format=ext4
+
+# On the ironic-conductor node, directory where images are
+# stored on disk. (string value)
+#images_path=/var/lib/ironic/images/
+
+# On the ironic-conductor node, directory where master
+# instance images are stored on disk. (string value)
+#instance_master_path=/var/lib/ironic/master_images
+
+# Maximum size (in MiB) of cache for master images, including
+# those in use. (integer value)
+#image_cache_size=20480
+
+# Maximum TTL (in minutes) for old master images in cache.
+# (integer value)
+#image_cache_ttl=10080
+
+# The disk devices to scan while doing the deploy. (string
+# value)
+#disk_devices=cciss/c0d0,sda,hda,vda
+
+
+#
+# Options defined in ironic.drivers.modules.pxe
+#
+
+# On ironic-conductor node, template file for PXE
+# configuration. (string value)
+#pxe_config_template=$pybasedir/drivers/modules/pxe_config.template
+
+# On ironic-conductor node, template file for PXE
+# configuration for UEFI boot loader. (string value)
+#uefi_pxe_config_template=$pybasedir/drivers/modules/elilo_efi_pxe_config.template
+
+# IP address of ironic-conductor node's TFTP server. (string
+# value)
+#tftp_server=$my_ip
+
+# ironic-conductor node's TFTP root path. (string value)
+#tftp_root=/tftpboot
+
+# On ironic-conductor node, directory where master TFTP images
+# are stored on disk. (string value)
+#tftp_master_path=/tftpboot/master_images
+
+# Bootfile DHCP parameter. (string value)
+#pxe_bootfile_name=pxelinux.0
+
+# Bootfile DHCP parameter for UEFI boot mode. (string value)
+#uefi_pxe_bootfile_name=elilo.efi
+
+# ironic-conductor node's HTTP server URL. Example:
+# http://192.1.2.3:8080 (string value)
+#http_url=
+
+# ironic-conductor node's HTTP root path. (string value)
+#http_root=/httpboot
+
+# Enable iPXE boot. (boolean value)
+#ipxe_enabled=false
+
+# On ironic-conductor node, the path to the main iPXE script
+# file. (string value)
+#ipxe_boot_script=$pybasedir/drivers/modules/boot.ipxe
+
+
+[seamicro]
+
+#
+# Options defined in ironic.drivers.modules.seamicro
+#
+
+# Maximum retries for SeaMicro operations (integer value)
+#max_retry=3
+
+# Seconds to wait for power action to be completed (integer
+# value)
+#action_timeout=10
+
+
+[snmp]
+
+#
+# Options defined in ironic.drivers.modules.snmp
+#
+
+# Seconds to wait for power action to be completed (integer
+# value)
+#power_timeout=10
+
+
+[ssh]
+
+#
+# Options defined in ironic.drivers.modules.ssh
+#
+
+# libvirt URI (string value)
+#libvirt_uri=qemu:///system
+
+
+[swift]
+
+#
+# Options defined in ironic.common.swift
+#
+
+# Maximum number of times to retry a Swift request, before
+# failing. (integer value)
+#swift_max_retries=2
+
+
+[virtualbox]
+
+#
+# Options defined in ironic.drivers.modules.virtualbox
+#
+
+# Port on which VirtualBox web service is listening. (integer
+# value)
+#port=18083
+
+
diff --git a/etc/iotronic/policy.json b/etc/iotronic/policy.json
new file mode 100644
index 0000000..f772677
--- /dev/null
+++ b/etc/iotronic/policy.json
@@ -0,0 +1,5 @@
+{
+ "admin_api": "role:admin or role:administrator",
+ "show_password": "!",
+ "default": "rule:admin_api"
+}
diff --git a/infopackages b/infopackages
new file mode 100644
index 0000000..4ef3988
--- /dev/null
+++ b/infopackages
@@ -0,0 +1,17 @@
+yum install mariadb mariadb-server MySQL-python
+yum install rabbitmq-server
+yum install httpd mod_wsgi memcached python-memcached
+yum install gcc python-devel python-pip
+pip install eventlet
+yum install python-oslo-config
+pip install pecan
+pip install keystonemiddleware
+yum install python-oslo-log
+yum install python-oslo-concurrency
+pip install paramiko
+yum install python-oslo-policy
+yum install python-wsme
+yum install python-oslo-messaging
+yum install python-oslo-db
+pip install jsonpatch
diff --git a/iotronic/__init__.py b/iotronic/__init__.py
new file mode 100644
index 0000000..c733b15
--- /dev/null
+++ b/iotronic/__init__.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
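+# Disable eventlet's greened DNS resolver before eventlet is imported, and
+# leave the os module unpatched (os=False) so that process management, such
+# as a forking service launcher, keeps its normal blocking behaviour.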
+import os
+
+os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
+
+import eventlet
+
+eventlet.monkey_patch(os=False)
\ No newline at end of file
diff --git a/iotronic/api/__init__.py b/iotronic/api/__init__.py
new file mode 100644
index 0000000..ad995d9
--- /dev/null
+++ b/iotronic/api/__init__.py
@@ -0,0 +1,38 @@
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+
+API_SERVICE_OPTS = [
+ cfg.StrOpt('host_ip',
+ default='0.0.0.0',
+ help='The IP address on which iotronic-api listens.'),
+ cfg.IntOpt('port',
+ default=1288,
+ help='The TCP port on which iotronic-api listens.'),
+ cfg.IntOpt('max_limit',
+ default=1000,
+ help='The maximum number of items returned in a single '
+ 'response from a collection resource.'),
+ ]
+
+
+CONF = cfg.CONF
+
+opt_group = cfg.OptGroup(name='api',
+ title='Options for the iotronic-api service')
+CONF.register_group(opt_group)
+CONF.register_opts(API_SERVICE_OPTS, opt_group)
\ No newline at end of file
diff --git a/iotronic/api/acl.py b/iotronic/api/acl.py
new file mode 100644
index 0000000..3d8c841
--- /dev/null
+++ b/iotronic/api/acl.py
@@ -0,0 +1,34 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2012 New Dream Network, LLC (DreamHost)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Access Control Lists (ACL's) control access the API server."""
+
+from iotronic.api.middleware import auth_token
+
+
+def install(app, conf, public_routes):
+ """Install ACL check on application.
+
+    :param app: A WSGI application.
+    :param conf: Settings. Dict'ified and passed to keystonemiddleware.
+    :param public_routes: The list of routes which can be accessed without
+                          authentication.
+ :return: The same WSGI application with ACL installed.
+
+ """
+ return auth_token.AuthTokenMiddleware(app,
+ conf=dict(conf),
+ public_api_routes=public_routes)
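+
+# Usage sketch: when auth_strategy is "keystone", app.setup_app() wraps the
+# freshly built WSGI app with this middleware, e.g.
+#   app = install(app, cfg.CONF, public_routes=['/', '/v1'])
+# where the route list here is illustrative, not the actual default.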
diff --git a/iotronic/api/app.py b/iotronic/api/app.py
new file mode 100644
index 0000000..929910c
--- /dev/null
+++ b/iotronic/api/app.py
@@ -0,0 +1,88 @@
+# -*- encoding: utf-8 -*-
+
+# Copyright © 2012 New Dream Network, LLC (DreamHost)
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+import pecan
+
+from iotronic.api import acl
+from iotronic.api import config
+from iotronic.api import hooks
+from iotronic.api import middleware
+
+
+api_opts = [
+ cfg.StrOpt('auth_strategy',
+ default='keystone',
+ help='Authentication strategy used by iotronic-api: one of "keystone" '
+ 'or "noauth". "noauth" should not be used in a production '
+ 'environment because all authentication will be disabled.'),
+ cfg.BoolOpt('pecan_debug',
+ default=False,
+ help=('Enable pecan debug mode. WARNING: this is insecure '
+ 'and should not be used in a production environment.')),
+ ]
+
+CONF = cfg.CONF
+CONF.register_opts(api_opts)
+
+
+def get_pecan_config():
+ # Set up the pecan configuration
+ filename = config.__file__.replace('.pyc', '.py')
+ return pecan.configuration.conf_from_file(filename)
+
+
+def setup_app(pecan_config=None, extra_hooks=None):
+    # Resolve the configuration before it is dereferenced below.
+    if not pecan_config:
+        pecan_config = get_pecan_config()
+
+    app_hooks = [hooks.ConfigHook(),
+                 hooks.DBHook(),
+                 hooks.ContextHook(pecan_config.app.acl_public_routes),
+                 hooks.RPCHook(),
+                 hooks.NoExceptionTracebackHook()]
+    if extra_hooks:
+        app_hooks.extend(extra_hooks)
+
+    if pecan_config.app.enable_acl:
+        app_hooks.append(hooks.TrustedCallHook())
+
+ pecan.configuration.set_config(dict(pecan_config), overwrite=True)
+
+ app = pecan.make_app(
+ pecan_config.app.root,
+ static_root=pecan_config.app.static_root,
+ debug=CONF.pecan_debug,
+ force_canonical=getattr(pecan_config.app, 'force_canonical', True),
+ hooks=app_hooks,
+ wrap_app=middleware.ParsableErrorMiddleware,
+ )
+
+ if pecan_config.app.enable_acl:
+ return acl.install(app, cfg.CONF, pecan_config.app.acl_public_routes)
+
+ return app
+
+
+class VersionSelectorApplication(object):
+ def __init__(self):
+ pc = get_pecan_config()
+ pc.app.enable_acl = (CONF.auth_strategy == 'keystone')
+ self.v1 = setup_app(pecan_config=pc)
+
+ def __call__(self, environ, start_response):
+ return self.v1(environ, start_response)
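+
+
+# A minimal sketch, assuming a stand-alone wsgiref server, of serving
+# VersionSelectorApplication outside mod_wsgi (port taken from config.py):
+#
+#     from wsgiref.simple_server import make_server
+#     make_server('0.0.0.0', 1288,
+#                 VersionSelectorApplication()).serve_forever()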
diff --git a/iotronic/api/config.py b/iotronic/api/config.py
new file mode 100644
index 0000000..5d46e43
--- /dev/null
+++ b/iotronic/api/config.py
@@ -0,0 +1,43 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Server Specific Configurations
+# See https://pecan.readthedocs.org/en/latest/configuration.html#server-configuration # noqa
+server = {
+ 'port': '1288',
+ 'host': '0.0.0.0'
+}
+
+# Pecan Application Configurations
+# See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa
+app = {
+ 'root': 'iotronic.api.controllers.root.RootController',
+ 'modules': ['iotronic.api'],
+ 'static_root': '%(confdir)s/public',
+ 'debug': True,
+ 'enable_acl': True,
+    'acl_public_routes': [
+        '/',
+        '/v1',
+        # '/v1/drivers/[a-z_]*/vendor_passthru/lookup',
+        r'/v1/nodes/[a-z0-9\-]+/vendor_passthru/heartbeat',
+        r'/v1/boards/[a-z0-9\-]+',
+    ],
+}
+
+# WSME Configurations
+# See https://wsme.readthedocs.org/en/latest/integrate.html#configuration
+wsme = {
+ 'debug': False,
+}
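+
+# NOTE: this module is loaded by iotronic.api.app.get_pecan_config() via
+# pecan.configuration.conf_from_file(), so the dictionaries above become
+# attributes, e.g. pecan_config.app.acl_public_routes.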
diff --git a/iotronic/api/controllers/__init__.py b/iotronic/api/controllers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iotronic/api/controllers/base.py b/iotronic/api/controllers/base.py
new file mode 100644
index 0000000..7d6d79b
--- /dev/null
+++ b/iotronic/api/controllers/base.py
@@ -0,0 +1,114 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from webob import exc
+import wsme
+from wsme import types as wtypes
+
+from iotronic.common.i18n import _
+
+
+class APIBase(wtypes.Base):
+
+ created_at = wsme.wsattr(datetime.datetime, readonly=True)
+ """The time in UTC at which the object is created"""
+
+ updated_at = wsme.wsattr(datetime.datetime, readonly=True)
+ """The time in UTC at which the object is updated"""
+
+ def as_dict(self):
+ """Render this object as a dict of its fields."""
+ return dict((k, getattr(self, k))
+ for k in self.fields
+ if hasattr(self, k) and
+ getattr(self, k) != wsme.Unset)
+
+ def unset_fields_except(self, except_list=None):
+ """Unset fields so they don't appear in the message body.
+
+ :param except_list: A list of fields that won't be touched.
+
+ """
+ if except_list is None:
+ except_list = []
+
+ for k in self.as_dict():
+ if k not in except_list:
+ setattr(self, k, wsme.Unset)
+
+
+class Version(object):
+ """API Version object."""
+
+ string = 'X-OpenStack-Iotronic-API-Version'
+ """HTTP Header string carrying the requested version"""
+
+ min_string = 'X-OpenStack-Iotronic-API-Minimum-Version'
+ """HTTP response header"""
+
+ max_string = 'X-OpenStack-Iotronic-API-Maximum-Version'
+ """HTTP response header"""
+
+ def __init__(self, headers, default_version, latest_version):
+ """Create an API Version object from the supplied headers.
+
+ :param headers: webob headers
+ :param default_version: version to use if not specified in headers
+ :param latest_version: version to use if latest is requested
+ :raises: webob.HTTPNotAcceptable
+ """
+ (self.major, self.minor) = Version.parse_headers(headers,
+ default_version, latest_version)
+
+ def __repr__(self):
+ return '%s.%s' % (self.major, self.minor)
+
+ @staticmethod
+ def parse_headers(headers, default_version, latest_version):
+ """Determine the API version requested based on the headers supplied.
+
+ :param headers: webob headers
+ :param default_version: version to use if not specified in headers
+ :param latest_version: version to use if latest is requested
+        :returns: a tuple of (major, minor) version numbers
+ :raises: webob.HTTPNotAcceptable
+ """
+ version_str = headers.get(Version.string, default_version)
+
+ if version_str.lower() == 'latest':
+ parse_str = latest_version
+ else:
+ parse_str = version_str
+
+ try:
+ version = tuple(int(i) for i in parse_str.split('.'))
+ except ValueError:
+ version = ()
+
+ if len(version) != 2:
+ raise exc.HTTPNotAcceptable(_(
+ "Invalid value for %s header") % Version.string)
+ return version
+
+    def __lt__(self, other):
+        return self.major == other.major and self.minor < other.minor
+
+    def __gt__(self, other):
+        return self.major == other.major and self.minor > other.minor
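+
+
+# Illustrative only (header value assumed): how version negotiation behaves.
+#
+#     headers = {'X-OpenStack-Iotronic-API-Version': '1.0'}
+#     v = Version(headers, default_version='1.0', latest_version='1.0')
+#     (v.major, v.minor)  # -> (1, 0)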
diff --git a/iotronic/api/controllers/link.py b/iotronic/api/controllers/link.py
new file mode 100644
index 0000000..9059c9c
--- /dev/null
+++ b/iotronic/api/controllers/link.py
@@ -0,0 +1,58 @@
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pecan
+from wsme import types as wtypes
+
+from iotronic.api.controllers import base
+
+
+def build_url(resource, resource_args, bookmark=False, base_url=None):
+ if base_url is None:
+ base_url = pecan.request.host_url
+
+ template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s'
+    # FIXME(lucasagomes): a GET on a nested resource whose URL ends
+    # with a '/' returns a 404.
+    # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs
+ template += '%(args)s' if resource_args.startswith('?') else '/%(args)s'
+ return template % {'url': base_url, 'res': resource, 'args': resource_args}
+
+
+class Link(base.APIBase):
+ """A link representation."""
+
+ href = wtypes.text
+ """The url of a link."""
+
+ rel = wtypes.text
+ """The name of a link."""
+
+ type = wtypes.text
+ """Indicates the type of document/link."""
+
+ @staticmethod
+ def make_link(rel_name, url, resource, resource_args,
+ bookmark=False, type=wtypes.Unset):
+ href = build_url(resource, resource_args,
+ bookmark=bookmark, base_url=url)
+ return Link(href=href, rel=rel_name, type=type)
+
+ @classmethod
+ def sample(cls):
+ sample = cls(href="http://localhost:6385/chassis/"
+ "eaaca217-e7d8-47b4-bb41-3f99f20eed89",
+ rel="bookmark")
+ return sample
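+
+
+# Illustrative usage (UUID assumed): building a versioned self link.
+#
+#     Link.make_link('self', 'http://localhost:1288', 'boards', board_uuid)
+#     # -> href == 'http://localhost:1288/v1/boards/<board_uuid>'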
diff --git a/iotronic/api/controllers/root.py b/iotronic/api/controllers/root.py
new file mode 100644
index 0000000..768e035
--- /dev/null
+++ b/iotronic/api/controllers/root.py
@@ -0,0 +1,97 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2012 New Dream Network, LLC (DreamHost)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pecan
+from pecan import rest
+from wsme import types as wtypes
+
+from iotronic.api.controllers import base
+from iotronic.api.controllers import link
+from iotronic.api.controllers import v1
+from iotronic.api import expose
+
+
+class Version(base.APIBase):
+ """An API version representation."""
+
+ id = wtypes.text
+ """The ID of the version, also acts as the release number"""
+
+ links = [link.Link]
+ """A Link that point to a specific version of the API"""
+
+ @staticmethod
+ def convert(id):
+ version = Version()
+ version.id = id
+ version.links = [link.Link.make_link('self', pecan.request.host_url,
+ id, '', bookmark=True)]
+ return version
+
+
+class Root(base.APIBase):
+
+ name = wtypes.text
+ """The name of the API"""
+
+ description = wtypes.text
+ """Some information about this API"""
+
+ versions = [Version]
+ """Links to all the versions available in this API"""
+
+ default_version = Version
+ """A link to the default version of the API"""
+
+ @staticmethod
+ def convert():
+ root = Root()
+ root.name = "OpenStack Iotronic API"
+ root.description = ("Iotronic is an OpenStack project which aims to "
+ "provision baremetal machines.")
+ root.versions = [Version.convert('v1')]
+ root.default_version = Version.convert('v1')
+ return root
+
+
+class RootController(rest.RestController):
+
+ _versions = ['v1']
+ """All supported API versions"""
+
+ _default_version = 'v1'
+ """The default API version"""
+
+ v1 = v1.Controller()
+
+ @expose.expose(Root)
+ def get(self):
+        # NOTE: convert() is called on every request because the host url
+        # must be read from the request object to build the links.
+ return Root.convert()
+
+ @pecan.expose()
+ def _route(self, args):
+ """Overrides the default routing behavior.
+
+ It redirects the request to the default version of the iotronic API
+ if the version number is not specified in the url.
+ """
+
+ if args[0] and args[0] not in self._versions:
+ args = [self._default_version] + args
+ return super(RootController, self)._route(args)
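+
+
+# Illustrative routing behaviour: a request for /boards/<uuid> is rewritten
+# to /v1/boards/<uuid> (the default version), while /v1/... passes through
+# unchanged.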
diff --git a/iotronic/api/controllers/v1/__init__.py b/iotronic/api/controllers/v1/__init__.py
new file mode 100644
index 0000000..dced5d2
--- /dev/null
+++ b/iotronic/api/controllers/v1/__init__.py
@@ -0,0 +1,208 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Version 1 of the Iotronic API
+"""
+
+import pecan
+from pecan import rest
+from webob import exc
+from wsme import types as wtypes
+from iotronic.api.controllers import link
+from iotronic.api.controllers.v1 import board
+
+
+'''
+
+
+#from iotronic.api.controllers.v1 import chassis
+#from iotronic.api.controllers.v1 import driver
+from iotronic.api.controllers.v1 import node
+
+#from iotronic.api.controllers.v1 import port
+from iotronic.api.controllers.v1 import board
+'''
+
+from iotronic.api.controllers import base
+from iotronic.api import expose
+from iotronic.common.i18n import _
+
+BASE_VERSION = 1
+
+MIN_VER_STR = '1.0'
+
+MAX_VER_STR = '1.0'
+
+
+MIN_VER = base.Version({base.Version.string: MIN_VER_STR},
+ MIN_VER_STR, MAX_VER_STR)
+MAX_VER = base.Version({base.Version.string: MAX_VER_STR},
+ MIN_VER_STR, MAX_VER_STR)
+
+'''
+class MediaType(base.APIBase):
+ """A media type representation."""
+
+ base = wtypes.text
+ type = wtypes.text
+
+ def __init__(self, base, type):
+ self.base = base
+ self.type = type
+'''
+
+
+class V1(base.APIBase):
+ """The representation of the version 1 of the API."""
+
+ id = wtypes.text
+ """The ID of the version, also acts as the release number"""
+
+ #media_types = [MediaType]
+ """An array of supported media types for this version"""
+
+ #links = [link.Link]
+ """Links that point to a specific URL for this version and documentation"""
+
+ #chassis = [link.Link]
+ """Links to the chassis resource"""
+
+ #nodes = [link.Link]
+ """Links to the nodes resource"""
+
+ boards = [link.Link]
+ """Links to the nodes resource"""
+
+ #ports = [link.Link]
+ """Links to the ports resource"""
+
+ #drivers = [link.Link]
+ """Links to the drivers resource"""
+
+ @staticmethod
+ def convert():
+ v1 = V1()
+ v1.id = "v1"
+
+        v1.boards = [link.Link.make_link('self', pecan.request.host_url,
+                                         'boards', ''),
+                     link.Link.make_link('bookmark',
+                                         pecan.request.host_url,
+                                         'boards', '',
+                                         bookmark=True)
+                     ]
+
+ '''
+ v1.links = [link.Link.make_link('self', pecan.request.host_url,
+ 'v1', '', bookmark=True),
+ link.Link.make_link('describedby',
+ 'http://docs.openstack.org',
+ 'developer/iotronic/dev',
+ 'api-spec-v1.html',
+ bookmark=True, type='text/html')
+ ]
+
+ v1.media_types = [MediaType('application/json',
+ 'application/vnd.openstack.iotronic.v1+json')]
+
+ v1.chassis = [link.Link.make_link('self', pecan.request.host_url,
+ 'chassis', ''),
+ link.Link.make_link('bookmark',
+ pecan.request.host_url,
+ 'chassis', '',
+ bookmark=True)
+ ]
+
+ v1.nodes = [link.Link.make_link('self', pecan.request.host_url,
+ 'nodes', ''),
+ link.Link.make_link('bookmark',
+ pecan.request.host_url,
+ 'nodes', '',
+ bookmark=True)
+ ]
+ '''
+ '''
+ v1.ports = [link.Link.make_link('self', pecan.request.host_url,
+ 'ports', ''),
+ link.Link.make_link('bookmark',
+ pecan.request.host_url,
+ 'ports', '',
+ bookmark=True)
+ ]
+ v1.drivers = [link.Link.make_link('self', pecan.request.host_url,
+ 'drivers', ''),
+ link.Link.make_link('bookmark',
+ pecan.request.host_url,
+ 'drivers', '',
+ bookmark=True)
+ ]
+ '''
+ return v1
+
+
+class Controller(rest.RestController):
+ """Version 1 API controller root."""
+
+ boards = board.BoardsController()
+ #nodes = node.NodesController()
+ #ports = port.PortsController()
+ #chassis = chassis.ChassisController()
+ #drivers = driver.DriversController()
+
+
+ @expose.expose(V1)
+ def get(self):
+        # NOTE: convert() is called on every request because the host url
+        # must be read from the request object to build the links.
+ return V1.convert()
+
+ def _check_version(self, version, headers=None):
+ if headers is None:
+ headers = {}
+ # ensure that major version in the URL matches the header
+ if version.major != BASE_VERSION:
+ raise exc.HTTPNotAcceptable(_(
+ "Mutually exclusive versions requested. Version %(ver)s "
+ "requested but not supported by this service. The supported "
+ "version range is: [%(min)s, %(max)s].") % {'ver': version,
+ 'min': MIN_VER_STR, 'max': MAX_VER_STR}, headers=headers)
+ # ensure the minor version is within the supported range
+ if version < MIN_VER or version > MAX_VER:
+ raise exc.HTTPNotAcceptable(_(
+ "Version %(ver)s was requested but the minor version is not "
+ "supported by this service. The supported version range is: "
+ "[%(min)s, %(max)s].") % {'ver': version, 'min': MIN_VER_STR,
+ 'max': MAX_VER_STR}, headers=headers)
+
+ @pecan.expose()
+ def _route(self, args):
+ v = base.Version(pecan.request.headers, MIN_VER_STR, MAX_VER_STR)
+
+ # Always set the min and max headers
+ pecan.response.headers[base.Version.min_string] = MIN_VER_STR
+ pecan.response.headers[base.Version.max_string] = MAX_VER_STR
+
+ # assert that requested version is supported
+ self._check_version(v, pecan.response.headers)
+ pecan.response.headers[base.Version.string] = str(v)
+ pecan.request.version = v
+
+ return super(Controller, self)._route(args)
+
+
+__all__ = ('Controller',)
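+
+# Illustrative response headers (names from base.Version) set by _route():
+#
+#     X-OpenStack-Iotronic-API-Minimum-Version: 1.0
+#     X-OpenStack-Iotronic-API-Maximum-Version: 1.0
+#     X-OpenStack-Iotronic-API-Version: 1.0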
diff --git a/iotronic/api/controllers/v1/__old/__init__.py b/iotronic/api/controllers/v1/__old/__init__.py
new file mode 100644
index 0000000..69a9940
--- /dev/null
+++ b/iotronic/api/controllers/v1/__old/__init__.py
@@ -0,0 +1,207 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Version 1 of the Iotronic API
+"""
+
+import pecan
+from pecan import rest
+from webob import exc
+from wsme import types as wtypes
+
+
+from iotronic.api.controllers import base
+from iotronic.api.controllers import link
+#from iotronic.api.controllers.v1 import chassis
+#from iotronic.api.controllers.v1 import driver
+from iotronic.api.controllers.v1 import node
+from iotronic.api.controllers.v1 import board
+#from iotronic.api.controllers.v1 import port
+from iotronic.api import expose
+from iotronic.common.i18n import _
+
+
+
+BASE_VERSION = 1
+
+# NOTE(deva): v1.0 is reserved to indicate Juno's API, but is not presently
+# supported by the API service. All changes between Juno and the
+# point where we added microversioning are considered backwards-
+# compatible, but are not specifically discoverable at this time.
+#
+# The v1.1 version indicates this "initial" version as being
+# different from Juno (v1.0), and includes the following changes:
+#
+# 827db7fe: Add Node.maintenance_reason
+# 68eed82b: Add API endpoint to set/unset the node maintenance mode
+# bc973889: Add sync and async support for passthru methods
+# e03f443b: Vendor endpoints to support different HTTP methods
+# e69e5309: Make vendor methods discoverable via the Iotronic API
+# edf532db: Add logic to store the config drive passed by Nova
+
+# v1.1: API at the point in time when microversioning support was added
+MIN_VER_STR = '1.0'
+
+# v1.2: Renamed NOSTATE ("None") to AVAILABLE ("available")
+# v1.3: Add node.driver_internal_info
+# v1.4: Add MANAGEABLE state
+# v1.5: Add logical node names
+# v1.6: Add INSPECT* states
+MAX_VER_STR = '1.0'
+
+
+MIN_VER = base.Version({base.Version.string: MIN_VER_STR},
+ MIN_VER_STR, MAX_VER_STR)
+MAX_VER = base.Version({base.Version.string: MAX_VER_STR},
+ MIN_VER_STR, MAX_VER_STR)
+
+
+class MediaType(base.APIBase):
+ """A media type representation."""
+
+ base = wtypes.text
+ type = wtypes.text
+
+ def __init__(self, base, type):
+ self.base = base
+ self.type = type
+
+
+class V1(base.APIBase):
+ """The representation of the version 1 of the API."""
+
+ id = wtypes.text
+ """The ID of the version, also acts as the release number"""
+
+ media_types = [MediaType]
+ """An array of supported media types for this version"""
+
+ links = [link.Link]
+ """Links that point to a specific URL for this version and documentation"""
+
+ #chassis = [link.Link]
+ """Links to the chassis resource"""
+
+ nodes = [link.Link]
+ """Links to the nodes resource"""
+
+ #ports = [link.Link]
+ """Links to the ports resource"""
+
+ #drivers = [link.Link]
+ """Links to the drivers resource"""
+
+ @staticmethod
+ def convert():
+ v1 = V1()
+ v1.id = "v1"
+
+ v1.links = [link.Link.make_link('self', pecan.request.host_url,
+ 'v1', '', bookmark=True),
+ link.Link.make_link('describedby',
+ 'http://docs.openstack.org',
+ 'developer/iotronic/dev',
+ 'api-spec-v1.html',
+ bookmark=True, type='text/html')
+ ]
+
+ v1.media_types = [MediaType('application/json',
+ 'application/vnd.openstack.iotronic.v1+json')]
+ '''
+ v1.chassis = [link.Link.make_link('self', pecan.request.host_url,
+ 'chassis', ''),
+ link.Link.make_link('bookmark',
+ pecan.request.host_url,
+ 'chassis', '',
+ bookmark=True)
+ ]
+ '''
+ v1.nodes = [link.Link.make_link('self', pecan.request.host_url,
+ 'nodes', ''),
+ link.Link.make_link('bookmark',
+ pecan.request.host_url,
+ 'nodes', '',
+ bookmark=True)
+ ]
+ '''
+ v1.ports = [link.Link.make_link('self', pecan.request.host_url,
+ 'ports', ''),
+ link.Link.make_link('bookmark',
+ pecan.request.host_url,
+ 'ports', '',
+ bookmark=True)
+ ]
+ v1.drivers = [link.Link.make_link('self', pecan.request.host_url,
+ 'drivers', ''),
+ link.Link.make_link('bookmark',
+ pecan.request.host_url,
+ 'drivers', '',
+ bookmark=True)
+ ]
+ '''
+ return v1
+
+
+class Controller(rest.RestController):
+ """Version 1 API controller root."""
+
+ nodes = node.NodesController()
+ #ports = port.PortsController()
+ #chassis = chassis.ChassisController()
+ #drivers = driver.DriversController()
+    boards = board.BoardsController()
+
+ @expose.expose(V1)
+ def get(self):
+        # NOTE: convert() is called on every request because the host url
+        # must be read from the request object to build the links.
+ return V1.convert()
+
+ def _check_version(self, version, headers=None):
+ if headers is None:
+ headers = {}
+ # ensure that major version in the URL matches the header
+ if version.major != BASE_VERSION:
+ raise exc.HTTPNotAcceptable(_(
+ "Mutually exclusive versions requested. Version %(ver)s "
+ "requested but not supported by this service. The supported "
+ "version range is: [%(min)s, %(max)s].") % {'ver': version,
+ 'min': MIN_VER_STR, 'max': MAX_VER_STR}, headers=headers)
+ # ensure the minor version is within the supported range
+ if version < MIN_VER or version > MAX_VER:
+ raise exc.HTTPNotAcceptable(_(
+ "Version %(ver)s was requested but the minor version is not "
+ "supported by this service. The supported version range is: "
+ "[%(min)s, %(max)s].") % {'ver': version, 'min': MIN_VER_STR,
+ 'max': MAX_VER_STR}, headers=headers)
+
+ @pecan.expose()
+ def _route(self, args):
+ v = base.Version(pecan.request.headers, MIN_VER_STR, MAX_VER_STR)
+
+ # Always set the min and max headers
+ pecan.response.headers[base.Version.min_string] = MIN_VER_STR
+ pecan.response.headers[base.Version.max_string] = MAX_VER_STR
+
+ # assert that requested version is supported
+ self._check_version(v, pecan.response.headers)
+ pecan.response.headers[base.Version.string] = str(v)
+ pecan.request.version = v
+
+ return super(Controller, self)._route(args)
+
+
+__all__ = ('Controller',)
diff --git a/iotronic/api/controllers/v1/__old/chassis.py b/iotronic/api/controllers/v1/__old/chassis.py
new file mode 100644
index 0000000..4c34121
--- /dev/null
+++ b/iotronic/api/controllers/v1/__old/chassis.py
@@ -0,0 +1,270 @@
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import pecan
+from pecan import rest
+import wsme
+from wsme import types as wtypes
+
+from iotronic.api.controllers import base
+from iotronic.api.controllers import link
+from iotronic.api.controllers.v1 import collection
+from iotronic.api.controllers.v1 import node
+from iotronic.api.controllers.v1 import types
+from iotronic.api.controllers.v1 import utils as api_utils
+from iotronic.api import expose
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic import objects
+
+
+class ChassisPatchType(types.JsonPatchType):
+ pass
+
+
+class Chassis(base.APIBase):
+ """API representation of a chassis.
+
+ This class enforces type checking and value constraints, and converts
+ between the internal object model and the API representation of
+ a chassis.
+ """
+
+ uuid = types.uuid
+ """The UUID of the chassis"""
+
+ description = wtypes.text
+ """The description of the chassis"""
+
+ extra = {wtypes.text: types.jsontype}
+ """The metadata of the chassis"""
+
+ links = wsme.wsattr([link.Link], readonly=True)
+ """A list containing a self link and associated chassis links"""
+
+ nodes = wsme.wsattr([link.Link], readonly=True)
+ """Links to the collection of nodes contained in this chassis"""
+
+ def __init__(self, **kwargs):
+ self.fields = []
+ for field in objects.Chassis.fields:
+ # Skip fields we do not expose.
+ if not hasattr(self, field):
+ continue
+ self.fields.append(field)
+ setattr(self, field, kwargs.get(field, wtypes.Unset))
+
+ @staticmethod
+ def _convert_with_links(chassis, url, expand=True):
+ if not expand:
+ chassis.unset_fields_except(['uuid', 'description'])
+ else:
+ chassis.nodes = [link.Link.make_link('self',
+ url,
+ 'chassis',
+ chassis.uuid + "/nodes"),
+ link.Link.make_link('bookmark',
+ url,
+ 'chassis',
+ chassis.uuid + "/nodes",
+ bookmark=True)
+ ]
+ chassis.links = [link.Link.make_link('self',
+ url,
+ 'chassis', chassis.uuid),
+ link.Link.make_link('bookmark',
+ url,
+ 'chassis', chassis.uuid,
+ bookmark=True)
+ ]
+ return chassis
+
+ @classmethod
+ def convert_with_links(cls, rpc_chassis, expand=True):
+ chassis = Chassis(**rpc_chassis.as_dict())
+ return cls._convert_with_links(chassis, pecan.request.host_url,
+ expand)
+
+ @classmethod
+ def sample(cls, expand=True):
+ time = datetime.datetime(2000, 1, 1, 12, 0, 0)
+ sample = cls(uuid='eaaca217-e7d8-47b4-bb41-3f99f20eed89', extra={},
+ description='Sample chassis', created_at=time,
+ updated_at=time)
+ return cls._convert_with_links(sample, 'http://localhost:6385',
+ expand)
+
+
+class ChassisCollection(collection.Collection):
+ """API representation of a collection of chassis."""
+
+ chassis = [Chassis]
+ """A list containing chassis objects"""
+
+ def __init__(self, **kwargs):
+ self._type = 'chassis'
+
+ @staticmethod
+ def convert_with_links(chassis, limit, url=None, expand=False, **kwargs):
+ collection = ChassisCollection()
+ collection.chassis = [Chassis.convert_with_links(ch, expand)
+ for ch in chassis]
+ url = url or None
+ collection.next = collection.get_next(limit, url=url, **kwargs)
+ return collection
+
+ @classmethod
+ def sample(cls, expand=True):
+ sample = cls()
+ sample.chassis = [Chassis.sample(expand=False)]
+ return sample
+
+
+class ChassisController(rest.RestController):
+ """REST controller for Chassis."""
+
+ nodes = node.NodesController()
+ """Expose nodes as a sub-element of chassis"""
+
+ # Set the flag to indicate that the requests to this resource are
+ # coming from a top-level resource
+ nodes.from_chassis = True
+
+ _custom_actions = {
+ 'detail': ['GET'],
+ }
+
+ invalid_sort_key_list = ['extra']
+
+ def _get_chassis_collection(self, marker, limit, sort_key, sort_dir,
+ expand=False, resource_url=None):
+ limit = api_utils.validate_limit(limit)
+ sort_dir = api_utils.validate_sort_dir(sort_dir)
+ marker_obj = None
+ if marker:
+ marker_obj = objects.Chassis.get_by_uuid(pecan.request.context,
+ marker)
+
+ if sort_key in self.invalid_sort_key_list:
+ raise exception.InvalidParameterValue(_(
+ "The sort_key value %(key)s is an invalid field for sorting")
+ % {'key': sort_key})
+
+ chassis = objects.Chassis.list(pecan.request.context, limit,
+ marker_obj, sort_key=sort_key,
+ sort_dir=sort_dir)
+ return ChassisCollection.convert_with_links(chassis, limit,
+ url=resource_url,
+ expand=expand,
+ sort_key=sort_key,
+ sort_dir=sort_dir)
+
+ @expose.expose(ChassisCollection, types.uuid,
+ int, wtypes.text, wtypes.text)
+ def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
+ """Retrieve a list of chassis.
+
+ :param marker: pagination marker for large data sets.
+ :param limit: maximum number of resources to return in a single result.
+ :param sort_key: column to sort results by. Default: id.
+ :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
+ """
+ return self._get_chassis_collection(marker, limit, sort_key, sort_dir)
+
+ @expose.expose(ChassisCollection, types.uuid, int,
+ wtypes.text, wtypes.text)
+ def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
+ """Retrieve a list of chassis with detail.
+
+ :param marker: pagination marker for large data sets.
+ :param limit: maximum number of resources to return in a single result.
+ :param sort_key: column to sort results by. Default: id.
+ :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
+ """
+ # /detail should only work against collections
+ parent = pecan.request.path.split('/')[:-1][-1]
+ if parent != "chassis":
+ raise exception.HTTPNotFound
+
+ expand = True
+ resource_url = '/'.join(['chassis', 'detail'])
+ return self._get_chassis_collection(marker, limit, sort_key, sort_dir,
+ expand, resource_url)
+
+ @expose.expose(Chassis, types.uuid)
+ def get_one(self, chassis_uuid):
+ """Retrieve information about the given chassis.
+
+ :param chassis_uuid: UUID of a chassis.
+ """
+ rpc_chassis = objects.Chassis.get_by_uuid(pecan.request.context,
+ chassis_uuid)
+ return Chassis.convert_with_links(rpc_chassis)
+
+ @expose.expose(Chassis, body=Chassis, status_code=201)
+ def post(self, chassis):
+ """Create a new chassis.
+
+ :param chassis: a chassis within the request body.
+ """
+ new_chassis = objects.Chassis(pecan.request.context,
+ **chassis.as_dict())
+ new_chassis.create()
+ # Set the HTTP Location Header
+ pecan.response.location = link.build_url('chassis', new_chassis.uuid)
+ return Chassis.convert_with_links(new_chassis)
+
+ @wsme.validate(types.uuid, [ChassisPatchType])
+ @expose.expose(Chassis, types.uuid, body=[ChassisPatchType])
+ def patch(self, chassis_uuid, patch):
+ """Update an existing chassis.
+
+ :param chassis_uuid: UUID of a chassis.
+ :param patch: a json PATCH document to apply to this chassis.
+ """
+ rpc_chassis = objects.Chassis.get_by_uuid(pecan.request.context,
+ chassis_uuid)
+ try:
+ chassis = Chassis(**api_utils.apply_jsonpatch(
+ rpc_chassis.as_dict(), patch))
+ except api_utils.JSONPATCH_EXCEPTIONS as e:
+ raise exception.PatchError(patch=patch, reason=e)
+
+ # Update only the fields that have changed
+ for field in objects.Chassis.fields:
+ try:
+ patch_val = getattr(chassis, field)
+ except AttributeError:
+ # Ignore fields that aren't exposed in the API
+ continue
+ if patch_val == wtypes.Unset:
+ patch_val = None
+ if rpc_chassis[field] != patch_val:
+ rpc_chassis[field] = patch_val
+
+ rpc_chassis.save()
+ return Chassis.convert_with_links(rpc_chassis)
+
+ @expose.expose(None, types.uuid, status_code=204)
+ def delete(self, chassis_uuid):
+ """Delete a chassis.
+
+ :param chassis_uuid: UUID of a chassis.
+ """
+ rpc_chassis = objects.Chassis.get_by_uuid(pecan.request.context,
+ chassis_uuid)
+ rpc_chassis.destroy()
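+
+
+# Illustrative JSON PATCH body (values assumed) accepted by patch() above:
+#
+#     [{"op": "replace", "path": "/description", "value": "rack-1"}]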
diff --git a/iotronic/api/controllers/v1/__old/collection.py b/iotronic/api/controllers/v1/__old/collection.py
new file mode 100644
index 0000000..d49337f
--- /dev/null
+++ b/iotronic/api/controllers/v1/__old/collection.py
@@ -0,0 +1,48 @@
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pecan
+from wsme import types as wtypes
+
+from iotronic.api.controllers import base
+from iotronic.api.controllers import link
+
+
+class Collection(base.APIBase):
+
+ next = wtypes.text
+ """A link to retrieve the next subset of the collection"""
+
+ @property
+ def collection(self):
+ return getattr(self, self._type)
+
+ def has_next(self, limit):
+ """Return whether collection has more items."""
+ return len(self.collection) and len(self.collection) == limit
+
+ def get_next(self, limit, url=None, **kwargs):
+ """Return a link to the next subset of the collection."""
+ if not self.has_next(limit):
+ return wtypes.Unset
+
+ resource_url = url or self._type
+ q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs])
+ next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % {
+ 'args': q_args, 'limit': limit,
+ 'marker': self.collection[-1].uuid}
+
+ return link.Link.make_link('next', pecan.request.host_url,
+ resource_url, next_args).href
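+
+
+# Illustrative 'next' link (host and marker assumed) for limit=2:
+#
+#     http://localhost:1288/v1/chassis?limit=2&marker=<last-uuid>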
diff --git a/iotronic/api/controllers/v1/__old/driver.py b/iotronic/api/controllers/v1/__old/driver.py
new file mode 100644
index 0000000..3400890
--- /dev/null
+++ b/iotronic/api/controllers/v1/__old/driver.py
@@ -0,0 +1,210 @@
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pecan
+from pecan import rest
+import wsme
+from wsme import types as wtypes
+
+from iotronic.api.controllers import base
+from iotronic.api.controllers import link
+from iotronic.api import expose
+from iotronic.common import exception
+from iotronic.common.i18n import _
+
+
+# Property information for drivers:
+# key = driver name;
+# value = dictionary of properties of that driver:
+# key = property name.
+# value = description of the property.
+# NOTE(rloo). This is cached for the lifetime of the API service. If one or
+# more conductor services are restarted with new driver versions, the API
+# service should be restarted.
+_DRIVER_PROPERTIES = {}
+
+# Vendor information for drivers:
+# key = driver name;
+# value = dictionary of vendor methods of that driver:
+# key = method name.
+# value = dictionary with the metadata of that method.
+# NOTE(lucasagomes). This is cached for the lifetime of the API
+# service. If one or more conductor services are restarted with new driver
+# versions, the API service should be restarted.
+_VENDOR_METHODS = {}
+
+
+class Driver(base.APIBase):
+ """API representation of a driver."""
+
+ name = wtypes.text
+ """The name of the driver"""
+
+ hosts = [wtypes.text]
+ """A list of active conductors that support this driver"""
+
+ links = wsme.wsattr([link.Link], readonly=True)
+ """A list containing self and bookmark links"""
+
+ @staticmethod
+ def convert_with_links(name, hosts):
+ driver = Driver()
+ driver.name = name
+ driver.hosts = hosts
+ driver.links = [
+ link.Link.make_link('self',
+ pecan.request.host_url,
+ 'drivers', name),
+ link.Link.make_link('bookmark',
+ pecan.request.host_url,
+ 'drivers', name,
+ bookmark=True)
+ ]
+ return driver
+
+ @classmethod
+ def sample(cls):
+ sample = cls(name="sample-driver",
+ hosts=["fake-host"])
+ return sample
+
+
+class DriverList(base.APIBase):
+ """API representation of a list of drivers."""
+
+ drivers = [Driver]
+ """A list containing drivers objects"""
+
+ @staticmethod
+ def convert_with_links(drivers):
+ collection = DriverList()
+ collection.drivers = [
+ Driver.convert_with_links(dname, list(drivers[dname]))
+ for dname in drivers]
+ return collection
+
+ @classmethod
+ def sample(cls):
+ sample = cls()
+ sample.drivers = [Driver.sample()]
+ return sample
+
+
+class DriverPassthruController(rest.RestController):
+ """REST controller for driver passthru.
+
+    This controller allows vendors to expose cross-node functionality in the
+    Iotronic API. Iotronic merely relays the message from here to the
+    specified driver; no introspection is made into the message body.
+ """
+
+ _custom_actions = {
+ 'methods': ['GET']
+ }
+
+ @expose.expose(wtypes.text, wtypes.text)
+ def methods(self, driver_name):
+ """Retrieve information about vendor methods of the given driver.
+
+ :param driver_name: name of the driver.
+        :returns: dictionary with <method name>:<method metadata>
+            entries.
+ :raises: DriverNotFound if the driver name is invalid or the
+ driver cannot be loaded.
+ """
+ if driver_name not in _VENDOR_METHODS:
+ topic = pecan.request.rpcapi.get_topic_for_driver(driver_name)
+ ret = pecan.request.rpcapi.get_driver_vendor_passthru_methods(
+ pecan.request.context, driver_name, topic=topic)
+ _VENDOR_METHODS[driver_name] = ret
+
+ return _VENDOR_METHODS[driver_name]
+
+ @expose.expose(wtypes.text, wtypes.text, wtypes.text,
+ body=wtypes.text)
+ def _default(self, driver_name, method, data=None):
+ """Call a driver API extension.
+
+ :param driver_name: name of the driver to call.
+ :param method: name of the method, to be passed to the vendor
+ implementation.
+ :param data: body of data to supply to the specified method.
+ """
+ if not method:
+ raise wsme.exc.ClientSideError(_("Method not specified"))
+
+ if data is None:
+ data = {}
+
+ http_method = pecan.request.method.upper()
+ topic = pecan.request.rpcapi.get_topic_for_driver(driver_name)
+ ret, is_async = pecan.request.rpcapi.driver_vendor_passthru(
+ pecan.request.context, driver_name, method,
+ http_method, data, topic=topic)
+ status_code = 202 if is_async else 200
+ return wsme.api.Response(ret, status_code=status_code)
+
+
+class DriversController(rest.RestController):
+ """REST controller for Drivers."""
+
+ vendor_passthru = DriverPassthruController()
+
+ _custom_actions = {
+ 'properties': ['GET'],
+ }
+
+ @expose.expose(DriverList)
+ def get_all(self):
+ """Retrieve a list of drivers."""
+ # FIXME(deva): formatting of the auto-generated REST API docs
+ # will break from a single-line doc string.
+ # This is a result of a bug in sphinxcontrib-pecanwsme
+ # https://github.com/dreamhost/sphinxcontrib-pecanwsme/issues/8
+ driver_list = pecan.request.dbapi.get_active_driver_dict()
+ return DriverList.convert_with_links(driver_list)
+
+ @expose.expose(Driver, wtypes.text)
+ def get_one(self, driver_name):
+ """Retrieve a single driver."""
+ # NOTE(russell_h): There is no way to make this more efficient than
+ # retrieving a list of drivers using the current sqlalchemy schema, but
+ # this path must be exposed for Pecan to route any paths we might
+ # choose to expose below it.
+
+ driver_dict = pecan.request.dbapi.get_active_driver_dict()
+ for name, hosts in driver_dict.items():
+ if name == driver_name:
+ return Driver.convert_with_links(name, list(hosts))
+
+ raise exception.DriverNotFound(driver_name=driver_name)
+
+ @expose.expose(wtypes.text, wtypes.text)
+ def properties(self, driver_name):
+ """Retrieve property information of the given driver.
+
+ :param driver_name: name of the driver.
+        :returns: dictionary with <property name>:<property description>
+            entries.
+ :raises: DriverNotFound (HTTP 404) if the driver name is invalid or
+ the driver cannot be loaded.
+ """
+ if driver_name not in _DRIVER_PROPERTIES:
+ topic = pecan.request.rpcapi.get_topic_for_driver(driver_name)
+ properties = pecan.request.rpcapi.get_driver_properties(
+ pecan.request.context, driver_name, topic=topic)
+ _DRIVER_PROPERTIES[driver_name] = properties
+
+ return _DRIVER_PROPERTIES[driver_name]
diff --git a/iotronic/api/controllers/v1/__old/node.py b/iotronic/api/controllers/v1/__old/node.py
new file mode 100644
index 0000000..137f0ac
--- /dev/null
+++ b/iotronic/api/controllers/v1/__old/node.py
@@ -0,0 +1,1104 @@
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ast
+import datetime
+
+from oslo_config import cfg
+from oslo_log import log
+from oslo_utils import strutils
+from oslo_utils import uuidutils
+import pecan
+from pecan import rest
+import wsme
+from wsme import types as wtypes
+
+from iotronic.api.controllers import base
+from iotronic.api.controllers import link
+from iotronic.api.controllers.v1 import collection
+from iotronic.api.controllers.v1 import port
+from iotronic.api.controllers.v1 import types
+from iotronic.api.controllers.v1 import utils as api_utils
+from iotronic.api import expose
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common import states as ir_states
+from iotronic import objects
+
+
+CONF = cfg.CONF
+CONF.import_opt('heartbeat_timeout', 'iotronic.conductor.manager',
+ group='conductor')
+
+LOG = log.getLogger(__name__)
+
+# Vendor information for node's driver:
+# key = driver name;
+# value = dictionary of node vendor methods of that driver:
+# key = method name.
+# value = dictionary with the metadata of that method.
+# NOTE(lucasagomes). This is cached for the lifetime of the API
+# service. If one or more conductor services are restarted with new driver
+# versions, the API service should be restarted.
+_VENDOR_METHODS = {}
+
+
+def hide_fields_in_newer_versions(obj):
+ # if requested version is < 1.3, hide driver_internal_info
+ if pecan.request.version.minor < 3:
+ obj.driver_internal_info = wsme.Unset
+
+ if not api_utils.allow_node_logical_names():
+ obj.name = wsme.Unset
+
+ # if requested version is < 1.6, hide inspection_*_at fields
+ if pecan.request.version.minor < 6:
+ obj.inspection_finished_at = wsme.Unset
+ obj.inspection_started_at = wsme.Unset
+
+
+def assert_juno_provision_state_name(obj):
+ # if requested version is < 1.2, convert AVAILABLE to the old NOSTATE
+ #if (pecan.request.version.minor < 2 and
+ # obj.provision_state == ir_states.AVAILABLE):
+ # obj.provision_state = ir_states.NOSTATE
+ pass
+
+
+def check_allow_management_verbs(verb):
+ # v1.4 added the MANAGEABLE state and two verbs to move nodes into
+ # and out of that state. Reject requests to do this in older versions
+ if (pecan.request.version.minor < 4 and
+ verb in [ir_states.VERBS['manage'], ir_states.VERBS['provide']]):
+ raise exception.NotAcceptable()
+ if (pecan.request.version.minor < 6 and
+ verb == ir_states.VERBS['inspect']):
+ raise exception.NotAcceptable()
+
+
+class NodePatchType(types.JsonPatchType):
+
+ @staticmethod
+ def internal_attrs():
+ defaults = types.JsonPatchType.internal_attrs()
+ # TODO(lucasagomes): Include maintenance once the endpoint
+ # v1/nodes//maintenance do more things than updating the DB.
+ return defaults + ['/console_enabled', '/last_error',
+ '/power_state', '/provision_state', '/reservation',
+ '/target_power_state', '/target_provision_state',
+ '/provision_updated_at', '/maintenance_reason',
+ '/driver_internal_info', '/inspection_finished_at',
+ '/inspection_started_at', ]
+
+ @staticmethod
+ def mandatory_attrs():
+ return ['/chassis_uuid', '/driver']
+
+
+class BootDeviceController(rest.RestController):
+
+ _custom_actions = {
+ 'supported': ['GET'],
+ }
+
+ def _get_boot_device(self, node_ident, supported=False):
+ """Get the current boot device or a list of supported devices.
+
+ :param node_ident: the UUID or logical name of a node.
+ :param supported: Boolean value. If true return a list of
+ supported boot devices, if false return the
+ current boot device. Default: False.
+ :returns: The current boot device or a list of the supported
+ boot devices.
+
+ """
+ rpc_node = api_utils.get_rpc_node(node_ident)
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+ if supported:
+ return pecan.request.rpcapi.get_supported_boot_devices(
+ pecan.request.context, rpc_node.uuid, topic)
+ else:
+ return pecan.request.rpcapi.get_boot_device(pecan.request.context,
+ rpc_node.uuid, topic)
+
+ @expose.expose(None, types.uuid_or_name, wtypes.text, types.boolean,
+ status_code=204)
+ def put(self, node_ident, boot_device, persistent=False):
+ """Set the boot device for a node.
+
+ Set the boot device to use on next reboot of the node.
+
+ :param node_ident: the UUID or logical name of a node.
+ :param boot_device: the boot device, one of
+ :mod:`iotronic.common.boot_devices`.
+ :param persistent: Boolean value. True if the boot device will
+ persist to all future boots, False if not.
+ Default: False.
+
+ """
+ rpc_node = api_utils.get_rpc_node(node_ident)
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+ pecan.request.rpcapi.set_boot_device(pecan.request.context,
+ rpc_node.uuid,
+ boot_device,
+ persistent=persistent,
+ topic=topic)
+
+ @expose.expose(wtypes.text, types.uuid_or_name)
+ def get(self, node_ident):
+ """Get the current boot device for a node.
+
+ :param node_ident: the UUID or logical name of a node.
+ :returns: a json object containing:
+
+ :boot_device: the boot device, one of
+ :mod:`iotronic.common.boot_devices` or None if it is unknown.
+ :persistent: Whether the boot device will persist to all
+ future boots or not, None if it is unknown.
+
+ """
+ return self._get_boot_device(node_ident)
+
+ @expose.expose(wtypes.text, types.uuid_or_name)
+ def supported(self, node_ident):
+ """Get a list of the supported boot devices.
+
+ :param node_ident: the UUID or logical name of a node.
+ :returns: A json object with the list of supported boot
+ devices.
+
+ """
+ boot_devices = self._get_boot_device(node_ident, supported=True)
+ return {'supported_boot_devices': boot_devices}
+
+
+class NodeManagementController(rest.RestController):
+
+ boot_device = BootDeviceController()
+ """Expose boot_device as a sub-element of management"""
+
+
+class ConsoleInfo(base.APIBase):
+ """API representation of the console information for a node."""
+
+ console_enabled = types.boolean
+ """The console state: if the console is enabled or not."""
+
+ console_info = {wtypes.text: types.jsontype}
+ """The console information. It typically includes the url to access the
+ console and the type of the application that hosts the console."""
+
+ @classmethod
+ def sample(cls):
+        console = {'type': 'shellinabox', 'url': 'http://<hostname>:4201'}
+ return cls(console_enabled=True, console_info=console)
+
+
+class NodeConsoleController(rest.RestController):
+
+ @expose.expose(ConsoleInfo, types.uuid_or_name)
+ def get(self, node_ident):
+ """Get connection information about the console.
+
+ :param node_ident: UUID or logical name of a node.
+ """
+ rpc_node = api_utils.get_rpc_node(node_ident)
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+ try:
+ console = pecan.request.rpcapi.get_console_information(
+ pecan.request.context, rpc_node.uuid, topic)
+ console_state = True
+ except exception.NodeConsoleNotEnabled:
+ console = None
+ console_state = False
+
+ return ConsoleInfo(console_enabled=console_state, console_info=console)
+
+ @expose.expose(None, types.uuid_or_name, types.boolean,
+ status_code=202)
+ def put(self, node_ident, enabled):
+ """Start and stop the node console.
+
+ :param node_ident: UUID or logical name of a node.
+ :param enabled: Boolean value; whether to enable or disable the
+ console.
+ """
+ rpc_node = api_utils.get_rpc_node(node_ident)
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+ pecan.request.rpcapi.set_console_mode(pecan.request.context,
+ rpc_node.uuid, enabled, topic)
+ # Set the HTTP Location Header
+ url_args = '/'.join([node_ident, 'states', 'console'])
+ pecan.response.location = link.build_url('nodes', url_args)
+
+
+class NodeStates(base.APIBase):
+ """API representation of the states of a node."""
+
+ console_enabled = types.boolean
+ """Indicates whether the console access is enabled or disabled on
+ the node."""
+
+ power_state = wtypes.text
+ """Represent the current (not transition) power state of the node"""
+
+ provision_state = wtypes.text
+ """Represent the current (not transition) provision state of the node"""
+
+ provision_updated_at = datetime.datetime
+ """The UTC date and time of the last provision state change"""
+
+ target_power_state = wtypes.text
+ """The user modified desired power state of the node."""
+
+ target_provision_state = wtypes.text
+ """The user modified desired provision state of the node."""
+
+ last_error = wtypes.text
+ """Any error from the most recent (last) asynchronous transaction that
+ started but failed to finish."""
+
+ @staticmethod
+ def convert(rpc_node):
+ attr_list = ['console_enabled', 'last_error', 'power_state',
+ 'provision_state', 'target_power_state',
+ 'target_provision_state', 'provision_updated_at']
+ states = NodeStates()
+ for attr in attr_list:
+ setattr(states, attr, getattr(rpc_node, attr))
+ assert_juno_provision_state_name(states)
+ return states
+
+ @classmethod
+ def sample(cls):
+ sample = cls(target_power_state=ir_states.POWER_ON,
+ target_provision_state=ir_states.ACTIVE,
+ last_error=None,
+ console_enabled=False,
+ provision_updated_at=None,
+ power_state=ir_states.POWER_ON,
+ provision_state=None)
+ return sample
+
+
+class NodeStatesController(rest.RestController):
+
+ _custom_actions = {
+ 'power': ['PUT'],
+ 'provision': ['PUT'],
+ }
+
+ console = NodeConsoleController()
+ """Expose console as a sub-element of states"""
+
+ @expose.expose(NodeStates, types.uuid_or_name)
+ def get(self, node_ident):
+ """List the states of the node.
+
+ :param node_ident: the UUID or logical_name of a node.
+ """
+        # NOTE(lucasagomes): All these state values come from the DB.
+        # Iotronic runs a periodic task that verifies the current power
+        # states of the nodes and updates the DB accordingly.
+ rpc_node = api_utils.get_rpc_node(node_ident)
+ return NodeStates.convert(rpc_node)
+
+ @expose.expose(None, types.uuid_or_name, wtypes.text,
+ status_code=202)
+ def power(self, node_ident, target):
+ """Set the power state of the node.
+
+ :param node_ident: the UUID or logical name of a node.
+ :param target: The desired power state of the node.
+ :raises: ClientSideError (HTTP 409) if a power operation is
+ already in progress.
+ :raises: InvalidStateRequested (HTTP 400) if the requested target
+ state is not valid or if the node is in CLEANING state.
+
+ """
+ # TODO(lucasagomes): Test if it's able to transition to the
+ # target state from the current one
+ rpc_node = api_utils.get_rpc_node(node_ident)
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+
+ if target not in [ir_states.POWER_ON,
+ ir_states.POWER_OFF,
+ ir_states.REBOOT]:
+ raise exception.InvalidStateRequested(
+ action=target, node=node_ident,
+ state=rpc_node.power_state)
+
+ # Don't change power state for nodes in cleaning
+ elif rpc_node.provision_state == ir_states.CLEANING:
+ raise exception.InvalidStateRequested(
+ action=target, node=node_ident,
+ state=rpc_node.provision_state)
+
+ pecan.request.rpcapi.change_node_power_state(pecan.request.context,
+ rpc_node.uuid, target,
+ topic)
+ # Set the HTTP Location Header
+ url_args = '/'.join([node_ident, 'states'])
+ pecan.response.location = link.build_url('nodes', url_args)
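+
+        # Illustrative call (route per this controller's custom actions):
+        #     PUT /v1/nodes/<node_ident>/states/power with a target of
+        #     states.POWER_ON, states.POWER_OFF or states.REBOOT
+        # returns 202 and a Location header pointing at .../states.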
+
+ @expose.expose(None, types.uuid_or_name, wtypes.text,
+ wtypes.text, status_code=202)
+ def provision(self, node_ident, target, configdrive=None):
+ """Asynchronous trigger the provisioning of the node.
+
+ This will set the target provision state of the node, and a
+ background task will begin which actually applies the state
+ change. This call will return a 202 (Accepted) indicating the
+ request was accepted and is in progress; the client should
+ continue to GET the status of this node to observe the status
+ of the requested action.
+
+ :param node_ident: UUID or logical name of a node.
+ :param target: The desired provision state of the node.
+ :param configdrive: Optional. A gzipped and base64 encoded
+ configdrive. Only valid when setting provision state
+ to "active".
+ :raises: NodeLocked (HTTP 409) if the node is currently locked.
+ :raises: ClientSideError (HTTP 409) if the node is already being
+ provisioned.
+ :raises: InvalidStateRequested (HTTP 400) if the requested transition
+ is not possible from the current state.
+ :raises: NotAcceptable (HTTP 406) if the API version specified does
+ not allow the requested state transition.
+ """
+ check_allow_management_verbs(target)
+ rpc_node = api_utils.get_rpc_node(node_ident)
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+
+ # Normally, we let the task manager recognize and deal with
+ # NodeLocked exceptions. However, that isn't done until the RPC calls
+        # below. In order to maintain backward compatibility with our API HTTP
+ # response codes, we have this check here to deal with cases where
+ # a node is already being operated on (DEPLOYING or such) and we
+ # want to continue returning 409. Without it, we'd return 400.
+ if rpc_node.reservation:
+ raise exception.NodeLocked(node=rpc_node.uuid,
+ host=rpc_node.reservation)
+
+ if (target in (ir_states.ACTIVE, ir_states.REBUILD)
+ and rpc_node.maintenance):
+ raise exception.NodeInMaintenance(op=_('provisioning'),
+ node=rpc_node.uuid)
+
+ m = ir_states.machine.copy()
+ m.initialize(rpc_node.provision_state)
+ if not m.is_valid_event(ir_states.VERBS.get(target, target)):
+ raise exception.InvalidStateRequested(
+ action=target, node=rpc_node.uuid,
+ state=rpc_node.provision_state)
+
+ if configdrive and target != ir_states.ACTIVE:
+ msg = (_('Adding a config drive is only supported when setting '
+ 'provision state to %s') % ir_states.ACTIVE)
+ raise wsme.exc.ClientSideError(msg, status_code=400)
+
+ # Note that there is a race condition. The node state(s) could change
+ # by the time the RPC call is made and the TaskManager manager gets a
+ # lock.
+ if target == ir_states.ACTIVE:
+ pecan.request.rpcapi.do_node_deploy(pecan.request.context,
+ rpc_node.uuid, False,
+ configdrive, topic)
+ elif target == ir_states.REBUILD:
+ pecan.request.rpcapi.do_node_deploy(pecan.request.context,
+ rpc_node.uuid, True,
+ None, topic)
+ elif target == ir_states.DELETED:
+ pecan.request.rpcapi.do_node_tear_down(
+ pecan.request.context, rpc_node.uuid, topic)
+ elif target == ir_states.VERBS['inspect']:
+ pecan.request.rpcapi.inspect_hardware(
+ pecan.request.context, rpc_node.uuid, topic=topic)
+ elif target in (
+ ir_states.VERBS['manage'], ir_states.VERBS['provide']):
+ pecan.request.rpcapi.do_provisioning_action(
+ pecan.request.context, rpc_node.uuid, target, topic)
+ else:
+ msg = (_('The requested action "%(action)s" could not be '
+ 'understood.') % {'action': target})
+ raise exception.InvalidStateRequested(message=msg)
+
+ # Set the HTTP Location Header
+ url_args = '/'.join([node_ident, 'states'])
+ pecan.response.location = link.build_url('nodes', url_args)
+
+
+class Node(base.APIBase):
+ """API representation of a bare metal node.
+
+ This class enforces type checking and value constraints, and converts
+ between the internal object model and the API representation of a node.
+ """
+ '''
+ _chassis_uuid = None
+
+ def _get_chassis_uuid(self):
+ return self._chassis_uuid
+
+ def _set_chassis_uuid(self, value):
+ if value and self._chassis_uuid != value:
+ try:
+ chassis = objects.Chassis.get(pecan.request.context, value)
+ self._chassis_uuid = chassis.uuid
+ # NOTE(lucasagomes): Create the chassis_id attribute on-the-fly
+ # to satisfy the api -> rpc object
+ # conversion.
+ self.chassis_id = chassis.id
+ except exception.ChassisNotFound as e:
+ # Change error code because 404 (NotFound) is inappropriate
+ # response for a POST request to create a Port
+ e.code = 400 # BadRequest
+ raise e
+ elif value == wtypes.Unset:
+ self._chassis_uuid = wtypes.Unset
+ '''
+ uuid = types.uuid
+ """Unique UUID for this node"""
+
+ #instance_uuid = types.uuid
+ #"""The UUID of the instance in nova-compute"""
+
+ name = wsme.wsattr(wtypes.text)
+ """The logical name for this node"""
+
+ status = wsme.wsattr(wtypes.text)
+ """The current status of this node"""
+ '''
+ power_state = wsme.wsattr(wtypes.text, readonly=True)
+ """Represent the current (not transition) power state of the node"""
+
+ target_power_state = wsme.wsattr(wtypes.text, readonly=True)
+ """The user modified desired power state of the node."""
+
+ last_error = wsme.wsattr(wtypes.text, readonly=True)
+ """Any error from the most recent (last) asynchronous transaction that
+ started but failed to finish."""
+
+ provision_state = wsme.wsattr(wtypes.text, readonly=True)
+ """Represent the current (not transition) provision state of the node"""
+ '''
+ reservation = wsme.wsattr(wtypes.text, readonly=True)
+ """The hostname of the conductor that holds an exclusive lock on
+ the node."""
+ '''
+ provision_updated_at = datetime.datetime
+ """The UTC date and time of the last provision state change"""
+
+ inspection_finished_at = datetime.datetime
+ """The UTC date and time when the last hardware inspection finished
+ successfully."""
+
+ inspection_started_at = datetime.datetime
+ """The UTC date and time when the hardware inspection was started"""
+
+ maintenance = types.boolean
+ """Indicates whether the node is in maintenance mode."""
+
+ maintenance_reason = wsme.wsattr(wtypes.text, readonly=True)
+ """Indicates reason for putting a node in maintenance mode."""
+
+ target_provision_state = wsme.wsattr(wtypes.text, readonly=True)
+ """The user modified desired provision state of the node."""
+
+ console_enabled = types.boolean
+ """Indicates whether the console access is enabled or disabled on
+ the node."""
+
+ instance_info = {wtypes.text: types.jsontype}
+ """This node's instance info."""
+
+ driver = wsme.wsattr(wtypes.text, mandatory=True)
+ """The driver responsible for controlling the node"""
+
+ driver_info = {wtypes.text: types.jsontype}
+ """This node's driver configuration"""
+
+ driver_internal_info = wsme.wsattr({wtypes.text: types.jsontype},
+ readonly=True)
+ """This driver's internal configuration"""
+
+ extra = {wtypes.text: types.jsontype}
+ """This node's meta data"""
+
+ # NOTE: properties should use a class to enforce required properties
+ # current list: arch, cpus, disk, ram, image
+ properties = {wtypes.text: types.jsontype}
+ """The physical characteristics of this node"""
+
+ chassis_uuid = wsme.wsproperty(types.uuid, _get_chassis_uuid,
+ _set_chassis_uuid)
+ """The UUID of the chassis this node belongs"""
+ '''
+ links = wsme.wsattr([link.Link], readonly=True)
+ """A list containing a self link and associated node links"""
+ '''
+ ports = wsme.wsattr([link.Link], readonly=True)
+ """Links to the collection of ports on this node"""
+ '''
+ # NOTE(deva): "conductor_affinity" shouldn't be presented on the
+ # API because it's an internal value. Don't add it here.
+
+ def __init__(self, **kwargs):
+ self.fields = []
+ fields = list(objects.Node.fields)
+ # NOTE(lucasagomes): chassis_uuid is not part of objects.Node.fields
+ # because it's an API-only attribute.
+ #fields.append('chassis_uuid')
+ for k in fields:
+ # Skip fields we do not expose.
+ if not hasattr(self, k):
+ continue
+ self.fields.append(k)
+ setattr(self, k, kwargs.get(k, wtypes.Unset))
+
+ # NOTE(lucasagomes): chassis_id is an attribute created on-the-fly
+ # by _set_chassis_uuid(), it needs to be present in the fields so
+ # that as_dict() will contain chassis_id field when converting it
+ # before saving it in the database.
+ #self.fields.append('chassis_id')
+ #setattr(self, 'chassis_uuid', kwargs.get('chassis_id', wtypes.Unset))
+
+ @staticmethod
+ def _convert_with_links(node, url, expand=True, show_password=True):
+ if not expand:
+ except_list = ['instance_uuid', 'maintenance', 'power_state',
+ 'provision_state', 'uuid', 'name']
+ node.unset_fields_except(except_list)
+ '''
+ else:
+ if not show_password:
+ node.driver_info = ast.literal_eval(strutils.mask_password(
+ node.driver_info,
+ "******"))
+ node.ports = [link.Link.make_link('self', url, 'nodes',
+ node.uuid + "/ports"),
+ link.Link.make_link('bookmark', url, 'nodes',
+ node.uuid + "/ports",
+ bookmark=True)
+ ]
+
+ # NOTE(lucasagomes): The numeric ID should not be exposed to
+ # the user, it's internal only.
+ node.chassis_id = wtypes.Unset
+ '''
+ node.links = [link.Link.make_link('self', url, 'nodes',
+ node.uuid),
+ link.Link.make_link('bookmark', url, 'nodes',
+ node.uuid, bookmark=True)
+ ]
+ return node
+
+ @classmethod
+ def convert_with_links(cls, rpc_node, expand=True):
+ node = Node(**rpc_node.as_dict())
+ assert_juno_provision_state_name(node)
+ hide_fields_in_newer_versions(node)
+ return cls._convert_with_links(node, pecan.request.host_url,
+ expand,
+ pecan.request.context.show_password)
+
+ @classmethod
+ def sample(cls, expand=True):
+ time = datetime.datetime(2000, 1, 1, 12, 0, 0)
+ node_uuid = '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'
+ instance_uuid = 'dcf1fbc5-93fc-4596-9395-b80572f6267b'
+ name = 'database16-dc02'
+ sample = cls(uuid=node_uuid, instance_uuid=instance_uuid,
+ name=name, power_state=ir_states.POWER_ON,
+ target_power_state=ir_states.NOSTATE,
+ last_error=None, provision_state=ir_states.ACTIVE,
+ target_provision_state=ir_states.NOSTATE,
+ reservation=None, driver='fake', driver_info={},
+ driver_internal_info={}, extra={},
+ properties={'memory_mb': '1024', 'local_gb': '10',
+ 'cpus': '1'}, updated_at=time, created_at=time,
+ provision_updated_at=time, instance_info={},
+ maintenance=False, maintenance_reason=None,
+ inspection_finished_at=None, inspection_started_at=time,
+ console_enabled=False, clean_step='')
+ # NOTE(matty_dubs): The chassis_uuid getter() is based on the
+ # _chassis_uuid variable:
+ sample._chassis_uuid = 'edcad704-b2da-41d5-96d9-afd580ecfa12'
+ return cls._convert_with_links(sample, 'http://localhost:6385', expand)
+
+
+class NodeCollection(collection.Collection):
+ """API representation of a collection of nodes."""
+
+ nodes = [Node]
+ """A list containing nodes objects"""
+
+ def __init__(self, **kwargs):
+ self._type = 'nodes'
+
+ @staticmethod
+ def convert_with_links(nodes, limit, url=None, expand=False, **kwargs):
+ collection = NodeCollection()
+ collection.nodes = [Node.convert_with_links(n, expand) for n in nodes]
+ collection.next = collection.get_next(limit, url=url, **kwargs)
+ return collection
+
+ @classmethod
+ def sample(cls):
+ sample = cls()
+ node = Node.sample(expand=False)
+ sample.nodes = [node]
+ return sample
+
+
+class NodeVendorPassthruController(rest.RestController):
+ """REST controller for VendorPassthru.
+
+ This controller allows vendors to expose custom functionality in
+ the Iotronic API. Iotronic merely relays the message from here to the
+ appropriate driver; no introspection is performed on the message body.
+ """
+
+ _custom_actions = {
+ 'methods': ['GET']
+ }
+
+ @expose.expose(wtypes.text, types.uuid_or_name)
+ def methods(self, node_ident):
+ """Retrieve information about vendor methods of the given node.
+
+ :param node_ident: UUID or logical name of a node.
+ :returns: dictionary with <vendor method name>:<method metadata>
+ entries.
+ :raises: NodeNotFound if the node is not found.
+ """
+ # Raise an exception if node is not found
+ rpc_node = api_utils.get_rpc_node(node_ident)
+
+ if rpc_node.driver not in _VENDOR_METHODS:
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+ ret = pecan.request.rpcapi.get_node_vendor_passthru_methods(
+ pecan.request.context, rpc_node.uuid, topic=topic)
+ _VENDOR_METHODS[rpc_node.driver] = ret
+
+ return _VENDOR_METHODS[rpc_node.driver]
+
+ @expose.expose(wtypes.text, types.uuid_or_name, wtypes.text,
+ body=wtypes.text)
+ def _default(self, node_ident, method, data=None):
+ """Call a vendor extension.
+
+ :param node_ident: UUID or logical name of a node.
+ :param method: name of the method in vendor driver.
+ :param data: body of data to supply to the specified method.
+ """
+ # Raise an exception if node is not found
+ rpc_node = api_utils.get_rpc_node(node_ident)
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+
+ # Raise an exception if method is not specified
+ if not method:
+ raise wsme.exc.ClientSideError(_("Method not specified"))
+
+ if data is None:
+ data = {}
+
+ http_method = pecan.request.method.upper()
+ ret, is_async = pecan.request.rpcapi.vendor_passthru(
+ pecan.request.context, rpc_node.uuid, method,
+ http_method, data, topic)
+ status_code = 202 if is_async else 200
+ return wsme.api.Response(ret, status_code=status_code)
+
+
+class NodeMaintenanceController(rest.RestController):
+
+ def _set_maintenance(self, node_ident, maintenance_mode, reason=None):
+ rpc_node = api_utils.get_rpc_node(node_ident)
+ rpc_node.maintenance = maintenance_mode
+ rpc_node.maintenance_reason = reason
+
+ try:
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+ except exception.NoValidHost as e:
+ e.code = 400
+ raise e
+ pecan.request.rpcapi.update_node(pecan.request.context,
+ rpc_node, topic=topic)
+
+ @expose.expose(None, types.uuid_or_name, wtypes.text,
+ status_code=202)
+ def put(self, node_ident, reason=None):
+ """Put the node in maintenance mode.
+
+ :param node_ident: the UUID or logical_name of a node.
+ :param reason: Optional, the reason why it's in maintenance.
+
+ """
+ self._set_maintenance(node_ident, True, reason=reason)
+
+ @expose.expose(None, types.uuid_or_name, status_code=202)
+ def delete(self, node_ident):
+ """Remove the node from maintenance mode.
+
+ :param node_ident: the UUID or logical name of a node.
+
+ """
+ self._set_maintenance(node_ident, False)
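+
+ # Illustrative only (editor sketch): with the WSME signatures above,
+ # `reason` travels as a request parameter, e.g.
+ #
+ #   session.put('/v1/nodes/node-1/maintenance?reason=disk+replacement')
+ #   session.delete('/v1/nodes/node-1/maintenance')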
+
+
+class NodesController(rest.RestController):
+ """REST controller for Nodes."""
+
+ states = NodeStatesController()
+ """Expose the state controller action as a sub-element of nodes"""
+
+ vendor_passthru = NodeVendorPassthruController()
+ """A resource used for vendors to expose a custom functionality in
+ the API"""
+
+ ports = port.PortsController()
+ """Expose ports as a sub-element of nodes"""
+
+ management = NodeManagementController()
+ """Expose management as a sub-element of nodes"""
+
+ maintenance = NodeMaintenanceController()
+ """Expose maintenance as a sub-element of nodes"""
+
+ # Set the flag to indicate that the requests to this resource are
+ # coming from a top-level resource
+ ports.from_nodes = True
+
+ from_chassis = False
+ """A flag to indicate if the requests to this controller are coming
+ from the top-level resource Chassis"""
+
+ _custom_actions = {
+ 'detail': ['GET'],
+ 'validate': ['GET'],
+ }
+
+ invalid_sort_key_list = ['properties', 'driver_info', 'extra',
+ 'instance_info', 'driver_internal_info']
+
+ def _get_nodes_collection(self, chassis_uuid, instance_uuid, associated,
+ maintenance, marker, limit, sort_key, sort_dir,
+ expand=False, resource_url=None):
+ if self.from_chassis and not chassis_uuid:
+ raise exception.MissingParameterValue(_(
+ "Chassis id not specified."))
+
+ limit = api_utils.validate_limit(limit)
+ sort_dir = api_utils.validate_sort_dir(sort_dir)
+
+ marker_obj = None
+ if marker:
+ marker_obj = objects.Node.get_by_uuid(pecan.request.context,
+ marker)
+
+ if sort_key in self.invalid_sort_key_list:
+ raise exception.InvalidParameterValue(_(
+ "The sort_key value %(key)s is an invalid field for sorting")
+ % {'key': sort_key})
+
+ if instance_uuid:
+ nodes = self._get_nodes_by_instance(instance_uuid)
+ else:
+ filters = {}
+ if chassis_uuid:
+ filters['chassis_uuid'] = chassis_uuid
+ if associated is not None:
+ filters['associated'] = associated
+ if maintenance is not None:
+ filters['maintenance'] = maintenance
+
+ nodes = objects.Node.list(pecan.request.context, limit, marker_obj,
+ sort_key=sort_key, sort_dir=sort_dir,
+ filters=filters)
+
+ parameters = {'sort_key': sort_key, 'sort_dir': sort_dir}
+ if associated:
+ parameters['associated'] = associated
+ if maintenance:
+ parameters['maintenance'] = maintenance
+ return NodeCollection.convert_with_links(nodes, limit,
+ url=resource_url,
+ expand=expand,
+ **parameters)
+
+ def _get_nodes_by_instance(self, instance_uuid):
+ """Retrieve a node by its instance uuid.
+
+ It returns a list with the node, or an empty list if no node is found.
+ """
+ try:
+ node = objects.Node.get_by_instance_uuid(pecan.request.context,
+ instance_uuid)
+ return [node]
+ except exception.InstanceNotFound:
+ return []
+
+ @expose.expose(NodeCollection, types.uuid, types.uuid,
+ types.boolean, types.boolean, types.uuid, int, wtypes.text,
+ wtypes.text)
+ def get_all(self, chassis_uuid=None, instance_uuid=None, associated=None,
+ maintenance=None, marker=None, limit=None, sort_key='id',
+ sort_dir='asc'):
+ """Retrieve a list of nodes.
+
+ :param chassis_uuid: Optional UUID of a chassis, to get only nodes for
+ that chassis.
+ :param instance_uuid: Optional UUID of an instance, to find the node
+ associated with that instance.
+ :param associated: Optional boolean whether to return a list of
+ associated or unassociated nodes. May be combined
+ with other parameters.
+ :param maintenance: Optional boolean value that indicates whether
+ to get nodes in maintenance mode ("True"), or not
+ in maintenance mode ("False").
+ :param marker: pagination marker for large data sets.
+ :param limit: maximum number of resources to return in a single result.
+ :param sort_key: column to sort results by. Default: id.
+ :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
+ """
+ return self._get_nodes_collection(chassis_uuid, instance_uuid,
+ associated, maintenance, marker,
+ limit, sort_key, sort_dir)
+
+ @expose.expose(NodeCollection, types.uuid, types.uuid,
+ types.boolean, types.boolean, types.uuid, int, wtypes.text,
+ wtypes.text)
+ def detail(self, chassis_uuid=None, instance_uuid=None, associated=None,
+ maintenance=None, marker=None, limit=None, sort_key='id',
+ sort_dir='asc'):
+ """Retrieve a list of nodes with detail.
+
+ :param chassis_uuid: Optional UUID of a chassis, to get only nodes for
+ that chassis.
+ :param instance_uuid: Optional UUID of an instance, to find the node
+ associated with that instance.
+ :param associated: Optional boolean whether to return a list of
+ associated or unassociated nodes. May be combined
+ with other parameters.
+ :param maintenance: Optional boolean value that indicates whether
+ to get nodes in maintenance mode ("True"), or not
+ in maintenance mode ("False").
+ :param marker: pagination marker for large data sets.
+ :param limit: maximum number of resources to return in a single result.
+ :param sort_key: column to sort results by. Default: id.
+ :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
+ """
+ # /detail should only work against collections
+ parent = pecan.request.path.split('/')[:-1][-1]
+ if parent != "nodes":
+ raise exception.HTTPNotFound
+
+ expand = True
+ resource_url = '/'.join(['nodes', 'detail'])
+ return self._get_nodes_collection(chassis_uuid, instance_uuid,
+ associated, maintenance, marker,
+ limit, sort_key, sort_dir, expand,
+ resource_url)
+
+ @expose.expose(wtypes.text, types.uuid_or_name, types.uuid)
+ def validate(self, node=None, node_uuid=None):
+ """Validate the driver interfaces, using the node's UUID or name.
+
+ Note that the 'node_uuid' interface is deprecated in favour
+ of the 'node' interface.
+
+ :param node: UUID or name of a node.
+ :param node_uuid: UUID of a node.
+ """
+ if node:
+ # We're invoking this interface using positional notation, or
+ # explicitly using 'node'. Try and determine which one.
+ if (not api_utils.allow_node_logical_names() and
+ not uuidutils.is_uuid_like(node)):
+ raise exception.NotAcceptable()
+
+ rpc_node = api_utils.get_rpc_node(node_uuid or node)
+
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+ return pecan.request.rpcapi.validate_driver_interfaces(
+ pecan.request.context, rpc_node.uuid, topic)
+
+ @expose.expose(Node, types.uuid_or_name)
+ def get_one(self, node_ident):
+ """Retrieve information about the given node.
+
+ :param node_ident: UUID or logical name of a node.
+ """
+ if self.from_chassis:
+ raise exception.OperationNotPermitted
+
+ rpc_node = api_utils.get_rpc_node(node_ident)
+ return Node.convert_with_links(rpc_node)
+
+ @expose.expose(Node, body=Node, status_code=201)
+ def post(self, node):
+ """Create a new node.
+
+ :param node: a node within the request body.
+ """
+ if self.from_chassis:
+ raise exception.OperationNotPermitted
+
+ # NOTE(deva): get_topic_for checks if node.driver is in the hash ring
+ # and raises NoValidHost if it is not.
+ # We need to ensure that node has a UUID before it can
+ # be mapped onto the hash ring.
+ if not node.uuid:
+ node.uuid = uuidutils.generate_uuid()
+
+ try:
+ pecan.request.rpcapi.get_topic_for(node)
+ except exception.NoValidHost as e:
+ # NOTE(deva): convert from 404 to 400 because client can see
+ # list of available drivers and shouldn't request
+ # one that doesn't exist.
+ e.code = 400
+ raise e
+
+ # Verify that, if we're creating a new node with a 'name' set,
+ # it is a valid name
+ if node.name:
+ if not api_utils.allow_node_logical_names():
+ raise exception.NotAcceptable()
+ if not api_utils.is_valid_node_name(node.name):
+ msg = _("Cannot create node with invalid name %(name)s")
+ raise wsme.exc.ClientSideError(msg % {'name': node.name},
+ status_code=400)
+
+ new_node = objects.Node(pecan.request.context,
+ **node.as_dict())
+ new_node.create()
+ # Set the HTTP Location Header
+ pecan.response.location = link.build_url('nodes', new_node.uuid)
+ return Node.convert_with_links(new_node)
+
+ @wsme.validate(types.uuid, [NodePatchType])
+ @expose.expose(Node, types.uuid_or_name, body=[NodePatchType])
+ def patch(self, node_ident, patch):
+ """Update an existing node.
+
+ :param node_ident: UUID or logical name of a node.
+ :param patch: a json PATCH document to apply to this node.
+ """
+ if self.from_chassis:
+ raise exception.OperationNotPermitted
+
+ rpc_node = api_utils.get_rpc_node(node_ident)
+
+ # Check if node is transitioning state, although nodes in some states
+ # can be updated.
+ if (rpc_node.provision_state == ir_states.CLEANING and
+ patch == [{'op': 'remove', 'path': '/instance_uuid'}]):
+ # Allow node.instance_uuid removal during cleaning, but not other
+ # operations.
+ # TODO(JoshNang) remove node.instance_uuid when removing
+ # instance_info and stop removing node.instance_uuid in the Nova
+ # Iotronic driver. Bug: 1436568
+ LOG.debug('Removing instance uuid %(instance)s from node %(node)s',
+ {'instance': rpc_node.instance_uuid,
+ 'node': rpc_node.uuid})
+ elif ((rpc_node.target_power_state or rpc_node.target_provision_state)
+ and rpc_node.provision_state not in
+ ir_states.UPDATE_ALLOWED_STATES):
+ msg = _("Node %s can not be updated while a state transition "
+ "is in progress.")
+ raise wsme.exc.ClientSideError(msg % node_ident, status_code=409)
+
+ # Verify that if we're patching 'name', the new value is a valid name
+ name = api_utils.get_patch_value(patch, '/name')
+ if name:
+ if not api_utils.allow_node_logical_names():
+ raise exception.NotAcceptable()
+ if not api_utils.is_valid_node_name(name):
+ msg = _("Node %(node)s: Cannot change name to invalid "
+ "name '%(name)s'")
+ raise wsme.exc.ClientSideError(msg % {'node': node_ident,
+ 'name': name},
+ status_code=400)
+
+ try:
+ node_dict = rpc_node.as_dict()
+ # NOTE(lucasagomes):
+ # 1) Remove chassis_id because it's an internal value and
+ # not present in the API object
+ # 2) Add chassis_uuid
+ node_dict['chassis_uuid'] = node_dict.pop('chassis_id', None)
+ node = Node(**api_utils.apply_jsonpatch(node_dict, patch))
+ except api_utils.JSONPATCH_EXCEPTIONS as e:
+ raise exception.PatchError(patch=patch, reason=e)
+
+ # Update only the fields that have changed
+ for field in objects.Node.fields:
+ try:
+ patch_val = getattr(node, field)
+ except AttributeError:
+ # Ignore fields that aren't exposed in the API
+ continue
+ if patch_val == wtypes.Unset:
+ patch_val = None
+ if rpc_node[field] != patch_val:
+ rpc_node[field] = patch_val
+
+ # NOTE(deva): we calculate the rpc topic here in case node.driver
+ # has changed, so that update is sent to the
+ # new conductor, not the old one which may fail to
+ # load the new driver.
+ try:
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+ except exception.NoValidHost as e:
+ # NOTE(deva): convert from 404 to 400 because client can see
+ # list of available drivers and shouldn't request
+ # one that doesn't exist.
+ e.code = 400
+ raise e
+
+ # NOTE(lucasagomes): If it's changing the driver and the console
+ # is enabled we prevent updating it because the new driver will
+ # not be able to stop a console started by the previous one.
+ delta = rpc_node.obj_what_changed()
+ if 'driver' in delta and rpc_node.console_enabled:
+ raise wsme.exc.ClientSideError(
+ _("Node %s can not update the driver while the console is "
+ "enabled. Please stop the console first.") % node_ident,
+ status_code=409)
+
+ new_node = pecan.request.rpcapi.update_node(
+ pecan.request.context, rpc_node, topic)
+
+ return Node.convert_with_links(new_node)
+
+ @expose.expose(None, types.uuid_or_name, status_code=204)
+ def delete(self, node_ident):
+ """Delete a node.
+
+ :param node_ident: UUID or logical name of a node.
+ """
+ if self.from_chassis:
+ raise exception.OperationNotPermitted
+
+ rpc_node = api_utils.get_rpc_node(node_ident)
+
+ try:
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+ except exception.NoValidHost as e:
+ e.code = 400
+ raise e
+
+ pecan.request.rpcapi.destroy_node(pecan.request.context,
+ rpc_node.uuid, topic)
diff --git a/iotronic/api/controllers/v1/__old/port.py b/iotronic/api/controllers/v1/__old/port.py
new file mode 100644
index 0000000..7483370
--- /dev/null
+++ b/iotronic/api/controllers/v1/__old/port.py
@@ -0,0 +1,396 @@
+# Copyright 2013 UnitedStack Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo_utils import uuidutils
+import pecan
+from pecan import rest
+import wsme
+from wsme import types as wtypes
+
+from iotronic.api.controllers import base
+from iotronic.api.controllers import link
+from iotronic.api.controllers.v1 import collection
+from iotronic.api.controllers.v1 import types
+from iotronic.api.controllers.v1 import utils as api_utils
+from iotronic.api import expose
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic import objects
+
+
+class PortPatchType(types.JsonPatchType):
+
+ @staticmethod
+ def mandatory_attrs():
+ return ['/address', '/node_uuid']
+
+
+class Port(base.APIBase):
+ """API representation of a port.
+
+ This class enforces type checking and value constraints, and converts
+ between the internal object model and the API representation of a port.
+ """
+
+ _node_uuid = None
+
+ def _get_node_uuid(self):
+ return self._node_uuid
+
+ def _set_node_uuid(self, value):
+ if value and self._node_uuid != value:
+ try:
+ # FIXME(comstud): One should only allow UUID here, but
+ # there seems to be a bug in that tests are passing an
+ # ID. See bug #1301046 for more details.
+ node = objects.Node.get(pecan.request.context, value)
+ self._node_uuid = node.uuid
+ # NOTE(lucasagomes): Create the node_id attribute on-the-fly
+ # to satisfy the api -> rpc object
+ # conversion.
+ self.node_id = node.id
+ except exception.NodeNotFound as e:
+ # Change error code because 404 (NotFound) is inappropriate
+ # response for a POST request to create a Port
+ e.code = 400 # BadRequest
+ raise e
+ elif value == wtypes.Unset:
+ self._node_uuid = wtypes.Unset
+
+ uuid = types.uuid
+ """Unique UUID for this port"""
+
+ address = wsme.wsattr(types.macaddress, mandatory=True)
+ """MAC Address for this port"""
+
+ extra = {wtypes.text: types.jsontype}
+ """This port's meta data"""
+
+ node_uuid = wsme.wsproperty(types.uuid, _get_node_uuid, _set_node_uuid,
+ mandatory=True)
+ """The UUID of the node this port belongs to"""
+
+ links = wsme.wsattr([link.Link], readonly=True)
+ """A list containing a self link and associated port links"""
+
+ def __init__(self, **kwargs):
+ self.fields = []
+ fields = list(objects.Port.fields)
+ # NOTE(lucasagomes): node_uuid is not part of objects.Port.fields
+ # because it's an API-only attribute
+ fields.append('node_uuid')
+ for field in fields:
+ # Skip fields we do not expose.
+ if not hasattr(self, field):
+ continue
+ self.fields.append(field)
+ setattr(self, field, kwargs.get(field, wtypes.Unset))
+
+ # NOTE(lucasagomes): node_id is an attribute created on-the-fly
+ # by _set_node_uuid(), it needs to be present in the fields so
+ # that as_dict() will contain node_id field when converting it
+ # before saving it in the database.
+ self.fields.append('node_id')
+ setattr(self, 'node_uuid', kwargs.get('node_id', wtypes.Unset))
+
+ @staticmethod
+ def _convert_with_links(port, url, expand=True):
+ if not expand:
+ port.unset_fields_except(['uuid', 'address'])
+
+ # never expose the node_id attribute
+ port.node_id = wtypes.Unset
+
+ port.links = [link.Link.make_link('self', url,
+ 'ports', port.uuid),
+ link.Link.make_link('bookmark', url,
+ 'ports', port.uuid,
+ bookmark=True)
+ ]
+ return port
+
+ @classmethod
+ def convert_with_links(cls, rpc_port, expand=True):
+ port = Port(**rpc_port.as_dict())
+ return cls._convert_with_links(port, pecan.request.host_url, expand)
+
+ @classmethod
+ def sample(cls, expand=True):
+ sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
+ address='fe:54:00:77:07:d9',
+ extra={'foo': 'bar'},
+ created_at=datetime.datetime.utcnow(),
+ updated_at=datetime.datetime.utcnow())
+ # NOTE(lucasagomes): the node_uuid getter() method looks at the
+ # _node_uuid variable
+ sample._node_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae'
+ return cls._convert_with_links(sample, 'http://localhost:6385', expand)
+
+
+class PortCollection(collection.Collection):
+ """API representation of a collection of ports."""
+
+ ports = [Port]
+ """A list containing ports objects"""
+
+ def __init__(self, **kwargs):
+ self._type = 'ports'
+
+ @staticmethod
+ def convert_with_links(rpc_ports, limit, url=None, expand=False, **kwargs):
+ collection = PortCollection()
+ collection.ports = [Port.convert_with_links(p, expand)
+ for p in rpc_ports]
+ collection.next = collection.get_next(limit, url=url, **kwargs)
+ return collection
+
+ @classmethod
+ def sample(cls):
+ sample = cls()
+ sample.ports = [Port.sample(expand=False)]
+ return sample
+
+
+class PortsController(rest.RestController):
+ """REST controller for Ports."""
+
+ from_nodes = False
+ """A flag to indicate if the requests to this controller are coming
+ from the top-level resource Nodes."""
+
+ _custom_actions = {
+ 'detail': ['GET'],
+ }
+
+ invalid_sort_key_list = ['extra']
+
+ def _get_ports_collection(self, node_ident, address, marker, limit,
+ sort_key, sort_dir, expand=False,
+ resource_url=None):
+ if self.from_nodes and not node_ident:
+ raise exception.MissingParameterValue(_(
+ "Node identifier not specified."))
+
+ limit = api_utils.validate_limit(limit)
+ sort_dir = api_utils.validate_sort_dir(sort_dir)
+
+ marker_obj = None
+ if marker:
+ marker_obj = objects.Port.get_by_uuid(pecan.request.context,
+ marker)
+
+ if sort_key in self.invalid_sort_key_list:
+ raise exception.InvalidParameterValue(_(
+ "The sort_key value %(key)s is an invalid field for sorting"
+ ) % {'key': sort_key})
+
+ if node_ident:
+ # FIXME(comstud): Since all we need is the node ID, we can
+ # make this more efficient by only querying
+ # for that column. This will get cleaned up
+ # as we move to the object interface.
+ node = api_utils.get_rpc_node(node_ident)
+ ports = objects.Port.list_by_node_id(pecan.request.context,
+ node.id, limit, marker_obj,
+ sort_key=sort_key,
+ sort_dir=sort_dir)
+ elif address:
+ ports = self._get_ports_by_address(address)
+ else:
+ ports = objects.Port.list(pecan.request.context, limit,
+ marker_obj, sort_key=sort_key,
+ sort_dir=sort_dir)
+
+ return PortCollection.convert_with_links(ports, limit,
+ url=resource_url,
+ expand=expand,
+ sort_key=sort_key,
+ sort_dir=sort_dir)
+
+ def _get_ports_by_address(self, address):
+ """Retrieve a port by its address.
+
+ :param address: MAC address of a port, to get the port which has
+ this MAC address.
+ :returns: a list with the port, or an empty list if no port is found.
+
+ """
+ try:
+ port = objects.Port.get_by_address(pecan.request.context, address)
+ return [port]
+ except exception.PortNotFound:
+ return []
+
+ @expose.expose(PortCollection, types.uuid_or_name, types.uuid,
+ types.macaddress, types.uuid, int, wtypes.text,
+ wtypes.text)
+ def get_all(self, node=None, node_uuid=None, address=None, marker=None,
+ limit=None, sort_key='id', sort_dir='asc'):
+ """Retrieve a list of ports.
+
+ Note that the 'node_uuid' interface is deprecated in favour
+ of the 'node' interface.
+
+ :param node: UUID or name of a node, to get only ports for that
+ node.
+ :param node_uuid: UUID of a node, to get only ports for that
+ node.
+ :param address: MAC address of a port, to get the port which has
+ this MAC address.
+ :param marker: pagination marker for large data sets.
+ :param limit: maximum number of resources to return in a single result.
+ :param sort_key: column to sort results by. Default: id.
+ :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
+ """
+ if not node_uuid and node:
+ # We're invoking this interface using positional notation, or
+ # explicitly using 'node'. Try and determine which one.
+ # Make sure only one interface, node or node_uuid is used
+ if (not api_utils.allow_node_logical_names() and
+ not uuidutils.is_uuid_like(node)):
+ raise exception.NotAcceptable()
+
+ return self._get_ports_collection(node_uuid or node, address, marker,
+ limit, sort_key, sort_dir)
+
+ @expose.expose(PortCollection, types.uuid_or_name, types.uuid,
+ types.macaddress, types.uuid, int, wtypes.text,
+ wtypes.text)
+ def detail(self, node=None, node_uuid=None, address=None, marker=None,
+ limit=None, sort_key='id', sort_dir='asc'):
+ """Retrieve a list of ports with detail.
+
+ Note that the 'node_uuid' interface is deprecated in favour
+ of the 'node' interface.
+
+ :param node: UUID or name of a node, to get only ports for that
+ node.
+ :param node_uuid: UUID of a node, to get only ports for that
+ node.
+ :param address: MAC address of a port, to get the port which has
+ this MAC address.
+ :param marker: pagination marker for large data sets.
+ :param limit: maximum number of resources to return in a single result.
+ :param sort_key: column to sort results by. Default: id.
+ :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
+ """
+ if not node_uuid and node:
+ # We're invoking this interface using positional notation, or
+ # explicitly using 'node'. Try and determine which one.
+ # Make sure only one interface, node or node_uuid is used
+ if (not api_utils.allow_node_logical_names() and
+ not uuidutils.is_uuid_like(node)):
+ raise exception.NotAcceptable()
+
+ # NOTE(lucasagomes): /detail should only work against collections
+ parent = pecan.request.path.split('/')[:-1][-1]
+ if parent != "ports":
+ raise exception.HTTPNotFound
+
+ expand = True
+ resource_url = '/'.join(['ports', 'detail'])
+ return self._get_ports_collection(node_uuid or node, address, marker,
+ limit, sort_key, sort_dir, expand,
+ resource_url)
+
+ @expose.expose(Port, types.uuid)
+ def get_one(self, port_uuid):
+ """Retrieve information about the given port.
+
+ :param port_uuid: UUID of a port.
+ """
+ if self.from_nodes:
+ raise exception.OperationNotPermitted
+
+ rpc_port = objects.Port.get_by_uuid(pecan.request.context, port_uuid)
+ return Port.convert_with_links(rpc_port)
+
+ @expose.expose(Port, body=Port, status_code=201)
+ def post(self, port):
+ """Create a new port.
+
+ :param port: a port within the request body.
+ """
+ if self.from_nodes:
+ raise exception.OperationNotPermitted
+
+ new_port = objects.Port(pecan.request.context,
+ **port.as_dict())
+ new_port.create()
+ # Set the HTTP Location Header
+ pecan.response.location = link.build_url('ports', new_port.uuid)
+ return Port.convert_with_links(new_port)
+
+ @wsme.validate(types.uuid, [PortPatchType])
+ @expose.expose(Port, types.uuid, body=[PortPatchType])
+ def patch(self, port_uuid, patch):
+ """Update an existing port.
+
+ :param port_uuid: UUID of a port.
+ :param patch: a json PATCH document to apply to this port.
+ """
+ if self.from_nodes:
+ raise exception.OperationNotPermitted
+
+ rpc_port = objects.Port.get_by_uuid(pecan.request.context, port_uuid)
+ try:
+ port_dict = rpc_port.as_dict()
+ # NOTE(lucasagomes):
+ # 1) Remove node_id because it's an internal value and
+ # not present in the API object
+ # 2) Add node_uuid
+ port_dict['node_uuid'] = port_dict.pop('node_id', None)
+ port = Port(**api_utils.apply_jsonpatch(port_dict, patch))
+ except api_utils.JSONPATCH_EXCEPTIONS as e:
+ raise exception.PatchError(patch=patch, reason=e)
+
+ # Update only the fields that have changed
+ for field in objects.Port.fields:
+ try:
+ patch_val = getattr(port, field)
+ except AttributeError:
+ # Ignore fields that aren't exposed in the API
+ continue
+ if patch_val == wtypes.Unset:
+ patch_val = None
+ if rpc_port[field] != patch_val:
+ rpc_port[field] = patch_val
+
+ rpc_node = objects.Node.get_by_id(pecan.request.context,
+ rpc_port.node_id)
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+
+ new_port = pecan.request.rpcapi.update_port(
+ pecan.request.context, rpc_port, topic)
+
+ return Port.convert_with_links(new_port)
+
+ @expose.expose(None, types.uuid, status_code=204)
+ def delete(self, port_uuid):
+ """Delete a port.
+
+ :param port_uuid: UUID of a port.
+ """
+ if self.from_nodes:
+ raise exception.OperationNotPermitted
+ rpc_port = objects.Port.get_by_uuid(pecan.request.context,
+ port_uuid)
+ rpc_node = objects.Node.get_by_id(pecan.request.context,
+ rpc_port.node_id)
+ topic = pecan.request.rpcapi.get_topic_for(rpc_node)
+ pecan.request.rpcapi.destroy_port(pecan.request.context,
+ rpc_port, topic)
diff --git a/iotronic/api/controllers/v1/__old/state.py b/iotronic/api/controllers/v1/__old/state.py
new file mode 100644
index 0000000..c3843bf
--- /dev/null
+++ b/iotronic/api/controllers/v1/__old/state.py
@@ -0,0 +1,34 @@
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from wsme import types as wtypes
+
+from iotronic.api.controllers import base
+from iotronic.api.controllers import link
+
+
+class State(base.APIBase):
+
+ current = wtypes.text
+ """The current state"""
+
+ target = wtypes.text
+ """The user modified desired state"""
+
+ available = [wtypes.text]
+ """A list of available states it is able to transition to"""
+
+ links = [link.Link]
+ """A list containing a self link and associated state links"""
diff --git a/iotronic/api/controllers/v1/__old/types.py b/iotronic/api/controllers/v1/__old/types.py
new file mode 100644
index 0000000..f435c61
--- /dev/null
+++ b/iotronic/api/controllers/v1/__old/types.py
@@ -0,0 +1,239 @@
+# coding: utf-8
+#
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from oslo_utils import strutils
+from oslo_utils import uuidutils
+import six
+import wsme
+from wsme import types as wtypes
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common import utils
+
+
+class MacAddressType(wtypes.UserType):
+ """A simple MAC address type."""
+
+ basetype = wtypes.text
+ name = 'macaddress'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+ # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ @staticmethod
+ def validate(value):
+ return utils.validate_and_normalize_mac(value)
+
+ @staticmethod
+ def frombasetype(value):
+ if value is None:
+ return None
+ return MacAddressType.validate(value)
+
+
+class UuidOrNameType(wtypes.UserType):
+ """A simple UUID or logical name type."""
+
+ basetype = wtypes.text
+ name = 'uuid_or_name'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+ # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ @staticmethod
+ def validate(value):
+ if not (uuidutils.is_uuid_like(value)
+ or utils.is_hostname_safe(value)):
+ raise exception.InvalidUuidOrName(name=value)
+ return value
+
+ @staticmethod
+ def frombasetype(value):
+ if value is None:
+ return None
+ return UuidOrNameType.validate(value)
+
+
+class NameType(wtypes.UserType):
+ """A simple logical name type."""
+
+ basetype = wtypes.text
+ name = 'name'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+ # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ @staticmethod
+ def validate(value):
+ if not utils.is_hostname_safe(value):
+ raise exception.InvalidName(name=value)
+ return value
+
+ @staticmethod
+ def frombasetype(value):
+ if value is None:
+ return None
+ return NameType.validate(value)
+
+
+class UuidType(wtypes.UserType):
+ """A simple UUID type."""
+
+ basetype = wtypes.text
+ name = 'uuid'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+ # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ @staticmethod
+ def validate(value):
+ if not uuidutils.is_uuid_like(value):
+ raise exception.InvalidUUID(uuid=value)
+ return value
+
+ @staticmethod
+ def frombasetype(value):
+ if value is None:
+ return None
+ return UuidType.validate(value)
+
+
+class BooleanType(wtypes.UserType):
+ """A simple boolean type."""
+
+ basetype = wtypes.text
+ name = 'boolean'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+ # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ @staticmethod
+ def validate(value):
+ try:
+ return strutils.bool_from_string(value, strict=True)
+ except ValueError as e:
+ # raise Invalid to return 400 (BadRequest) in the API
+ raise exception.Invalid(e)
+
+ @staticmethod
+ def frombasetype(value):
+ if value is None:
+ return None
+ return BooleanType.validate(value)
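+
+ # Illustrative only (editor note): with strict=True,
+ # strutils.bool_from_string() maps '1'/'t'/'true'/'on'/'y'/'yes' to
+ # True and '0'/'f'/'false'/'off'/'n'/'no' to False; anything else
+ # raises ValueError, which validate() converts into a 400 response.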
+
+
+class JsonType(wtypes.UserType):
+ """A simple JSON type."""
+
+ basetype = wtypes.text
+ name = 'json'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+ # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ def __str__(self):
+ # These are the json serializable native types
+ return ' | '.join(map(str, (wtypes.text, six.integer_types, float,
+ BooleanType, list, dict, None)))
+
+ @staticmethod
+ def validate(value):
+ try:
+ json.dumps(value)
+ except TypeError:
+ raise exception.Invalid(_('%s is not JSON serializable') % value)
+ else:
+ return value
+
+ @staticmethod
+ def frombasetype(value):
+ return JsonType.validate(value)
+
+
+macaddress = MacAddressType()
+uuid_or_name = UuidOrNameType()
+name = NameType()
+uuid = UuidType()
+boolean = BooleanType()
+# Can't call it 'json' because that's the name of the stdlib module
+jsontype = JsonType()
+
+
+class JsonPatchType(wtypes.Base):
+ """A complex type that represents a single json-patch operation."""
+
+ path = wtypes.wsattr(wtypes.StringType(pattern=r'^(/[\w-]+)+$'),
+ mandatory=True)
+ op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'),
+ mandatory=True)
+ value = wsme.wsattr(jsontype, default=wtypes.Unset)
+
+ @staticmethod
+ def internal_attrs():
+ """Returns a list of internal attributes.
+
+ Internal attributes can't be added, replaced or removed. This
+ method may be overridden by a derived class.
+
+ """
+ return ['/created_at', '/id', '/links', '/updated_at', '/uuid']
+
+ @staticmethod
+ def mandatory_attrs():
+ """Retruns a list of mandatory attributes.
+
+ Mandatory attributes can't be removed from the document. This
+ method should be overridden by a derived class.
+
+ """
+ return []
+
+ @staticmethod
+ def validate(patch):
+ _path = '/' + patch.path.split('/')[1]
+ if _path in patch.internal_attrs():
+ msg = _("'%s' is an internal attribute and can not be updated")
+ raise wsme.exc.ClientSideError(msg % patch.path)
+
+ if patch.path in patch.mandatory_attrs() and patch.op == 'remove':
+ msg = _("'%s' is a mandatory attribute and can not be removed")
+ raise wsme.exc.ClientSideError(msg % patch.path)
+
+ if patch.op != 'remove':
+ if patch.value is wsme.Unset:
+ msg = _("'add' and 'replace' operations needs value")
+ raise wsme.exc.ClientSideError(msg)
+
+ ret = {'path': patch.path, 'op': patch.op}
+ if patch.value is not wsme.Unset:
+ ret['value'] = patch.value
+ return ret
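+
+ # Illustrative only (editor sketch): patch documents accepted by this
+ # type look like
+ #
+ #   [{'path': '/extra/foo', 'op': 'add', 'value': 'bar'},
+ #    {'path': '/name', 'op': 'replace', 'value': 'node-1'},
+ #    {'path': '/extra/old', 'op': 'remove'}]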
diff --git a/iotronic/api/controllers/v1/__old/utils.py b/iotronic/api/controllers/v1/__old/utils.py
new file mode 100644
index 0000000..9d3336e
--- /dev/null
+++ b/iotronic/api/controllers/v1/__old/utils.py
@@ -0,0 +1,107 @@
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import jsonpatch
+from oslo_config import cfg
+from oslo_utils import uuidutils
+import pecan
+import wsme
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common import utils
+from iotronic import objects
+
+
+CONF = cfg.CONF
+
+
+JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException,
+ jsonpatch.JsonPointerException,
+ KeyError)
+
+
+def validate_limit(limit):
+ if limit is None:
+ return CONF.api.max_limit
+
+ if limit <= 0:
+ raise wsme.exc.ClientSideError(_("Limit must be positive"))
+
+ return min(CONF.api.max_limit, limit)
+
+
+def validate_sort_dir(sort_dir):
+ if sort_dir not in ['asc', 'desc']:
+ raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. "
+ "Acceptable values are "
+ "'asc' or 'desc'") % sort_dir)
+ return sort_dir
+
+
+def apply_jsonpatch(doc, patch):
+ for p in patch:
+ if p['op'] == 'add' and p['path'].count('/') == 1:
+ if p['path'].lstrip('/') not in doc:
+ msg = _('Adding a new attribute (%s) to the root of '
+ 'the resource is not allowed')
+ raise wsme.exc.ClientSideError(msg % p['path'])
+ return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch))
+
+
+def get_patch_value(patch, path):
+ for p in patch:
+ if p['path'] == path:
+ return p['value']
+
+
+def allow_node_logical_names():
+ # v1.5 added logical name aliases
+ return pecan.request.version.minor >= 5
+
+
+def get_rpc_node(node_ident):
+ """Get the RPC node from the node uuid or logical name.
+
+ :param node_ident: the UUID or logical name of a node.
+
+ :returns: The RPC Node.
+ :raises: InvalidUuidOrName if the name or uuid provided is not valid.
+ :raises: NodeNotFound if the node is not found.
+ """
+ # Check to see if the node_ident is a valid UUID. If it is, treat it
+ # as a UUID.
+ if uuidutils.is_uuid_like(node_ident):
+ return objects.Node.get_by_uuid(pecan.request.context, node_ident)
+
+ # We can refer to nodes by their name, if the client supports it
+ if allow_node_logical_names():
+ if utils.is_hostname_safe(node_ident):
+ return objects.Node.get_by_name(pecan.request.context, node_ident)
+ raise exception.InvalidUuidOrName(name=node_ident)
+
+ # Ensure we raise the same exception as we did for the Juno release
+ raise exception.NodeNotFound(node=node_ident)
+
+
+def is_valid_node_name(name):
+ """Determine if the provided name is a valid node name.
+
+ Check to see that the provided node name is valid, and isn't a UUID.
+
+ :param name: the node name to check.
+ :returns: True if the name is valid, False otherwise.
+ """
+ return utils.is_hostname_safe(name) and (not uuidutils.is_uuid_like(name))
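+
+
+# Illustrative only (editor sketch):
+#
+#   is_valid_node_name('database16-dc02')
+#   # -> True
+#   is_valid_node_name('1be26c0b-03f2-4d2e-ae87-c02d7f33c123')
+#   # -> False: UUID-like values are treated as UUIDs, not names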
diff --git a/iotronic/api/controllers/v1/board.py b/iotronic/api/controllers/v1/board.py
new file mode 100644
index 0000000..2a733ba
--- /dev/null
+++ b/iotronic/api/controllers/v1/board.py
@@ -0,0 +1,238 @@
+import pecan
+from pecan import rest
+import wsme
+from wsme import types as wtypes
+
+from oslo_utils import uuidutils
+
+from iotronic.api.controllers import base
+from iotronic.api.controllers.v1 import collection
+from iotronic.api.controllers.v1 import types
+from iotronic.api.controllers.v1 import utils as api_utils
+from iotronic.api import expose
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic import objects
+
+
+class Board(base.APIBase):
+ """API representation of a board.
+ """
+
+ uuid = types.uuid
+ name = wsme.wsattr(wtypes.text)
+ status = wsme.wsattr(wtypes.text)
+
+ @staticmethod
+ def _convert_with_links(board, url, expand=True, show_password=True):
+ '''
+ if not expand:
+ except_list = ['instance_uuid', 'maintenance', 'power_state',
+ 'provision_state', 'uuid', 'name']
+ board.unset_fields_except(except_list)
+ else:
+ if not show_password:
+ board.driver_info = ast.literal_eval(strutils.mask_password(
+ board.driver_info,
+ "******"))
+ board.ports = [link.Link.make_link('self', url, 'boards',
+ board.uuid + "/ports"),
+ link.Link.make_link('bookmark', url, 'boards',
+ board.uuid + "/ports",
+ bookmark=True)
+ ]
+
+ board.chassis_id = wtypes.Unset
+ '''
+ '''
+ board.links = [link.Link.make_link('self', url, 'boards',
+ board.uuid),
+ link.Link.make_link('bookmark', url, 'boards',
+ board.uuid, bookmark=True)
+ ]
+ '''
+ return board
+
+ @classmethod
+ def convert_with_links(cls, rpc_board, expand=True):
+ board = Board(**rpc_board.as_dict())
+ return cls._convert_with_links(board, pecan.request.host_url,
+ expand,
+ pecan.request.context.show_password)
+
+ def __init__(self, **kwargs):
+ self.fields = []
+ fields = list(objects.Board.fields)
+ for k in fields:
+ # Skip fields we do not expose.
+ if not hasattr(self, k):
+ continue
+ self.fields.append(k)
+ setattr(self, k, kwargs.get(k, wtypes.Unset))
+
+
+class BoardCollection(collection.Collection):
+ """API representation of a collection of boards."""
+
+ boards = [Board]
+ """A list containing boards objects"""
+
+ def __init__(self, **kwargs):
+ self._type = 'boards'
+
+ @staticmethod
+ def convert_with_links(boards, limit, url=None, expand=False, **kwargs):
+ collection = BoardCollection()
+ collection.boards = [Board.convert_with_links(n, expand) for n in boards]
+ collection.next = collection.get_next(limit, url=url, **kwargs)
+ return collection
+
+
+class BoardsController(rest.RestController):
+
+ invalid_sort_key_list = ['properties']
+
+ def _get_boards_collection(self, chassis_uuid, instance_uuid, associated,
+ maintenance, marker, limit, sort_key, sort_dir,
+ expand=False, resource_url=None):
+ '''
+ if self.from_chassis and not chassis_uuid:
+ raise exception.MissingParameterValue(
+ _("Chassis id not specified."))
+ '''
+ limit = api_utils.validate_limit(limit)
+ sort_dir = api_utils.validate_sort_dir(sort_dir)
+
+ marker_obj = None
+ if marker:
+ marker_obj = objects.Board.get_by_uuid(pecan.request.context,
+ marker)
+
+ if sort_key in self.invalid_sort_key_list:
+ raise exception.InvalidParameterValue(
+ _("The sort_key value %(key)s is an invalid field for "
+ "sorting") % {'key': sort_key})
+
+ if instance_uuid:
+ boards = self._get_boards_by_instance(instance_uuid)
+ else:
+ filters = {}
+ if chassis_uuid:
+ filters['chassis_uuid'] = chassis_uuid
+ if associated is not None:
+ filters['associated'] = associated
+ if maintenance is not None:
+ filters['maintenance'] = maintenance
+
+ boards = objects.Board.list(pecan.request.context, limit, marker_obj,
+ sort_key=sort_key, sort_dir=sort_dir,
+ filters=filters)
+
+ parameters = {'sort_key': sort_key, 'sort_dir': sort_dir}
+ if associated:
+ parameters['associated'] = associated
+ if maintenance:
+ parameters['maintenance'] = maintenance
+ return BoardCollection.convert_with_links(boards, limit,
+ url=resource_url,
+ expand=expand,
+ **parameters)
+
+ @expose.expose(BoardCollection, types.uuid, types.uuid, types.boolean,
+ types.boolean, types.uuid, int, wtypes.text, wtypes.text)
+ def get_all(self, chassis_uuid=None, instance_uuid=None, associated=None,
+ maintenance=None, marker=None, limit=None, sort_key='id',
+ sort_dir='asc'):
+ """Retrieve a list of boards.
+
+ :param chassis_uuid: Optional UUID of a chassis, to get only boards for
+ that chassis.
+ :param instance_uuid: Optional UUID of an instance, to find the board
+ associated with that instance.
+ :param associated: Optional boolean whether to return a list of
+ associated or unassociated boards. May be combined
+ with other parameters.
+ :param maintenance: Optional boolean value that indicates whether
+ to get boards in maintenance mode ("True"), or not
+ in maintenance mode ("False").
+ :param marker: pagination marker for large data sets.
+ :param limit: maximum number of resources to return in a single result.
+ :param sort_key: column to sort results by. Default: id.
+ :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
+ """
+ return self._get_boards_collection(chassis_uuid, instance_uuid,
+ associated, maintenance, marker,
+ limit, sort_key, sort_dir)
+
+ @expose.expose(Board, types.uuid_or_name)
+ def get(self, board_ident):
+ """Retrieve information about the given board.
+
+ :param board_ident: UUID or logical name of a board.
+ """
+ rpc_board = api_utils.get_rpc_board(board_ident)
+ board = Board(**rpc_board.as_dict())
+ return board
+
+ @expose.expose(None, types.uuid_or_name, status_code=204)
+ def delete(self, board_ident):
+ """Delete a board.
+
+ :param board_ident: UUID or logical name of a board.
+ """
+ rpc_board = api_utils.get_rpc_board(board_ident)
+
+ try:
+ topic = pecan.request.rpcapi.get_topic_for(rpc_board)
+ except exception.NoValidHost as e:
+ e.code = 400
+ raise e
+
+ pecan.request.rpcapi.destroy_board(pecan.request.context,
+ rpc_board.uuid, topic)
+
+ #@expose.expose(Board, body=Board, status_code=201)
+ #def post(self, Board):
+ @expose.expose(Board, status_code=201)
+ def post(self):
+ """Create a new Board.
+
+ :param Board: a Board within the request body.
+ """
+ '''
+ if not Board.uuid:
+ Board.uuid = uuidutils.generate_uuid()
+
+ try:
+ pecan.request.rpcapi.get_topic_for(Board)
+ except exception.NoValidHost as e:
+ e.code = 400
+ raise e
+
+ if Board.name:
+ if not api_utils.allow_Board_logical_names():
+ raise exception.NotAcceptable()
+ if not api_utils.is_valid_Board_name(Board.name):
+ msg = _("Cannot create Board with invalid name %(name)s")
+ raise wsme.exc.ClientSideError(msg % {'name': Board.name},
+ status_code=400)
+ '''
+ #new_Board = objects.Board(pecan.request.context,
+ # **Board.as_dict())
+
+ #new_Board = objects.Board(pecan.request.context,
+ # **Board.as_dict())
+ #rpc_board = api_utils.get_rpc_board('a9a86ab8-ad45-455e-86c3-d8f7d892ec9d')
+
+ """{'status': u'1', 'uuid': u'a9a86ab8-ad45-455e-86c3-d8f7d892ec9d',
+ 'created_at': datetime.datetime(2015, 1, 30, 16, 56, tzinfo=<UTC>),
+ 'updated_at': None,
+ 'reservation': None, 'id': 106, 'name': u'provaaaa'}
+ """
+ b="{'status': '1', 'uuid': 'a9a86ab8-ad45-455e-86c3-d8f7d892ec9d', 'name': 'provaaaa'}"
+ board = Board(**b.as_dict())
+ board.uuid = uuidutils.generate_uuid()
+
+ new_Board = objects.Board(pecan.request.context,
+ **board.as_dict())
+ new_Board.create()
+ #pecan.response.location = link.build_url('Boards', new_Board.uuid)
+ return Board.convert_with_links(new_Board)
+
diff --git a/iotronic/api/controllers/v1/collection.py b/iotronic/api/controllers/v1/collection.py
new file mode 100644
index 0000000..d49337f
--- /dev/null
+++ b/iotronic/api/controllers/v1/collection.py
@@ -0,0 +1,48 @@
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pecan
+from wsme import types as wtypes
+
+from iotronic.api.controllers import base
+from iotronic.api.controllers import link
+
+
+class Collection(base.APIBase):
+
+ next = wtypes.text
+ """A link to retrieve the next subset of the collection"""
+
+ @property
+ def collection(self):
+ return getattr(self, self._type)
+
+ def has_next(self, limit):
+ """Return whether collection has more items."""
+ return len(self.collection) and len(self.collection) == limit
+
+ def get_next(self, limit, url=None, **kwargs):
+ """Return a link to the next subset of the collection."""
+ if not self.has_next(limit):
+ return wtypes.Unset
+
+ resource_url = url or self._type
+ q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs])
+ next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % {
+ 'args': q_args, 'limit': limit,
+ 'marker': self.collection[-1].uuid}
+
+ return link.Link.make_link('next', pecan.request.host_url,
+ resource_url, next_args).href
diff --git a/iotronic/api/controllers/v1/types.py b/iotronic/api/controllers/v1/types.py
new file mode 100644
index 0000000..f435c61
--- /dev/null
+++ b/iotronic/api/controllers/v1/types.py
@@ -0,0 +1,239 @@
+# coding: utf-8
+#
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from oslo_utils import strutils
+from oslo_utils import uuidutils
+import six
+import wsme
+from wsme import types as wtypes
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common import utils
+
+
+class MacAddressType(wtypes.UserType):
+ """A simple MAC address type."""
+
+ basetype = wtypes.text
+ name = 'macaddress'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+    # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ @staticmethod
+ def validate(value):
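+        # Illustrative: an input such as 'AA:BB:CC:DD:EE:FF' is checked
+        # and normalized; the exact canonical form is delegated to
+        # utils.validate_and_normalize_mac.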
+ return utils.validate_and_normalize_mac(value)
+
+ @staticmethod
+ def frombasetype(value):
+ if value is None:
+ return None
+ return MacAddressType.validate(value)
+
+
+class UuidOrNameType(wtypes.UserType):
+ """A simple UUID or logical name type."""
+
+ basetype = wtypes.text
+ name = 'uuid_or_name'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+    # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ @staticmethod
+ def validate(value):
+ if not (uuidutils.is_uuid_like(value)
+ or utils.is_hostname_safe(value)):
+ raise exception.InvalidUuidOrName(name=value)
+ return value
+
+ @staticmethod
+ def frombasetype(value):
+ if value is None:
+ return None
+ return UuidOrNameType.validate(value)
+
+
+class NameType(wtypes.UserType):
+ """A simple logical name type."""
+
+ basetype = wtypes.text
+ name = 'name'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+    # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ @staticmethod
+ def validate(value):
+ if not utils.is_hostname_safe(value):
+ raise exception.InvalidName(name=value)
+ return value
+
+ @staticmethod
+ def frombasetype(value):
+ if value is None:
+ return None
+ return NameType.validate(value)
+
+
+class UuidType(wtypes.UserType):
+ """A simple UUID type."""
+
+ basetype = wtypes.text
+ name = 'uuid'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+    # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ @staticmethod
+ def validate(value):
+ if not uuidutils.is_uuid_like(value):
+ raise exception.InvalidUUID(uuid=value)
+ return value
+
+ @staticmethod
+ def frombasetype(value):
+ if value is None:
+ return None
+ return UuidType.validate(value)
+
+
+class BooleanType(wtypes.UserType):
+ """A simple boolean type."""
+
+ basetype = wtypes.text
+ name = 'boolean'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+    # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ @staticmethod
+ def validate(value):
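+        # strutils.bool_from_string(strict=True) accepts the usual
+        # spellings ('1', 't', 'true', 'on', 'y', 'yes' and their
+        # negative counterparts); anything else raises ValueError.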
+ try:
+ return strutils.bool_from_string(value, strict=True)
+ except ValueError as e:
+ # raise Invalid to return 400 (BadRequest) in the API
+ raise exception.Invalid(e)
+
+ @staticmethod
+ def frombasetype(value):
+ if value is None:
+ return None
+ return BooleanType.validate(value)
+
+
+class JsonType(wtypes.UserType):
+ """A simple JSON type."""
+
+ basetype = wtypes.text
+ name = 'json'
+ # FIXME(lucasagomes): When used with wsexpose decorator WSME will try
+    # to get the name of the type by accessing its __name__ attribute.
+ # Remove this __name__ attribute once it's fixed in WSME.
+ # https://bugs.launchpad.net/wsme/+bug/1265590
+ __name__ = name
+
+ def __str__(self):
+ # These are the json serializable native types
+ return ' | '.join(map(str, (wtypes.text, six.integer_types, float,
+ BooleanType, list, dict, None)))
+
+ @staticmethod
+ def validate(value):
+ try:
+ json.dumps(value)
+ except TypeError:
+ raise exception.Invalid(_('%s is not JSON serializable') % value)
+ else:
+ return value
+
+ @staticmethod
+ def frombasetype(value):
+ return JsonType.validate(value)
+
+
+macaddress = MacAddressType()
+uuid_or_name = UuidOrNameType()
+name = NameType()
+uuid = UuidType()
+boolean = BooleanType()
+# Can't call it 'json' because that's the name of the stdlib module
+jsontype = JsonType()
+
+
+class JsonPatchType(wtypes.Base):
+ """A complex type that represents a single json-patch operation."""
+
+    path = wtypes.wsattr(wtypes.StringType(pattern=r'^(/[\w-]+)+$'),
+ mandatory=True)
+ op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'),
+ mandatory=True)
+ value = wsme.wsattr(jsontype, default=wtypes.Unset)
+
+ @staticmethod
+ def internal_attrs():
+ """Returns a list of internal attributes.
+
+        Internal attributes can't be added, replaced or removed. This
+        method may be overridden by a derived class.
+
+ """
+ return ['/created_at', '/id', '/links', '/updated_at', '/uuid']
+
+ @staticmethod
+ def mandatory_attrs():
+ """Retruns a list of mandatory attributes.
+
+ Mandatory attributes can't be removed from the document. This
+ method should be overwritten by derived class.
+
+ """
+ return []
+
+ @staticmethod
+ def validate(patch):
+ _path = '/' + patch.path.split('/')[1]
+ if _path in patch.internal_attrs():
+ msg = _("'%s' is an internal attribute and can not be updated")
+ raise wsme.exc.ClientSideError(msg % patch.path)
+
+ if patch.path in patch.mandatory_attrs() and patch.op == 'remove':
+ msg = _("'%s' is a mandatory attribute and can not be removed")
+ raise wsme.exc.ClientSideError(msg % patch.path)
+
+ if patch.op != 'remove':
+ if patch.value is wsme.Unset:
+ msg = _("'add' and 'replace' operations needs value")
+ raise wsme.exc.ClientSideError(msg)
+
+ ret = {'path': patch.path, 'op': patch.op}
+ if patch.value is not wsme.Unset:
+ ret['value'] = patch.value
+ return ret
diff --git a/iotronic/api/controllers/v1/utils.py b/iotronic/api/controllers/v1/utils.py
new file mode 100644
index 0000000..4511747
--- /dev/null
+++ b/iotronic/api/controllers/v1/utils.py
@@ -0,0 +1,131 @@
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import jsonpatch
+from oslo_config import cfg
+from oslo_utils import uuidutils
+import pecan
+import wsme
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common import utils
+from iotronic import objects
+
+
+CONF = cfg.CONF
+
+
+JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException,
+ jsonpatch.JsonPointerException,
+ KeyError)
+
+
+def validate_limit(limit):
+ if limit is None:
+ return CONF.api.max_limit
+
+ if limit <= 0:
+ raise wsme.exc.ClientSideError(_("Limit must be positive"))
+
+ return min(CONF.api.max_limit, limit)
+
+
+def validate_sort_dir(sort_dir):
+ if sort_dir not in ['asc', 'desc']:
+ raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. "
+ "Acceptable values are "
+ "'asc' or 'desc'") % sort_dir)
+ return sort_dir
+
+
+def apply_jsonpatch(doc, patch):
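+    # Applies an RFC 6902 JSON patch to a resource document. 'add'
+    # operations that would create a brand new top-level attribute,
+    # e.g. [{'op': 'add', 'path': '/foo', 'value': 'bar'}] on a doc
+    # without 'foo', are rejected below with a client-side error.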
+ for p in patch:
+ if p['op'] == 'add' and p['path'].count('/') == 1:
+ if p['path'].lstrip('/') not in doc:
+                msg = _('Adding a new attribute (%s) to the root of '
+                        'the resource is not allowed')
+ raise wsme.exc.ClientSideError(msg % p['path'])
+ return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch))
+
+
+def get_patch_value(patch, path):
+ for p in patch:
+ if p['path'] == path:
+ return p['value']
+
+
+def allow_node_logical_names():
+ # v1.5 added logical name aliases
+ return pecan.request.version.minor >= 5
+
+def get_rpc_node(node_ident):
+ """Get the RPC node from the node uuid or logical name.
+
+ :param node_ident: the UUID or logical name of a node.
+
+ :returns: The RPC Node.
+ :raises: InvalidUuidOrName if the name or uuid provided is not valid.
+ :raises: NodeNotFound if the node is not found.
+ """
+ # Check to see if the node_ident is a valid UUID. If it is, treat it
+ # as a UUID.
+ if uuidutils.is_uuid_like(node_ident):
+ return objects.Node.get_by_uuid(pecan.request.context, node_ident)
+
+ # We can refer to nodes by their name, if the client supports it
+ if allow_node_logical_names():
+ if utils.is_hostname_safe(node_ident):
+ return objects.Node.get_by_name(pecan.request.context, node_ident)
+ raise exception.InvalidUuidOrName(name=node_ident)
+
+ # Ensure we raise the same exception as we did for the Juno release
+ raise exception.NodeNotFound(node=node_ident)
+
+def is_valid_node_name(name):
+ """Determine if the provided name is a valid node name.
+
+ Check to see that the provided node name is valid, and isn't a UUID.
+
+ :param: name: the node name to check.
+ :returns: True if the name is valid, False otherwise.
+ """
+ return utils.is_hostname_safe(name) and (not uuidutils.is_uuid_like(name))
+
+
+# Board helpers, mirroring the node helpers above.
+
+def allow_board_logical_names():
+    # Assumed to follow the same API-version gate as the node helper
+    # above (v1.5 added logical name aliases).
+    return pecan.request.version.minor >= 5
+
+
+def get_rpc_board(board_ident):
+ """Get the RPC board from the board uuid or logical name.
+
+ :param board_ident: the UUID or logical name of a board.
+
+ :returns: The RPC Board.
+ :raises: InvalidUuidOrName if the name or uuid provided is not valid.
+ :raises: BoardNotFound if the board is not found.
+ """
+ # Check to see if the board_ident is a valid UUID. If it is, treat it
+ # as a UUID.
+ if uuidutils.is_uuid_like(board_ident):
+ return objects.Board.get_by_uuid(pecan.request.context, board_ident)
+
+ # We can refer to boards by their name, if the client supports it
+ if allow_board_logical_names():
+ if utils.is_hostname_safe(board_ident):
+ return objects.Board.get_by_name(pecan.request.context, board_ident)
+ raise exception.InvalidUuidOrName(name=board_ident)
+
+ # Ensure we raise the same exception as we did for the Juno release
+ raise exception.BoardNotFound(board=board_ident)
diff --git a/iotronic/api/expose.py b/iotronic/api/expose.py
new file mode 100644
index 0000000..46d4649
--- /dev/null
+++ b/iotronic/api/expose.py
@@ -0,0 +1,24 @@
+#
+# Copyright 2015 Rackspace, Inc
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import wsmeext.pecan as wsme_pecan
+
+
+def expose(*args, **kwargs):
+ """Ensure that only JSON, and not XML, is supported."""
+ if 'rest_content_types' not in kwargs:
+ kwargs['rest_content_types'] = ('json',)
+ return wsme_pecan.wsexpose(*args, **kwargs)
diff --git a/iotronic/api/hooks.py b/iotronic/api/hooks.py
new file mode 100644
index 0000000..ef3a147
--- /dev/null
+++ b/iotronic/api/hooks.py
@@ -0,0 +1,159 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2012 New Dream Network, LLC (DreamHost)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from pecan import hooks
+from webob import exc
+
+from iotronic.common import context
+from iotronic.common import policy
+
+
+from iotronic.conductor import rpcapi
+from iotronic.db import api as dbapi
+
+
+class ConfigHook(hooks.PecanHook):
+ """Attach the config object to the request so controllers can get to it."""
+
+ def before(self, state):
+ state.request.cfg = cfg.CONF
+
+
+class DBHook(hooks.PecanHook):
+ """Attach the dbapi object to the request so controllers can get to it."""
+
+ def before(self, state):
+        # NOTE: attaching the dbapi is currently disabled; controllers
+        # reach persistent state through the conductor RPC API instead.
+        # state.request.dbapi = dbapi.get_instance()
+ pass
+
+
+class ContextHook(hooks.PecanHook):
+ """Configures a request context and attaches it to the request.
+
+ The following HTTP request headers are used:
+
+ X-User-Id or X-User:
+ Used for context.user_id.
+
+ X-Tenant-Id or X-Tenant:
+ Used for context.tenant.
+
+ X-Auth-Token:
+ Used for context.auth_token.
+
+ X-Roles:
+ Used for setting context.is_admin flag to either True or False.
+        The flag is set to True if X-Roles contains either an
+        'administrator' or 'admin' substring. Otherwise it is set to False.
+
+ """
+ def __init__(self, public_api_routes):
+ self.public_api_routes = public_api_routes
+ super(ContextHook, self).__init__()
+
+ def before(self, state):
+ headers = state.request.headers
+
+ # Do not pass any token with context for noauth mode
+ auth_token = (None if cfg.CONF.auth_strategy == 'noauth' else
+ headers.get('X-Auth-Token'))
+
+ creds = {
+ 'user': headers.get('X-User') or headers.get('X-User-Id'),
+ 'tenant': headers.get('X-Tenant') or headers.get('X-Tenant-Id'),
+ 'domain_id': headers.get('X-User-Domain-Id'),
+ 'domain_name': headers.get('X-User-Domain-Name'),
+ 'auth_token': auth_token,
+ 'roles': headers.get('X-Roles', '').split(','),
+ }
+
+ # NOTE(adam_g): We also check the previous 'admin' rule to ensure
+ # compat with default juno policy.json. This double check may be
+ # removed in L.
+ is_admin = (policy.enforce('admin_api', creds, creds) or
+ policy.enforce('admin', creds, creds))
+ is_public_api = state.request.environ.get('is_public_api', False)
+ show_password = policy.enforce('show_password', creds, creds)
+
+ state.request.context = context.RequestContext(
+ is_admin=is_admin,
+ is_public_api=is_public_api,
+ show_password=show_password,
+ **creds)
+
+
+class RPCHook(hooks.PecanHook):
+ """Attach the rpcapi object to the request so controllers can get to it."""
+
+ def before(self, state):
+ state.request.rpcapi = rpcapi.ConductorAPI()
+
+
+class TrustedCallHook(hooks.PecanHook):
+ """Verify that the user has admin rights.
+
+ Checks whether the API call is performed against a public
+ resource or the user has admin privileges in the appropriate
+ tenant, domain or other administrative unit.
+
+ """
+ def before(self, state):
+ ctx = state.request.context
+ if ctx.is_public_api:
+ return
+ policy.enforce('admin_api', ctx.to_dict(), ctx.to_dict(),
+ do_raise=True, exc=exc.HTTPForbidden)
+
+
+class NoExceptionTracebackHook(hooks.PecanHook):
+ """Workaround rpc.common: deserialize_remote_exception.
+
+    deserialize_remote_exception builds the rpc exception traceback into
+    the error message, which is then sent to the client. Such behavior is
+    a security concern, so this hook cuts the traceback out of the error
+    message.
+
+ """
+    # NOTE(max_lobur): the 'after' hook is used instead of 'on_error'
+    # because 'on_error' never fires for the wsme+pecan pair: the wsme
+    # @wsexpose decorator catches and handles all errors, so 'on_error',
+    # which is dedicated to unhandled exceptions, never triggers.
+ def after(self, state):
+        # Omit empty body. Some errors may not have a body at this level yet.
+ if not state.response.body:
+ return
+
+ # Do nothing if there is no error.
+ if 200 <= state.response.status_int < 400:
+ return
+
+ json_body = state.response.json
+ # Do not remove traceback when server in debug mode (except 'Server'
+ # errors when 'debuginfo' will be used for traces).
+ if cfg.CONF.debug and json_body.get('faultcode') != 'Server':
+ return
+
+ faultstring = json_body.get('faultstring')
+ traceback_marker = 'Traceback (most recent call last):'
+ if faultstring and traceback_marker in faultstring:
+ # Cut-off traceback.
+ faultstring = faultstring.split(traceback_marker, 1)[0]
+ # Remove trailing newlines and spaces if any.
+ json_body['faultstring'] = faultstring.rstrip()
+        # Replace the whole json. Cannot change the original one because
+        # it's generated on the fly.
+ state.response.json = json_body
diff --git a/iotronic/api/middleware/__init__.py b/iotronic/api/middleware/__init__.py
new file mode 100644
index 0000000..022a5ab
--- /dev/null
+++ b/iotronic/api/middleware/__init__.py
@@ -0,0 +1,23 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from iotronic.api.middleware import auth_token
+from iotronic.api.middleware import parsable_error
+
+
+ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware
+AuthTokenMiddleware = auth_token.AuthTokenMiddleware
+
+__all__ = (ParsableErrorMiddleware,
+ AuthTokenMiddleware)
diff --git a/iotronic/api/middleware/auth_token.py b/iotronic/api/middleware/auth_token.py
new file mode 100644
index 0000000..5cb3934
--- /dev/null
+++ b/iotronic/api/middleware/auth_token.py
@@ -0,0 +1,62 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+from keystonemiddleware import auth_token
+from oslo_log import log
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common import utils
+
+LOG = log.getLogger(__name__)
+
+
+class AuthTokenMiddleware(auth_token.AuthProtocol):
+ """A wrapper on Keystone auth_token middleware.
+
+ Does not perform verification of authentication tokens
+ for public routes in the API.
+
+ """
+ def __init__(self, app, conf, public_api_routes=[]):
+ # TODO(mrda): Remove .xml and ensure that doesn't result in a
+ # 401 Authentication Required instead of 404 Not Found
+        route_pattern_tpl = r'%s(\.json|\.xml)?$'
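+        # Illustrative: a public route of '/v1' will then also match
+        # '/v1.json' and '/v1.xml'.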
+
+ try:
+ self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl)
+ for route_tpl in public_api_routes]
+ except re.error as e:
+            msg = _('Cannot compile public API routes: %s') % e
+            LOG.error(msg)
+ raise exception.ConfigInvalid(error_msg=msg)
+
+ super(AuthTokenMiddleware, self).__init__(app, conf)
+
+ def __call__(self, env, start_response):
+ path = utils.safe_rstrip(env.get('PATH_INFO'), '/')
+
+        # Whether the API call is being performed against the public API
+        # is required by some other components, so it is saved in the
+        # WSGI environment.
+ env['is_public_api'] = any(map(lambda pattern: re.match(pattern, path),
+ self.public_api_routes))
+
+ if env['is_public_api']:
+ return self._app(env, start_response)
+
+ return super(AuthTokenMiddleware, self).__call__(env, start_response)
diff --git a/iotronic/api/middleware/parsable_error.py b/iotronic/api/middleware/parsable_error.py
new file mode 100644
index 0000000..ae898a9
--- /dev/null
+++ b/iotronic/api/middleware/parsable_error.py
@@ -0,0 +1,94 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2012 New Dream Network, LLC (DreamHost)
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Middleware to replace the plain text message body of an error
+response with one formatted so the client can parse it.
+
+Based on pecan.middleware.errordocument
+"""
+
+import json
+from xml import etree as et
+
+from oslo_log import log
+import six
+import webob
+
+from iotronic.common.i18n import _
+from iotronic.common.i18n import _LE
+
+LOG = log.getLogger(__name__)
+
+
+class ParsableErrorMiddleware(object):
+ """Replace error body with something the client can parse."""
+ def __init__(self, app):
+ self.app = app
+
+ def __call__(self, environ, start_response):
+ # Request for this state, modified by replace_start_response()
+ # and used when an error is being reported.
+ state = {}
+
+ def replacement_start_response(status, headers, exc_info=None):
+ """Overrides the default response to make errors parsable."""
+ try:
+ status_code = int(status.split(' ')[0])
+ state['status_code'] = status_code
+ except (ValueError, TypeError): # pragma: nocover
+ raise Exception(_(
+ 'ErrorDocumentMiddleware received an invalid '
+ 'status %s') % status)
+ else:
+ if (state['status_code'] // 100) not in (2, 3):
+ # Remove some headers so we can replace them later
+ # when we have the full error message and can
+ # compute the length.
+ headers = [(h, v)
+ for (h, v) in headers
+ if h not in ('Content-Length', 'Content-Type')
+ ]
+ # Save the headers in case we need to modify them.
+ state['headers'] = headers
+ return start_response(status, headers, exc_info)
+
+ app_iter = self.app(environ, replacement_start_response)
+ if (state['status_code'] // 100) not in (2, 3):
+ req = webob.Request(environ)
+ if (req.accept.best_match(['application/json', 'application/xml'])
+ == 'application/xml'):
+ try:
+ # simple check xml is valid
+                    body = [et.ElementTree.tostring(
+                        et.ElementTree.fromstring(
+                            '<error_message>'
+                            + '\n'.join(app_iter)
+                            + '</error_message>'))]
+                except et.ElementTree.ParseError as err:
+                    LOG.error(_LE('Error parsing HTTP response: %s'), err)
+                    body = ['<error_message>%s</error_message>'
+                            % state['status_code']]
+ state['headers'].append(('Content-Type', 'application/xml'))
+ else:
+ if six.PY3:
+ app_iter = [i.decode('utf-8') for i in app_iter]
+ body = [json.dumps({'error_message': '\n'.join(app_iter)})]
+ if six.PY3:
+ body = [item.encode('utf-8') for item in body]
+ state['headers'].append(('Content-Type', 'application/json'))
+ state['headers'].append(('Content-Length', str(len(body[0]))))
+ else:
+ body = app_iter
+ return body
diff --git a/iotronic/common/__init__.py b/iotronic/common/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iotronic/common/boot_devices.py b/iotronic/common/boot_devices.py
new file mode 100644
index 0000000..41d266b
--- /dev/null
+++ b/iotronic/common/boot_devices.py
@@ -0,0 +1,42 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""
+Mapping of boot devices used when requesting the system to boot
+from an alternate device.
+
+The options presented were based on the IPMItool chassis
+bootdev command. You can find the documentation at:
+http://linux.die.net/man/1/ipmitool
+
+NOTE: This module does not include all the options from ipmitool because
+they don't make sense in the limited context of Iotronic right now.
+"""
+
+PXE = 'pxe'
+"Boot from PXE boot"
+
+DISK = 'disk'
+"Boot from default Hard-drive"
+
+CDROM = 'cdrom'
+"Boot from CD/DVD"
+
+BIOS = 'bios'
+"Boot into BIOS setup"
+
+SAFE = 'safe'
+"Boot from default Hard-drive, request Safe Mode"
diff --git a/iotronic/common/config.py b/iotronic/common/config.py
new file mode 100644
index 0000000..38d6e5c
--- /dev/null
+++ b/iotronic/common/config.py
@@ -0,0 +1,31 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from iotronic.common import rpc
+from iotronic import version
+
+
+def parse_args(argv, default_config_files=None):
+ rpc.set_defaults(control_exchange='iotronic')
+ cfg.CONF(argv[1:],
+ project='iotronic',
+ version=version.version_info.release_string(),
+ default_config_files=default_config_files)
+ rpc.init(cfg.CONF)
diff --git a/iotronic/common/config_generator/__init__.py b/iotronic/common/config_generator/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iotronic/common/config_generator/generator.py b/iotronic/common/config_generator/generator.py
new file mode 100644
index 0000000..ccf073d
--- /dev/null
+++ b/iotronic/common/config_generator/generator.py
@@ -0,0 +1,333 @@
+# Copyright 2012 SINA Corporation
+# Copyright 2014 Cisco Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Extracts OpenStack config option info from module(s)."""
+
+# NOTE(GheRivero): Copied from oslo_incubator before getting removed in
+# Change-Id: If15b77d31a8c615aad8fca30f6dd9928da2d08bb
+
+from __future__ import print_function
+
+import argparse
+import imp
+import os
+import re
+import socket
+import sys
+import textwrap
+
+from oslo_config import cfg
+import oslo_i18n
+from oslo_utils import importutils
+import six
+import stevedore.named
+
+
+oslo_i18n.install('iotronic')
+
+STROPT = "StrOpt"
+BOOLOPT = "BoolOpt"
+INTOPT = "IntOpt"
+FLOATOPT = "FloatOpt"
+LISTOPT = "ListOpt"
+DICTOPT = "DictOpt"
+MULTISTROPT = "MultiStrOpt"
+
+OPT_TYPES = {
+ STROPT: 'string value',
+ BOOLOPT: 'boolean value',
+ INTOPT: 'integer value',
+ FLOATOPT: 'floating point value',
+ LISTOPT: 'list value',
+ DICTOPT: 'dict value',
+ MULTISTROPT: 'multi valued',
+}
+
+OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
+ FLOATOPT, LISTOPT, DICTOPT,
+ MULTISTROPT]))
+
+PY_EXT = ".py"
+BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ "../../../../"))
+WORDWRAP_WIDTH = 60
+
+
+def raise_extension_exception(extmanager, ep, err):
+ raise
+
+
+def generate(argv):
+ parser = argparse.ArgumentParser(
+ description='generate sample configuration file',
+ )
+ parser.add_argument('-m', dest='modules', action='append')
+ parser.add_argument('-l', dest='libraries', action='append')
+ parser.add_argument('srcfiles', nargs='*')
+ parsed_args = parser.parse_args(argv)
+
+ mods_by_pkg = dict()
+ for filepath in parsed_args.srcfiles:
+ pkg_name = filepath.split(os.sep)[1]
+ mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
+ os.path.basename(filepath).split('.')[0]])
+ mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
+ # NOTE(lzyeval): place top level modules before packages
+ pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
+ ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
+ pkg_names.extend(ext_names)
+
+ # opts_by_group is a mapping of group name to an options list
+ # The options list is a list of (module, options) tuples
+ opts_by_group = {'DEFAULT': []}
+
+ if parsed_args.modules:
+ for module_name in parsed_args.modules:
+ module = _import_module(module_name)
+ if module:
+ for group, opts in _list_opts(module):
+ opts_by_group.setdefault(group, []).append((module_name,
+ opts))
+
+ # Look for entry points defined in libraries (or applications) for
+ # option discovery, and include their return values in the output.
+ #
+ # Each entry point should be a function returning an iterable
+ # of pairs with the group name (or None for the default group)
+ # and the list of Opt instances for that group.
+ if parsed_args.libraries:
+ loader = stevedore.named.NamedExtensionManager(
+ 'oslo.config.opts',
+ names=list(set(parsed_args.libraries)),
+ invoke_on_load=False,
+ on_load_failure_callback=raise_extension_exception
+ )
+ for ext in loader:
+ for group, opts in ext.plugin():
+ opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
+ opt_list.append((ext.name, opts))
+
+ for pkg_name in pkg_names:
+ mods = mods_by_pkg.get(pkg_name)
+ mods.sort()
+ for mod_str in mods:
+ if mod_str.endswith('.__init__'):
+ mod_str = mod_str[:mod_str.rfind(".")]
+
+ mod_obj = _import_module(mod_str)
+ if not mod_obj:
+ raise RuntimeError("Unable to import module %s" % mod_str)
+
+ for group, opts in _list_opts(mod_obj):
+ opts_by_group.setdefault(group, []).append((mod_str, opts))
+
+ print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
+ for group in sorted(opts_by_group.keys()):
+ print_group_opts(group, opts_by_group[group])
+
+
+def _import_module(mod_str):
+ try:
+ if mod_str.startswith('bin.'):
+ imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
+ return sys.modules[mod_str[4:]]
+ else:
+ return importutils.import_module(mod_str)
+ except Exception as e:
+ sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
+ return None
+
+
+def _is_in_group(opt, group):
+ """Check if opt is in group."""
+ for value in group._opts.values():
+ # NOTE(llu): Temporary workaround for bug #1262148, wait until
+ # newly released oslo.config support '==' operator.
+ if not(value['opt'] != opt):
+ return True
+ return False
+
+
+def _guess_groups(opt):
+ # is it in the DEFAULT group?
+ if _is_in_group(opt, cfg.CONF):
+ return 'DEFAULT'
+
+ # what other groups is it in?
+ for value in cfg.CONF.values():
+ if isinstance(value, cfg.CONF.GroupAttr):
+ if _is_in_group(opt, value._group):
+ return value._group.name
+
+ raise RuntimeError(
+ "Unable to find group for option %s, "
+ "maybe it's defined twice in the same group?"
+ % opt.name
+ )
+
+
+def _list_opts(obj):
+ def is_opt(o):
+ return (isinstance(o, cfg.Opt) and
+ not isinstance(o, cfg.SubCommandOpt))
+
+ opts = list()
+
+ if 'list_opts' in dir(obj):
+ group_opts = getattr(obj, 'list_opts')()
+ # NOTE(GheRivero): Options without a defined group,
+ # must be registered to the DEFAULT section
+ fixed_list = []
+ for section, opts in group_opts:
+ if not section:
+ section = 'DEFAULT'
+ fixed_list.append((section, opts))
+ return fixed_list
+
+ for attr_str in dir(obj):
+ attr_obj = getattr(obj, attr_str)
+ if is_opt(attr_obj):
+ opts.append(attr_obj)
+ elif (isinstance(attr_obj, list) and
+ all(map(lambda x: is_opt(x), attr_obj))):
+ opts.extend(attr_obj)
+
+ ret = {}
+ for opt in opts:
+ ret.setdefault(_guess_groups(opt), []).append(opt)
+ return ret.items()
+
+
+def print_group_opts(group, opts_by_module):
+ print("[%s]" % group)
+ print('')
+ for mod, opts in opts_by_module:
+ print('#')
+ print('# Options defined in %s' % mod)
+ print('#')
+ print('')
+ for opt in opts:
+ _print_opt(opt)
+ print('')
+
+
+def _get_my_ip():
+ try:
+ csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ csock.connect(('8.8.8.8', 80))
+ (addr, port) = csock.getsockname()
+ csock.close()
+ return addr
+ except socket.error:
+ return None
+
+
+def _sanitize_default(name, value):
+ """Set up a reasonably sensible default for pybasedir, my_ip and host."""
+ hostname = socket.gethostname()
+ fqdn = socket.getfqdn()
+ if value.startswith(sys.prefix):
+ # NOTE(jd) Don't use os.path.join, because it is likely to think the
+ # second part is an absolute pathname and therefore drop the first
+ # part.
+ value = os.path.normpath("/usr/" + value[len(sys.prefix):])
+ elif value.startswith(BASEDIR):
+ return value.replace(BASEDIR, '/usr/lib/python/site-packages')
+ elif BASEDIR in value:
+ return value.replace(BASEDIR, '')
+ elif value == _get_my_ip():
+ return '10.0.0.1'
+ elif value in (hostname, fqdn):
+ if 'host' in name:
+ return 'iotronic'
+ elif value.endswith(hostname):
+ return value.replace(hostname, 'iotronic')
+ elif value.endswith(fqdn):
+ return value.replace(fqdn, 'iotronic')
+ elif value.strip() != value:
+ return '"%s"' % value
+ return value
+
+
+def _print_opt(opt):
+ opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
+ if not opt_help:
+ sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
+ opt_help = ""
+ try:
+ opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
+ except (ValueError, AttributeError) as err:
+ sys.stderr.write("%s\n" % str(err))
+ sys.exit(1)
+ opt_help = u'%s (%s)' % (opt_help,
+ OPT_TYPES[opt_type])
+ print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
+ if opt.deprecated_opts:
+ for deprecated_opt in opt.deprecated_opts:
+ if deprecated_opt.name:
+ deprecated_group = (deprecated_opt.group if
+ deprecated_opt.group else "DEFAULT")
+ print('# Deprecated group/name - [%s]/%s' %
+ (deprecated_group,
+ deprecated_opt.name))
+ try:
+ if opt_default is None:
+ print('#%s=' % opt_name)
+ else:
+ _print_type(opt_type, opt_name, opt_default)
+ print('')
+ except Exception:
+ sys.stderr.write('Error in option "%s"\n' % opt_name)
+ sys.exit(1)
+
+
+def _print_type(opt_type, opt_name, opt_default):
+ if opt_type == STROPT:
+ assert(isinstance(opt_default, six.string_types))
+ print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
+ opt_default)))
+ elif opt_type == BOOLOPT:
+ assert(isinstance(opt_default, bool))
+ print('#%s=%s' % (opt_name, str(opt_default).lower()))
+ elif opt_type == INTOPT:
+ assert(isinstance(opt_default, int) and
+ not isinstance(opt_default, bool))
+ print('#%s=%s' % (opt_name, opt_default))
+ elif opt_type == FLOATOPT:
+ assert(isinstance(opt_default, float))
+ print('#%s=%s' % (opt_name, opt_default))
+ elif opt_type == LISTOPT:
+ assert(isinstance(opt_default, list))
+ print('#%s=%s' % (opt_name, ','.join(opt_default)))
+ elif opt_type == DICTOPT:
+ assert(isinstance(opt_default, dict))
+ opt_default_strlist = [str(key) + ':' + str(value)
+ for (key, value) in opt_default.items()]
+ print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
+ elif opt_type == MULTISTROPT:
+ assert(isinstance(opt_default, list))
+ if not opt_default:
+ opt_default = ['']
+ for default in opt_default:
+ print('#%s=%s' % (opt_name, default))
+
+
+def main():
+ generate(sys.argv[1:])
+
+if __name__ == '__main__':
+ main()
diff --git a/iotronic/common/context.py b/iotronic/common/context.py
new file mode 100644
index 0000000..aaeffb3
--- /dev/null
+++ b/iotronic/common/context.py
@@ -0,0 +1,67 @@
+# -*- encoding: utf-8 -*-
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_context import context
+
+
+class RequestContext(context.RequestContext):
+ """Extends security contexts from the OpenStack common library."""
+
+ def __init__(self, auth_token=None, domain_id=None, domain_name=None,
+ user=None, tenant=None, is_admin=False, is_public_api=False,
+ read_only=False, show_deleted=False, request_id=None,
+ roles=None, show_password=True):
+ """Stores several additional request parameters:
+
+ :param domain_id: The ID of the domain.
+ :param domain_name: The name of the domain.
+ :param is_public_api: Specifies whether the request should be processed
+ without authentication.
+ :param roles: List of user's roles if any.
+ :param show_password: Specifies whether passwords should be masked
+ before sending back to API call.
+
+ """
+ self.is_public_api = is_public_api
+ self.domain_id = domain_id
+ self.domain_name = domain_name
+ self.roles = roles or []
+ self.show_password = show_password
+
+ super(RequestContext, self).__init__(auth_token=auth_token,
+ user=user, tenant=tenant,
+ is_admin=is_admin,
+ read_only=read_only,
+ show_deleted=show_deleted,
+ request_id=request_id)
+
+ def to_dict(self):
+ return {'auth_token': self.auth_token,
+ 'user': self.user,
+ 'tenant': self.tenant,
+ 'is_admin': self.is_admin,
+ 'read_only': self.read_only,
+ 'show_deleted': self.show_deleted,
+ 'request_id': self.request_id,
+ 'domain_id': self.domain_id,
+ 'roles': self.roles,
+ 'domain_name': self.domain_name,
+ 'show_password': self.show_password,
+ 'is_public_api': self.is_public_api}
+
+ @classmethod
+ def from_dict(cls, values):
+ values.pop('user', None)
+ values.pop('tenant', None)
+ return cls(**values)
diff --git a/iotronic/common/dhcp_factory.py b/iotronic/common/dhcp_factory.py
new file mode 100644
index 0000000..1a2236e
--- /dev/null
+++ b/iotronic/common/dhcp_factory.py
@@ -0,0 +1,100 @@
+# Copyright 2014 Rackspace, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_concurrency import lockutils
+from oslo_config import cfg
+import stevedore
+
+from iotronic.common import exception
+
+
+dhcp_provider_opts = [
+ cfg.StrOpt('dhcp_provider',
+ default='neutron',
+ help='DHCP provider to use. "neutron" uses Neutron, and '
+ '"none" uses a no-op provider.'
+ ),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(dhcp_provider_opts, group='dhcp')
+
+_dhcp_provider = None
+
+EM_SEMAPHORE = 'dhcp_provider'
+
+
+class DHCPFactory(object):
+
+ # NOTE(lucasagomes): Instantiate a stevedore.driver.DriverManager
+ # only once, the first time DHCPFactory.__init__
+ # is called.
+ _dhcp_provider = None
+
+ def __init__(self, **kwargs):
+ if not DHCPFactory._dhcp_provider:
+ DHCPFactory._set_dhcp_provider(**kwargs)
+
+ # NOTE(lucasagomes): Use lockutils to avoid a potential race in eventlet
+ # that might try to create two dhcp factories.
+ @classmethod
+ @lockutils.synchronized(EM_SEMAPHORE, 'iotronic-')
+ def _set_dhcp_provider(cls, **kwargs):
+ """Initialize the dhcp provider
+
+ :raises: DHCPLoadError if the dhcp_provider cannot be loaded.
+ """
+
+ # NOTE(lucasagomes): In case multiple greenthreads queue up on
+ # this lock before _dhcp_provider is initialized,
+ # prevent creation of multiple DriverManager.
+ if cls._dhcp_provider:
+ return
+
+ dhcp_provider_name = CONF.dhcp.dhcp_provider
+ try:
+ _extension_manager = stevedore.driver.DriverManager(
+ 'iotronic.dhcp',
+ dhcp_provider_name,
+ invoke_kwds=kwargs,
+ invoke_on_load=True)
+ except Exception as e:
+ raise exception.DHCPLoadError(
+ dhcp_provider_name=dhcp_provider_name, reason=e
+ )
+
+ cls._dhcp_provider = _extension_manager.driver
+
+ def update_dhcp(self, task, dhcp_opts, ports=None):
+ """Send or update the DHCP BOOT options for this node.
+
+ :param task: A TaskManager instance.
+ :param dhcp_opts: this will be a list of dicts, e.g.
+
+ ::
+
+ [{'opt_name': 'bootfile-name',
+ 'opt_value': 'pxelinux.0'},
+ {'opt_name': 'server-ip-address',
+ 'opt_value': '123.123.123.456'},
+ {'opt_name': 'tftp-server',
+ 'opt_value': '123.123.123.123'}]
+ :param ports: a list of Neutron port dicts to update DHCP options on.
+ If None, will get the list of ports from the Iotronic port objects.
+ """
+ self.provider.update_dhcp_opts(task, dhcp_opts, ports)
+
+ @property
+ def provider(self):
+ return self._dhcp_provider
diff --git a/iotronic/common/disk_partitioner.py b/iotronic/common/disk_partitioner.py
new file mode 100644
index 0000000..d255a4b
--- /dev/null
+++ b/iotronic/common/disk_partitioner.py
@@ -0,0 +1,211 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+from oslo_concurrency import processutils
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common.i18n import _LW
+from iotronic.common import utils
+from iotronic.openstack.common import loopingcall
+
+opts = [
+ cfg.IntOpt('check_device_interval',
+ default=1,
+               help='After Iotronic has completed creating the partition '
+                    'table, it continues to check for activity on the '
+                    'attached iSCSI device at this interval prior to '
+                    'copying the image to the node, in seconds'),
+ cfg.IntOpt('check_device_max_retries',
+ default=20,
+ help='The maximum number of times to check that the device is '
+ 'not accessed by another process. If the device is still '
+ 'busy after that, the disk partitioning will be treated as'
+ ' having failed.'),
+]
+
+CONF = cfg.CONF
+opt_group = cfg.OptGroup(name='disk_partitioner',
+ title='Options for the disk partitioner')
+CONF.register_group(opt_group)
+CONF.register_opts(opts, opt_group)
+
+LOG = logging.getLogger(__name__)
+
+
+class DiskPartitioner(object):
+
+ def __init__(self, device, disk_label='msdos', alignment='optimal'):
+ """A convenient wrapper around the parted tool.
+
+ :param device: The device path.
+ :param disk_label: The type of the partition table. Valid types are:
+ "bsd", "dvh", "gpt", "loop", "mac", "msdos",
+ "pc98", or "sun".
+ :param alignment: Set alignment for newly created partitions.
+ Valid types are: none, cylinder, minimal and
+ optimal.
+
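+        A typical (illustrative) session::
+
+            dp = DiskPartitioner('/dev/sdb')
+            dp.add_partition(512, fs_type='ext2', bootable=True)
+            dp.add_partition(4096)
+            dp.commit()
+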
+ """
+ self._device = device
+ self._disk_label = disk_label
+ self._alignment = alignment
+ self._partitions = []
+ self._fuser_pids_re = re.compile(r'((\d)+\s*)+')
+
+ def _exec(self, *args):
+ # NOTE(lucasagomes): utils.execute() is already a wrapper on top
+ # of processutils.execute() which raises specific
+ # exceptions. It also logs any failure so we don't
+ # need to log it again here.
+ utils.execute('parted', '-a', self._alignment, '-s', self._device,
+ '--', 'unit', 'MiB', *args, check_exit_code=[0],
+ run_as_root=True)
+
+ def add_partition(self, size, part_type='primary', fs_type='',
+ bootable=False):
+ """Add a partition.
+
+ :param size: The size of the partition in MiB.
+ :param part_type: The type of the partition. Valid values are:
+ primary, logical, or extended.
+ :param fs_type: The filesystem type. Valid types are: ext2, fat32,
+ fat16, HFS, linux-swap, NTFS, reiserfs, ufs.
+ If blank (''), it will create a Linux native
+ partition (83).
+ :param bootable: Boolean value; whether the partition is bootable
+ or not.
+ :returns: The partition number.
+
+ """
+ self._partitions.append({'size': size,
+ 'type': part_type,
+ 'fs_type': fs_type,
+ 'bootable': bootable})
+ return len(self._partitions)
+
+ def get_partitions(self):
+ """Get the partitioning layout.
+
+ :returns: An iterator with the partition number and the
+ partition layout.
+
+ """
+ return enumerate(self._partitions, 1)
+
+ def _wait_for_disk_to_become_available(self, retries, max_retries, pids,
+ stderr):
+ retries[0] += 1
+ if retries[0] > max_retries:
+ raise loopingcall.LoopingCallDone()
+
+ try:
+ # NOTE(ifarkas): fuser returns a non-zero return code if none of
+ # the specified files is accessed
+ out, err = utils.execute('fuser', self._device,
+ check_exit_code=[0, 1], run_as_root=True)
+
+ if not out and not err:
+ raise loopingcall.LoopingCallDone()
+ else:
+ if err:
+ stderr[0] = err
+ if out:
+ pids_match = re.search(self._fuser_pids_re, out)
+ pids[0] = pids_match.group()
+ except processutils.ProcessExecutionError as exc:
+ LOG.warning(_LW('Failed to check the device %(device)s with fuser:'
+ ' %(err)s'), {'device': self._device, 'err': exc})
+
+ def commit(self):
+ """Write to the disk."""
+ LOG.debug("Committing partitions to disk.")
+ cmd_args = ['mklabel', self._disk_label]
+ # NOTE(lucasagomes): Lead in with 1MiB to allow room for the
+ # partition table itself.
+ start = 1
+ for num, part in self.get_partitions():
+ end = start + part['size']
+ cmd_args.extend(['mkpart', part['type'], part['fs_type'],
+ str(start), str(end)])
+ if part['bootable']:
+ cmd_args.extend(['set', str(num), 'boot', 'on'])
+ start = end
+
+ self._exec(*cmd_args)
+
+ retries = [0]
+ pids = ['']
+ fuser_err = ['']
+ interval = CONF.disk_partitioner.check_device_interval
+ max_retries = CONF.disk_partitioner.check_device_max_retries
+
+ timer = loopingcall.FixedIntervalLoopingCall(
+ self._wait_for_disk_to_become_available,
+ retries, max_retries, pids, fuser_err)
+ timer.start(interval=interval).wait()
+
+ if retries[0] > max_retries:
+ if pids[0]:
+ raise exception.InstanceDeployFailure(
+ _('Disk partitioning failed on device %(device)s. '
+ 'Processes with the following PIDs are holding it: '
+ '%(pids)s. Time out waiting for completion.')
+ % {'device': self._device, 'pids': pids[0]})
+ else:
+ raise exception.InstanceDeployFailure(
+ _('Disk partitioning failed on device %(device)s. Fuser '
+ 'exited with "%(fuser_err)s". Time out waiting for '
+ 'completion.')
+ % {'device': self._device, 'fuser_err': fuser_err[0]})
+
+
+_PARTED_PRINT_RE = re.compile(r"^(\d+):([\d\.]+)MiB:"
+                              r"([\d\.]+)MiB:([\d\.]+)MiB:(\w*)::(\w*)")
+
+
+def list_partitions(device):
+ """Get partitions information from given device.
+
+ :param device: The device path.
+ :returns: list of dictionaries (one per partition) with keys:
+ number, start, end, size (in MiB), filesystem, flags
+ """
+ output = utils.execute(
+ 'parted', '-s', '-m', device, 'unit', 'MiB', 'print',
+ use_standard_locale=True, run_as_root=True)[0]
+ if isinstance(output, bytes):
+ output = output.decode("utf-8")
+ lines = [line for line in output.split('\n') if line.strip()][2:]
+ # Example of line: 1:1.00MiB:501MiB:500MiB:ext4::boot
+ fields = ('number', 'start', 'end', 'size', 'filesystem', 'flags')
+ result = []
+ for line in lines:
+ match = _PARTED_PRINT_RE.match(line)
+ if match is None:
+ LOG.warn(_LW("Partition information from parted for device "
+ "%(device)s does not match "
+ "expected format: %(line)s"),
+ dict(device=device, line=line))
+ continue
+ # Cast int fields to ints (some are floats and we round them down)
+ groups = [int(float(x)) if i < 4 else x
+ for i, x in enumerate(match.groups())]
+ result.append(dict(zip(fields, groups)))
+ return result
diff --git a/iotronic/common/driver_factory.py b/iotronic/common/driver_factory.py
new file mode 100644
index 0000000..1e3c53c
--- /dev/null
+++ b/iotronic/common/driver_factory.py
@@ -0,0 +1,144 @@
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_concurrency import lockutils
+from oslo_config import cfg
+from oslo_log import log
+from stevedore import dispatch
+
+from iotronic.common import exception
+from iotronic.common.i18n import _LI
+
+
+LOG = log.getLogger(__name__)
+
+driver_opts = [
+ cfg.ListOpt('enabled_drivers',
+ default=['pxe_ipmitool'],
+ help='Specify the list of drivers to load during service '
+ 'initialization. Missing drivers, or drivers which '
+ 'fail to initialize, will prevent the conductor '
+ 'service from starting. The option default is a '
+ 'recommended set of production-oriented drivers. A '
+ 'complete list of drivers present on your system may '
+ 'be found by enumerating the "iotronic.drivers" '
+ 'entrypoint. An example may be found in the '
+ 'developer documentation online.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(driver_opts)
+
+EM_SEMAPHORE = 'extension_manager'
+
+
+def get_driver(driver_name):
+ """Simple method to get a ref to an instance of a driver.
+
+ Driver loading is handled by the DriverFactory class. This method
+ conveniently wraps that class and returns the actual driver object.
+
+ :param driver_name: the name of the driver class to load
+ :returns: An instance of a class which implements
+ iotronic.drivers.base.BaseDriver
+ :raises: DriverNotFound if the requested driver_name could not be
+ found in the "iotronic.drivers" namespace.
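+
+    Illustrative use, assuming the default driver set::
+
+        driver = get_driver('pxe_ipmitool')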
+
+ """
+
+ try:
+ factory = DriverFactory()
+ return factory[driver_name].obj
+ except KeyError:
+ raise exception.DriverNotFound(driver_name=driver_name)
+
+
+def drivers():
+ """Get all drivers as a dict name -> driver object."""
+ factory = DriverFactory()
+ return {name: factory[name].obj for name in factory.names}
+
+
+class DriverFactory(object):
+ """Discover, load and manage the drivers available."""
+
+ # NOTE(deva): loading the _extension_manager as a class member will break
+ # stevedore when it loads a driver, because the driver will
+ # import this file (and thus instantiate another factory).
+ # Instead, we instantiate a NameDispatchExtensionManager only
+ # once, the first time DriverFactory.__init__ is called.
+ _extension_manager = None
+
+ def __init__(self):
+ if not DriverFactory._extension_manager:
+ DriverFactory._init_extension_manager()
+
+ def __getitem__(self, name):
+ return self._extension_manager[name]
+
+ # NOTE(deva): Use lockutils to avoid a potential race in eventlet
+ # that might try to create two driver factories.
+ @classmethod
+ @lockutils.synchronized(EM_SEMAPHORE, 'iotronic-')
+ def _init_extension_manager(cls):
+ # NOTE(deva): In case multiple greenthreads queue up on this lock
+ # before _extension_manager is initialized, prevent
+ # creation of multiple NameDispatchExtensionManagers.
+ if cls._extension_manager:
+ return
+
+ # NOTE(deva): Drivers raise "DriverLoadError" if they are unable to be
+ # loaded, eg. due to missing external dependencies.
+ # We capture that exception, and, only if it is for an
+ # enabled driver, raise it from here. If enabled driver
+ # raises other exception type, it is wrapped in
+ # "DriverLoadError", providing the name of the driver that
+ # caused it, and raised. If the exception is for a
+ # non-enabled driver, we suppress it.
+ def _catch_driver_not_found(mgr, ep, exc):
+ # NOTE(deva): stevedore loads plugins *before* evaluating
+ # _check_func, so we need to check here, too.
+ if ep.name in CONF.enabled_drivers:
+ if not isinstance(exc, exception.DriverLoadError):
+ raise exception.DriverLoadError(driver=ep.name, reason=exc)
+ raise exc
+
+ def _check_func(ext):
+ return ext.name in CONF.enabled_drivers
+
+ cls._extension_manager = (
+ dispatch.NameDispatchExtensionManager(
+ 'iotronic.drivers',
+ _check_func,
+ invoke_on_load=True,
+ on_load_failure_callback=_catch_driver_not_found))
+
+ # NOTE(deva): if we were unable to load any configured driver, perhaps
+ # because it is not present on the system, raise an error.
+ if (sorted(CONF.enabled_drivers) !=
+ sorted(cls._extension_manager.names())):
+ found = cls._extension_manager.names()
+ names = [n for n in CONF.enabled_drivers if n not in found]
+ # just in case more than one could not be found ...
+ names = ', '.join(names)
+ raise exception.DriverNotFound(driver_name=names)
+
+ LOG.info(_LI("Loaded the following drivers: %s"),
+ cls._extension_manager.names())
+
+ @property
+ def names(self):
+ """The list of driver names available."""
+ return self._extension_manager.names()
diff --git a/iotronic/common/exception.py b/iotronic/common/exception.py
new file mode 100644
index 0000000..e4f57b8
--- /dev/null
+++ b/iotronic/common/exception.py
@@ -0,0 +1,589 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Iotronic base exception handling.
+
+Includes decorator for re-raising Iotronic-type exceptions.
+
+SHOULD include dedicated exception logging.
+
+"""
+
+from oslo_config import cfg
+from oslo_log import log as logging
+import six
+
+from iotronic.common.i18n import _
+from iotronic.common.i18n import _LE
+
+
+LOG = logging.getLogger(__name__)
+
+exc_log_opts = [
+ cfg.BoolOpt('fatal_exception_format_errors',
+ default=False,
+ help='Used if there is a formatting error when generating an '
+ 'exception message (a programming error). If True, '
+ 'raise an exception; if False, use the unformatted '
+ 'message.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(exc_log_opts)
+
+
+def _cleanse_dict(original):
+ """Strip all admin_password, new_pass, rescue_pass keys from a dict."""
+    return dict((k, v) for k, v in six.iteritems(original)
+                if "_pass" not in k)
+
+
+class IotronicException(Exception):
+ """Base Iotronic Exception
+
+ To correctly use this class, inherit from it and define
+ a 'message' property. That message will get printf'd
+ with the keyword arguments provided to the constructor.
+
+ """
+ message = _("An unknown exception occurred.")
+ code = 500
+ headers = {}
+ safe = False
+
+ def __init__(self, message=None, **kwargs):
+ self.kwargs = kwargs
+
+ if 'code' not in self.kwargs:
+ try:
+ self.kwargs['code'] = self.code
+ except AttributeError:
+ pass
+
+ if not message:
+ try:
+ message = self.message % kwargs
+
+ except Exception as e:
+ # kwargs doesn't match a variable in the message
+ # log the issue and the kwargs
+ LOG.exception(_LE('Exception in string format operation'))
+ for name, value in kwargs.items():
+ LOG.error("%s: %s" % (name, value))
+
+ if CONF.fatal_exception_format_errors:
+ raise e
+ else:
+ # at least get the core message out if something happened
+ message = self.message
+
+ super(IotronicException, self).__init__(message)
+
+ def __str__(self):
+ """Encode to utf-8 then wsme api can consume it as well."""
+ if not six.PY3:
+ return unicode(self.args[0]).encode('utf-8')
+
+ return self.args[0]
+
+ def format_message(self):
+ if self.__class__.__name__.endswith('_Remote'):
+ return self.args[0]
+ else:
+ return six.text_type(self)
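+
+# NOTE: a minimal sketch of the intended subclassing pattern; the names
+# below are hypothetical. Constructor kwargs are interpolated into the
+# 'message' template:
+#
+#   class WidgetNotFound(IotronicException):
+#       message = _("Widget %(widget)s could not be found.")
+#       code = 404
+#
+#   raise WidgetNotFound(widget='w1')  # "Widget w1 could not be found."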
+
+
+class NotAuthorized(IotronicException):
+ message = _("Not authorized.")
+ code = 403
+
+
+class OperationNotPermitted(NotAuthorized):
+ message = _("Operation not permitted.")
+
+
+class Invalid(IotronicException):
+ message = _("Unacceptable parameters.")
+ code = 400
+
+
+class Conflict(IotronicException):
+ message = _('Conflict.')
+ code = 409
+
+
+class TemporaryFailure(IotronicException):
+ message = _("Resource temporarily unavailable, please retry.")
+ code = 503
+
+
+class NotAcceptable(IotronicException):
+ # TODO(deva): We need to set response headers in the API for this exception
+ message = _("Request not acceptable.")
+ code = 406
+
+
+class InvalidState(Conflict):
+ message = _("Invalid resource state.")
+
+
+class NodeAlreadyExists(Conflict):
+ message = _("A node with UUID %(uuid)s already exists.")
+
+
+class MACAlreadyExists(Conflict):
+ message = _("A port with MAC address %(mac)s already exists.")
+
+
+class ChassisAlreadyExists(Conflict):
+ message = _("A chassis with UUID %(uuid)s already exists.")
+
+
+class PortAlreadyExists(Conflict):
+ message = _("A port with UUID %(uuid)s already exists.")
+
+
+class InstanceAssociated(Conflict):
+ message = _("Instance %(instance_uuid)s is already associated with a node,"
+ " it cannot be associated with this other node %(node)s")
+
+
+class DuplicateName(Conflict):
+ message = _("A node with name %(name)s already exists.")
+
+
+class InvalidUUID(Invalid):
+ message = _("Expected a uuid but received %(uuid)s.")
+
+
+class InvalidUuidOrName(Invalid):
+ message = _("Expected a logical name or uuid but received %(name)s.")
+
+
+class InvalidName(Invalid):
+ message = _("Expected a logical name but received %(name)s.")
+
+
+class InvalidIdentity(Invalid):
+ message = _("Expected an uuid or int but received %(identity)s.")
+
+
+class InvalidMAC(Invalid):
+ message = _("Expected a MAC address but received %(mac)s.")
+
+
+class InvalidStateRequested(Invalid):
+    message = _('The requested action "%(action)s" cannot be performed '
+                'on node "%(node)s" while it is in state "%(state)s".')
+
+
+class PatchError(Invalid):
+ message = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s")
+
+
+class InstanceDeployFailure(IotronicException):
+ message = _("Failed to deploy instance: %(reason)s")
+
+
+class ImageUnacceptable(IotronicException):
+ message = _("Image %(image_id)s is unacceptable: %(reason)s")
+
+
+class ImageConvertFailed(IotronicException):
+ message = _("Image %(image_id)s is unacceptable: %(reason)s")
+
+
+# Cannot be templated as the error syntax varies.
+# msg needs to be constructed when raised.
+class InvalidParameterValue(Invalid):
+ message = _("%(err)s")
+
+
+class MissingParameterValue(InvalidParameterValue):
+ message = _("%(err)s")
+
+
+class Duplicate(IotronicException):
+ message = _("Resource already exists.")
+
+
+class NotFound(IotronicException):
+ message = _("Resource could not be found.")
+ code = 404
+
+
+class DHCPLoadError(IotronicException):
+ message = _("Failed to load DHCP provider %(dhcp_provider_name)s, "
+ "reason: %(reason)s")
+
+
+class DriverNotFound(NotFound):
+ message = _("Could not find the following driver(s): %(driver_name)s.")
+
+
+class ImageNotFound(NotFound):
+ message = _("Image %(image_id)s could not be found.")
+
+
+class NoValidHost(NotFound):
+ message = _("No valid host was found. Reason: %(reason)s")
+
+
+class InstanceNotFound(NotFound):
+ message = _("Instance %(instance)s could not be found.")
+
+
+class NodeNotFound(NotFound):
+ message = _("Node %(node)s could not be found.")
+
+
+class NodeAssociated(InvalidState):
+ message = _("Node %(node)s is associated with instance %(instance)s.")
+
+
+class PortNotFound(NotFound):
+ message = _("Port %(port)s could not be found.")
+
+
+class FailedToUpdateDHCPOptOnPort(IotronicException):
+ message = _("Update DHCP options on port: %(port_id)s failed.")
+
+
+class FailedToGetIPAddressOnPort(IotronicException):
+ message = _("Retrieve IP address on port: %(port_id)s failed.")
+
+
+class InvalidIPv4Address(IotronicException):
+ message = _("Invalid IPv4 address %(ip_address)s.")
+
+
+class FailedToUpdateMacOnPort(IotronicException):
+ message = _("Update MAC address on port: %(port_id)s failed.")
+
+
+class ChassisNotFound(NotFound):
+ message = _("Chassis %(chassis)s could not be found.")
+
+
+class NoDriversLoaded(IotronicException):
+ message = _("Conductor %(conductor)s cannot be started "
+ "because no drivers were loaded.")
+
+
+class ConductorNotFound(NotFound):
+ message = _("Conductor %(conductor)s could not be found.")
+
+
+class ConductorAlreadyRegistered(IotronicException):
+ message = _("Conductor %(conductor)s already registered.")
+
+
+class PowerStateFailure(InvalidState):
+ message = _("Failed to set node power state to %(pstate)s.")
+
+
+class ExclusiveLockRequired(NotAuthorized):
+ message = _("An exclusive lock is required, "
+ "but the current context has a shared lock.")
+
+
+class NodeMaintenanceFailure(Invalid):
+ message = _("Failed to toggle maintenance-mode flag "
+ "for node %(node)s: %(reason)s")
+
+
+class NodeConsoleNotEnabled(Invalid):
+ message = _("Console access is not enabled on node %(node)s")
+
+
+class NodeInMaintenance(Invalid):
+ message = _("The %(op)s operation can't be performed on node "
+ "%(node)s because it's in maintenance mode.")
+
+
+class ChassisNotEmpty(Invalid):
+ message = _("Cannot complete the requested action because chassis "
+ "%(chassis)s contains nodes.")
+
+
+class IPMIFailure(IotronicException):
+ message = _("IPMI call failed: %(cmd)s.")
+
+
+class AMTConnectFailure(IotronicException):
+ message = _("Failed to connect to AMT service.")
+
+
+class AMTFailure(IotronicException):
+ message = _("AMT call failed: %(cmd)s.")
+
+
+class MSFTOCSClientApiException(IotronicException):
+ message = _("MSFT OCS call failed.")
+
+
+class SSHConnectFailed(IotronicException):
+ message = _("Failed to establish SSH connection to host %(host)s.")
+
+
+class SSHCommandFailed(IotronicException):
+ message = _("Failed to execute command via SSH: %(cmd)s.")
+
+
+class UnsupportedObjectError(IotronicException):
+ message = _('Unsupported object type %(objtype)s')
+
+
+class OrphanedObjectError(IotronicException):
+ message = _('Cannot call %(method)s on orphaned %(objtype)s object')
+
+
+class UnsupportedDriverExtension(Invalid):
+ message = _('Driver %(driver)s does not support %(extension)s '
+ '(disabled or not implemented).')
+
+
+class IncompatibleObjectVersion(IotronicException):
+ message = _('Version %(objver)s of %(objname)s is not supported')
+
+
+class GlanceConnectionFailed(IotronicException):
+ message = _("Connection to glance host %(host)s:%(port)s failed: "
+ "%(reason)s")
+
+
+class ImageNotAuthorized(NotAuthorized):
+ message = _("Not authorized for image %(image_id)s.")
+
+
+class InvalidImageRef(Invalid):
+ message = _("Invalid image href %(image_href)s.")
+
+
+class ImageRefValidationFailed(IotronicException):
+ message = _("Validation of image href %(image_href)s failed, "
+ "reason: %(reason)s")
+
+
+class ImageDownloadFailed(IotronicException):
+ message = _("Failed to download image %(image_href)s, reason: %(reason)s")
+
+
+class KeystoneUnauthorized(IotronicException):
+ message = _("Not authorized in Keystone.")
+
+
+class KeystoneFailure(IotronicException):
+ pass
+
+
+class CatalogNotFound(IotronicException):
+ message = _("Service type %(service_type)s with endpoint type "
+ "%(endpoint_type)s not found in keystone service catalog.")
+
+
+class ServiceUnavailable(IotronicException):
+ message = _("Connection failed")
+
+
+class Forbidden(IotronicException):
+ message = _("Requested OpenStack Images API is forbidden")
+
+
+class BadRequest(IotronicException):
+ pass
+
+
+class InvalidEndpoint(IotronicException):
+ message = _("The provided endpoint is invalid")
+
+
+class CommunicationError(IotronicException):
+ message = _("Unable to communicate with the server.")
+
+
+class HTTPForbidden(Forbidden):
+ pass
+
+
+class Unauthorized(IotronicException):
+ pass
+
+
+class HTTPNotFound(NotFound):
+ pass
+
+
+class ConfigNotFound(IotronicException):
+ message = _("Could not find config at %(path)s")
+
+
+class NodeLocked(Conflict):
+ message = _("Node %(node)s is locked by host %(host)s, please retry "
+ "after the current operation is completed.")
+
+
+class NodeNotLocked(Invalid):
+ message = _("Node %(node)s found not to be locked on release")
+
+
+class NoFreeConductorWorker(TemporaryFailure):
+ message = _('Requested action cannot be performed due to lack of free '
+ 'conductor workers.')
+ code = 503 # Service Unavailable (temporary).
+
+
+class VendorPassthruException(IotronicException):
+ pass
+
+
+class ConfigInvalid(IotronicException):
+ message = _("Invalid configuration file. %(error_msg)s")
+
+
+class DriverLoadError(IotronicException):
+ message = _("Driver %(driver)s could not be loaded. Reason: %(reason)s.")
+
+
+class ConsoleError(IotronicException):
+ pass
+
+
+class NoConsolePid(ConsoleError):
+ message = _("Could not find pid in pid file %(pid_path)s")
+
+
+class ConsoleSubprocessFailed(ConsoleError):
+ message = _("Console subprocess failed to start. %(error)s")
+
+
+class PasswordFileFailedToCreate(IotronicException):
+ message = _("Failed to create the password file. %(error)s")
+
+
+class IBootOperationError(IotronicException):
+ pass
+
+
+class IloOperationError(IotronicException):
+ message = _("%(operation)s failed, error: %(error)s")
+
+
+class IloOperationNotSupported(IotronicException):
+ message = _("%(operation)s not supported. error: %(error)s")
+
+
+class DracRequestFailed(IotronicException):
+ pass
+
+
+class DracClientError(DracRequestFailed):
+ message = _('DRAC client failed. '
+ 'Last error (cURL error code): %(last_error)s, '
+ 'fault string: "%(fault_string)s" '
+ 'response_code: %(response_code)s')
+
+
+class DracOperationFailed(DracRequestFailed):
+ message = _('DRAC operation failed. Message: %(message)s')
+
+
+class DracUnexpectedReturnValue(DracRequestFailed):
+ message = _('DRAC operation yielded return value %(actual_return_value)s '
+ 'that is neither error nor expected %(expected_return_value)s')
+
+
+class DracPendingConfigJobExists(IotronicException):
+ message = _('Another job with ID %(job_id)s is already created '
+ 'to configure %(target)s. Wait until existing job '
+ 'is completed or is canceled')
+
+
+class DracInvalidFilterDialect(IotronicException):
+ message = _('Invalid filter dialect \'%(invalid_filter)s\'. '
+ 'Supported options are %(supported)s')
+
+
+class FailedToGetSensorData(IotronicException):
+ message = _("Failed to get sensor data for node %(node)s. "
+ "Error: %(error)s")
+
+
+class FailedToParseSensorData(IotronicException):
+ message = _("Failed to parse sensor data for node %(node)s. "
+ "Error: %(error)s")
+
+
+class InsufficientDiskSpace(IotronicException):
+ message = _("Disk volume where '%(path)s' is located doesn't have "
+ "enough disk space. Required %(required)d MiB, "
+ "only %(actual)d MiB available space present.")
+
+
+class ImageCreationFailed(IotronicException):
+ message = _('Creating %(image_type)s image failed: %(error)s')
+
+
+class SwiftOperationError(IotronicException):
+ message = _("Swift operation '%(operation)s' failed: %(error)s")
+
+
+class SNMPFailure(IotronicException):
+ message = _("SNMP operation '%(operation)s' failed: %(error)s")
+
+
+class FileSystemNotSupported(IotronicException):
+ message = _("Failed to create a file system. "
+ "File system %(fs)s is not supported.")
+
+
+class IRMCOperationError(IotronicException):
+ message = _('iRMC %(operation)s failed. Reason: %(error)s')
+
+
+class VirtualBoxOperationFailed(IotronicException):
+ message = _("VirtualBox operation '%(operation)s' failed. "
+ "Error: %(error)s")
+
+
+class HardwareInspectionFailure(IotronicException):
+ message = _("Failed to inspect hardware. Reason: %(error)s")
+
+
+class NodeCleaningFailure(IotronicException):
+ message = _("Failed to clean node %(node)s: %(reason)s")
+
+
+class PathNotFound(IotronicException):
+ message = _("Path %(dir)s does not exist.")
+
+
+class DirectoryNotWritable(IotronicException):
+ message = _("Directory %(dir)s is not writable.")
+
+
+# Iotronic-specific exceptions for board resources.
+
+
+class BoardNotFound(NotFound):
+    message = _("Board %(board)s could not be found.")
+
+
+class BoardLocked(Conflict):
+    message = _("Board %(board)s is locked by host %(host)s, please retry "
+                "after the current operation is completed.")
+
+
+class BoardAssociated(InvalidState):
+    message = _("Board %(board)s is associated with instance %(instance)s.")
diff --git a/iotronic/common/fsm.py b/iotronic/common/fsm.py
new file mode 100644
index 0000000..16b202c
--- /dev/null
+++ b/iotronic/common/fsm.py
@@ -0,0 +1,239 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""State machine modelling, copied from TaskFlow project.
+
+This work will be turned into a library.
+See https://github.com/harlowja/automaton
+
+This is being used in the implementation of:
+http://specs.openstack.org/openstack/iotronic-specs/specs/kilo/new-iotronic-state-machine.html
+"""
+
+from collections import OrderedDict # noqa
+
+import six
+
+from iotronic.common import exception as excp
+from iotronic.common.i18n import _
+
+
+class _Jump(object):
+ """A FSM transition tracks this data while jumping."""
+ def __init__(self, name, on_enter, on_exit):
+ self.name = name
+ self.on_enter = on_enter
+ self.on_exit = on_exit
+
+
+class FSM(object):
+ """A finite state machine.
+
+    This class models a state machine, and expects an outside caller to
+    manually trigger the state changes one at a time by invoking
+    process_event().
+    """
+ def __init__(self, start_state=None):
+ self._transitions = {}
+ self._states = OrderedDict()
+ self._start_state = start_state
+ self._target_state = None
+ # Note that _current is a _Jump instance
+ self._current = None
+
+ @property
+ def start_state(self):
+ return self._start_state
+
+ @property
+ def current_state(self):
+ if self._current is not None:
+ return self._current.name
+ return None
+
+ @property
+ def target_state(self):
+ return self._target_state
+
+ @property
+ def terminated(self):
+ """Returns whether the state machine is in a terminal state."""
+ if self._current is None:
+ return False
+ return self._states[self._current.name]['terminal']
+
+ def add_state(self, state, on_enter=None, on_exit=None,
+ target=None, terminal=None, stable=False):
+ """Adds a given state to the state machine.
+
+        The on_enter and on_exit callbacks, if provided, must accept two
+        positional parameters: the state being entered (for on_enter) or
+        exited (for on_exit), and the event that caused the transition.
+
+ :param stable: Use this to specify that this state is a stable/passive
+ state. A state must have been previously defined as
+ 'stable' before it can be used as a 'target'
+ :param target: The target state for 'state' to go to. Before a state
+ can be used as a target it must have been previously
+ added and specified as 'stable'
+ """
+ if state in self._states:
+ raise excp.Duplicate(_("State '%s' already defined") % state)
+ if on_enter is not None:
+ if not six.callable(on_enter):
+ raise ValueError(_("On enter callback must be callable"))
+ if on_exit is not None:
+ if not six.callable(on_exit):
+ raise ValueError(_("On exit callback must be callable"))
+ if target is not None and target not in self._states:
+ raise excp.InvalidState(_("Target state '%s' does not exist")
+ % target)
+ if target is not None and not self._states[target]['stable']:
+ raise excp.InvalidState(
+ _("Target state '%s' is not a 'stable' state") % target)
+
+ self._states[state] = {
+ 'terminal': bool(terminal),
+ 'reactions': {},
+ 'on_enter': on_enter,
+ 'on_exit': on_exit,
+ 'target': target,
+ 'stable': stable,
+ }
+ self._transitions[state] = OrderedDict()
+
+ def add_transition(self, start, end, event):
+ """Adds an allowed transition from start -> end for the given event."""
+ if start not in self._states:
+ raise excp.NotFound(
+ _("Can not add a transition on event '%(event)s' that "
+ "starts in a undefined state '%(state)s'")
+ % {'event': event, 'state': start})
+ if end not in self._states:
+ raise excp.NotFound(
+ _("Can not add a transition on event '%(event)s' that "
+ "ends in a undefined state '%(state)s'")
+ % {'event': event, 'state': end})
+ self._transitions[start][event] = _Jump(end,
+ self._states[end]['on_enter'],
+ self._states[start]['on_exit'])
+
+ def process_event(self, event):
+ """Trigger a state change in response to the provided event."""
+ current = self._current
+ if current is None:
+ raise excp.InvalidState(_("Can only process events after"
+ " being initialized (not before)"))
+ if self._states[current.name]['terminal']:
+ raise excp.InvalidState(
+ _("Can not transition from terminal "
+ "state '%(state)s' on event '%(event)s'")
+ % {'state': current.name, 'event': event})
+ if event not in self._transitions[current.name]:
+ raise excp.InvalidState(
+ _("Can not transition from state '%(state)s' on "
+ "event '%(event)s' (no defined transition)")
+ % {'state': current.name, 'event': event})
+ replacement = self._transitions[current.name][event]
+ if current.on_exit is not None:
+ current.on_exit(current.name, event)
+ if replacement.on_enter is not None:
+ replacement.on_enter(replacement.name, event)
+ self._current = replacement
+
+ # clear _target if we've reached it
+ if (self._target_state is not None and
+ self._target_state == replacement.name):
+ self._target_state = None
+ # if new state has a different target, update the target
+ if self._states[replacement.name]['target'] is not None:
+ self._target_state = self._states[replacement.name]['target']
+
+ def is_valid_event(self, event):
+ """Check whether the event is actionable in the current state."""
+ current = self._current
+ if current is None:
+ return False
+ if self._states[current.name]['terminal']:
+ return False
+ if event not in self._transitions[current.name]:
+ return False
+ return True
+
+ def initialize(self, state=None):
+ """Sets up the state machine.
+
+        Sets the current state to the specified state, or to start_state
+        if no state was specified.
+ """
+ if state is None:
+ state = self._start_state
+ if state not in self._states:
+ raise excp.NotFound(_("Can not start from an undefined"
+ " state '%s'") % (state))
+ if self._states[state]['terminal']:
+ raise excp.InvalidState(_("Can not start from a terminal"
+ " state '%s'") % (state))
+ self._current = _Jump(state, None, None)
+ self._target_state = self._states[state]['target']
+
+ def copy(self, shallow=False):
+ """Copies the current state machine (shallow or deep).
+
+ NOTE(harlowja): the copy will be left in an *uninitialized* state.
+
+ NOTE(harlowja): when a shallow copy is requested the copy will share
+ the same transition table and state table as the
+                        source; this can be advantageous when the states
+                        and transitions are defined once and copies are
+                        used to run with, each copy tracking its own
+                        current state.
+ """
+ c = FSM(self.start_state)
+ if not shallow:
+ for state, data in six.iteritems(self._states):
+ copied_data = data.copy()
+ copied_data['reactions'] = copied_data['reactions'].copy()
+ c._states[state] = copied_data
+ for state, data in six.iteritems(self._transitions):
+ c._transitions[state] = data.copy()
+ else:
+ c._transitions = self._transitions
+ c._states = self._states
+ return c
+
+ def __contains__(self, state):
+ """Returns if this state exists in the machines known states."""
+ return state in self._states
+
+ @property
+ def states(self):
+ """Returns a list of the state names."""
+ return list(six.iterkeys(self._states))
+
+ def __iter__(self):
+ """Iterates over (start, event, end) transition tuples."""
+ for state in six.iterkeys(self._states):
+ for event, target in six.iteritems(self._transitions[state]):
+ yield (state, event, target.name)
+
+ @property
+ def events(self):
+ """Returns how many events exist."""
+ c = 0
+ for state in six.iterkeys(self._states):
+ c += len(self._transitions[state])
+ return c
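+
+
+# NOTE: a minimal usage sketch; the state and event names are hypothetical:
+#
+#   m = FSM('down')
+#   m.add_state('down', stable=True)
+#   m.add_state('up', stable=True)
+#   m.add_transition('down', 'up', 'power_on')
+#   m.add_transition('up', 'down', 'power_off')
+#   m.initialize()               # current_state == 'down'
+#   m.process_event('power_on')  # current_state == 'up'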
diff --git a/iotronic/common/glance_service/__init__.py b/iotronic/common/glance_service/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iotronic/common/glance_service/base_image_service.py b/iotronic/common/glance_service/base_image_service.py
new file mode 100644
index 0000000..44377c8
--- /dev/null
+++ b/iotronic/common/glance_service/base_image_service.py
@@ -0,0 +1,288 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import functools
+import logging
+import os
+import sys
+import time
+
+from glanceclient import client
+from glanceclient import exc as glance_exc
+from oslo_config import cfg
+import sendfile
+import six
+import six.moves.urllib.parse as urlparse
+
+from iotronic.common import exception
+from iotronic.common.glance_service import service_utils
+from iotronic.common.i18n import _LE
+
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+def _translate_image_exception(image_id, exc_value):
+ if isinstance(exc_value, (glance_exc.Forbidden,
+ glance_exc.Unauthorized)):
+ return exception.ImageNotAuthorized(image_id=image_id)
+ if isinstance(exc_value, glance_exc.NotFound):
+ return exception.ImageNotFound(image_id=image_id)
+ if isinstance(exc_value, glance_exc.BadRequest):
+ return exception.Invalid(exc_value)
+ return exc_value
+
+
+def _translate_plain_exception(exc_value):
+ if isinstance(exc_value, (glance_exc.Forbidden,
+ glance_exc.Unauthorized)):
+ return exception.NotAuthorized(exc_value)
+ if isinstance(exc_value, glance_exc.NotFound):
+ return exception.NotFound(exc_value)
+ if isinstance(exc_value, glance_exc.BadRequest):
+ return exception.Invalid(exc_value)
+ return exc_value
+
+
+def check_image_service(func):
+ """Creates a glance client if doesn't exists and calls the function."""
+ @functools.wraps(func)
+ def wrapper(self, *args, **kwargs):
+ """Wrapper around methods calls.
+
+ :param image_href: href that describes the location of an image
+ """
+
+ if self.client:
+ return func(self, *args, **kwargs)
+
+ image_href = kwargs.get('image_href')
+ (image_id, self.glance_host,
+ self.glance_port, use_ssl) = service_utils.parse_image_ref(image_href)
+
+ if use_ssl:
+ scheme = 'https'
+ else:
+ scheme = 'http'
+ params = {}
+ params['insecure'] = CONF.glance.glance_api_insecure
+ if CONF.glance.auth_strategy == 'keystone':
+ params['token'] = self.context.auth_token
+ endpoint = '%s://%s:%s' % (scheme, self.glance_host, self.glance_port)
+ self.client = client.Client(self.version,
+ endpoint, **params)
+ return func(self, *args, **kwargs)
+ return wrapper
+
+
+class BaseImageService(object):
+
+ def __init__(self, client=None, version=1, context=None):
+ self.client = client
+ self.version = version
+ self.context = context
+
+ def call(self, method, *args, **kwargs):
+ """Call a glance client method.
+
+        If we get a connection error, retry the request according to
+        CONF.glance.glance_num_retries.
+
+ :param method: The method requested to be called.
+ :param args: A list of positional arguments for the method called
+ :param kwargs: A dict of keyword arguments for the method called
+
+ :raises: GlanceConnectionFailed
+ """
+ retry_excs = (glance_exc.ServiceUnavailable,
+ glance_exc.InvalidEndpoint,
+ glance_exc.CommunicationError)
+ image_excs = (glance_exc.Forbidden,
+ glance_exc.Unauthorized,
+ glance_exc.NotFound,
+ glance_exc.BadRequest)
+ num_attempts = 1 + CONF.glance.glance_num_retries
+
+ for attempt in range(1, num_attempts + 1):
+ try:
+ return getattr(self.client.images, method)(*args, **kwargs)
+ except retry_excs as e:
+ host = self.glance_host
+ port = self.glance_port
+ error_msg = _LE("Error contacting glance server "
+ "'%(host)s:%(port)s' for '%(method)s', attempt"
+ " %(attempt)s of %(num_attempts)s failed.")
+ LOG.exception(error_msg, {'host': host,
+ 'port': port,
+ 'num_attempts': num_attempts,
+ 'attempt': attempt,
+ 'method': method})
+ if attempt == num_attempts:
+ raise exception.GlanceConnectionFailed(host=host,
+ port=port,
+ reason=str(e))
+ time.sleep(1)
+ except image_excs as e:
+ exc_type, exc_value, exc_trace = sys.exc_info()
+ if method == 'list':
+ new_exc = _translate_plain_exception(
+ exc_value)
+ else:
+ new_exc = _translate_image_exception(
+ args[0], exc_value)
+ six.reraise(type(new_exc), new_exc, exc_trace)
+
+ @check_image_service
+ def _detail(self, method='list', **kwargs):
+ """Calls out to Glance for a list of detailed image information.
+
+ :returns: A list of dicts containing image metadata.
+ """
+ LOG.debug("Getting a full list of images metadata from glance.")
+ params = service_utils.extract_query_params(kwargs, self.version)
+
+ images = self.call(method, **params)
+
+ _images = []
+ for image in images:
+ if service_utils.is_image_available(self.context, image):
+ _images.append(service_utils.translate_from_glance(image))
+
+ return _images
+
+ @check_image_service
+ def _show(self, image_href, method='get'):
+ """Returns a dict with image data for the given opaque image id.
+
+        :param image_href: The opaque image identifier or full image href.
+ :returns: A dict containing image metadata.
+
+ :raises: ImageNotFound
+ """
+ LOG.debug("Getting image metadata from glance. Image: %s"
+ % image_href)
+ (image_id, self.glance_host,
+ self.glance_port, use_ssl) = service_utils.parse_image_ref(image_href)
+
+ image = self.call(method, image_id)
+
+ if not service_utils.is_image_available(self.context, image):
+ raise exception.ImageNotFound(image_id=image_id)
+
+ base_image_meta = service_utils.translate_from_glance(image)
+ return base_image_meta
+
+ @check_image_service
+ def _download(self, image_id, data=None, method='data'):
+ """Calls out to Glance for data and writes data.
+
+ :param image_id: The opaque image identifier.
+ :param data: (Optional) File object to write data to.
+ """
+ (image_id, self.glance_host,
+ self.glance_port, use_ssl) = service_utils.parse_image_ref(image_id)
+
+ if (self.version == 2 and
+ 'file' in CONF.glance.allowed_direct_url_schemes):
+
+ location = self._get_location(image_id)
+ url = urlparse.urlparse(location)
+ if url.scheme == "file":
+ with open(url.path, "r") as f:
+ filesize = os.path.getsize(f.name)
+ sendfile.sendfile(data.fileno(), f.fileno(), 0, filesize)
+ return
+
+ image_chunks = self.call(method, image_id)
+
+ if data is None:
+ return image_chunks
+ else:
+ for chunk in image_chunks:
+ data.write(chunk)
+
+ @check_image_service
+ def _create(self, image_meta, data=None, method='create'):
+ """Store the image data and return the new image object.
+
+ :param image_meta: A dict containing image metadata
+ :param data: (Optional) File object to create image from.
+        :returns: dict -- Newly created image metadata
+ """
+ sent_service_image_meta = service_utils.translate_to_glance(image_meta)
+
+ # TODO(ghe): Allow copy-from or location headers Bug #1199532
+
+ if data:
+ sent_service_image_meta['data'] = data
+
+ recv_service_image_meta = self.call(method, **sent_service_image_meta)
+
+ return service_utils.translate_from_glance(recv_service_image_meta)
+
+ @check_image_service
+ def _update(self, image_id, image_meta, data=None, method='update',
+ purge_props=False):
+ """Modify the given image with the new data.
+
+ :param image_id: The opaque image identifier.
+ :param data: (Optional) File object to update data from.
+ :param purge_props: (Optional=False) Purge existing properties.
+        :returns: dict -- Newly created image metadata
+ """
+ (image_id, self.glance_host,
+ self.glance_port, use_ssl) = service_utils.parse_image_ref(image_id)
+ if image_meta:
+ image_meta = service_utils.translate_to_glance(image_meta)
+ else:
+ image_meta = {}
+ if self.version == 1:
+ image_meta['purge_props'] = purge_props
+ if data:
+ image_meta['data'] = data
+
+ # NOTE(bcwaldon): id is not an editable field, but it is likely to be
+ # passed in by calling code. Let's be nice and ignore it.
+ image_meta.pop('id', None)
+
+ image_meta = self.call(method, image_id, **image_meta)
+
+ if self.version == 2 and data:
+ self.call('upload', image_id, data)
+ image_meta = self._show(image_id)
+
+ return image_meta
+
+ @check_image_service
+ def _delete(self, image_id, method='delete'):
+ """Delete the given image.
+
+ :param image_id: The opaque image identifier.
+
+ :raises: ImageNotFound if the image does not exist.
+ :raises: NotAuthorized if the user is not an owner.
+ :raises: ImageNotAuthorized if the user is not authorized.
+
+ """
+ (image_id, glance_host,
+ glance_port, use_ssl) = service_utils.parse_image_ref(image_id)
+
+ self.call(method, image_id)
diff --git a/iotronic/common/glance_service/service.py b/iotronic/common/glance_service/service.py
new file mode 100644
index 0000000..1a7f875
--- /dev/null
+++ b/iotronic/common/glance_service/service.py
@@ -0,0 +1,81 @@
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+
+import six
+
+
+@six.add_metaclass(abc.ABCMeta)
+class ImageService(object):
+ """Provides storage and retrieval of disk image objects within Glance."""
+
+ @abc.abstractmethod
+ def __init__(self):
+ """Constructor."""
+
+ @abc.abstractmethod
+ def detail(self):
+ """Calls out to Glance for a list of detailed image information."""
+
+ @abc.abstractmethod
+ def show(self, image_id):
+ """Returns a dict with image data for the given opaque image id.
+
+ :param image_id: The opaque image identifier.
+ :returns: A dict containing image metadata.
+
+ :raises: ImageNotFound
+ """
+
+ @abc.abstractmethod
+ def download(self, image_id, data=None):
+ """Calls out to Glance for data and writes data.
+
+ :param image_id: The opaque image identifier.
+ :param data: (Optional) File object to write data to.
+ """
+
+ @abc.abstractmethod
+ def create(self, image_meta, data=None):
+ """Store the image data and return the new image object.
+
+ :param image_meta: A dict containing image metadata
+ :param data: (Optional) File object to create image from.
+        :returns: dict -- Newly created image metadata
+ """
+
+ @abc.abstractmethod
+ def update(self, image_id,
+ image_meta, data=None, purge_props=False):
+ """Modify the given image with the new data.
+
+ :param image_id: The opaque image identifier.
+ :param data: (Optional) File object to update data from.
+        :param purge_props: (Optional=False) Purge existing properties.
+        :returns: dict -- Newly created image metadata
+ """
+
+ @abc.abstractmethod
+ def delete(self, image_id):
+ """Delete the given image.
+
+ :param image_id: The opaque image identifier.
+
+ :raises: ImageNotFound if the image does not exist.
+ :raises: NotAuthorized if the user is not an owner.
+ :raises: ImageNotAuthorized if the user is not authorized.
+
+ """
diff --git a/iotronic/common/glance_service/service_utils.py b/iotronic/common/glance_service/service_utils.py
new file mode 100644
index 0000000..cd304fa
--- /dev/null
+++ b/iotronic/common/glance_service/service_utils.py
@@ -0,0 +1,247 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import itertools
+import logging
+import random
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+from oslo_utils import timeutils
+from oslo_utils import uuidutils
+import six
+import six.moves.urllib.parse as urlparse
+
+from iotronic.common import exception
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+_GLANCE_API_SERVER = None
+""" iterator that cycles (indefinitely) over glance API servers. """
+
+
+def generate_glance_url():
+ """Generate the URL to glance."""
+ return "%s://%s:%d" % (CONF.glance.glance_protocol,
+ CONF.glance.glance_host,
+ CONF.glance.glance_port)
+
+
+def generate_image_url(image_ref):
+ """Generate an image URL from an image_ref."""
+ return "%s/images/%s" % (generate_glance_url(), image_ref)
+
+
+def _extract_attributes(image):
+ IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
+ 'container_format', 'checksum', 'id',
+ 'name', 'created_at', 'updated_at',
+ 'deleted_at', 'deleted', 'status',
+ 'min_disk', 'min_ram', 'is_public']
+
+ IMAGE_ATTRIBUTES_V2 = ['tags', 'visibility', 'protected',
+ 'file', 'schema']
+
+ output = {}
+ for attr in IMAGE_ATTRIBUTES:
+ output[attr] = getattr(image, attr, None)
+
+ output['properties'] = getattr(image, 'properties', {})
+
+ if hasattr(image, 'schema') and 'v2' in image['schema']:
+ IMAGE_ATTRIBUTES = IMAGE_ATTRIBUTES + IMAGE_ATTRIBUTES_V2
+ for attr in IMAGE_ATTRIBUTES_V2:
+ output[attr] = getattr(image, attr, None)
+ output['schema'] = image['schema']
+
+ for image_property in set(image.keys()) - set(IMAGE_ATTRIBUTES):
+ output['properties'][image_property] = image[image_property]
+
+ return output
+
+
+def _convert_timestamps_to_datetimes(image_meta):
+ """Returns image with timestamp fields converted to datetime objects."""
+ for attr in ['created_at', 'updated_at', 'deleted_at']:
+ if image_meta.get(attr):
+ image_meta[attr] = timeutils.parse_isotime(image_meta[attr])
+ return image_meta
+
+_CONVERT_PROPS = ('block_device_mapping', 'mappings')
+
+
+def _convert(metadata, method):
+ metadata = copy.deepcopy(metadata)
+ properties = metadata.get('properties')
+ if properties:
+ for attr in _CONVERT_PROPS:
+ if attr in properties:
+ prop = properties[attr]
+ if method == 'from':
+ if isinstance(prop, six.string_types):
+ properties[attr] = jsonutils.loads(prop)
+ if method == 'to':
+ if not isinstance(prop, six.string_types):
+ properties[attr] = jsonutils.dumps(prop)
+ return metadata
+
+
+def _remove_read_only(image_meta):
+ IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at']
+ output = copy.deepcopy(image_meta)
+ for attr in IMAGE_ATTRIBUTES:
+ if attr in output:
+ del output[attr]
+ return output
+
+
+def _get_api_server_iterator():
+ """Return iterator over shuffled API servers.
+
+ Shuffle a list of CONF.glance.glance_api_servers and return an iterator
+ that will cycle through the list, looping around to the beginning if
+ necessary.
+
+ If CONF.glance.glance_api_servers isn't set, we fall back to using this
+ as the server: CONF.glance.glance_host:CONF.glance.glance_port.
+
+ :returns: iterator that cycles (indefinitely) over shuffled glance API
+ servers. The iterator returns tuples of (host, port, use_ssl).
+ """
+ api_servers = []
+
+ configured_servers = (CONF.glance.glance_api_servers or
+ ['%s:%s' % (CONF.glance.glance_host,
+ CONF.glance.glance_port)])
+ for api_server in configured_servers:
+ if '//' not in api_server:
+ api_server = '%s://%s' % (CONF.glance.glance_protocol, api_server)
+ url = urlparse.urlparse(api_server)
+ port = url.port or 80
+ host = url.netloc.split(':', 1)[0]
+ use_ssl = (url.scheme == 'https')
+ api_servers.append((host, port, use_ssl))
+ random.shuffle(api_servers)
+ return itertools.cycle(api_servers)
+
+
+def _get_api_server():
+ """Return a Glance API server.
+
+ :returns: for an API server, the tuple (host-or-IP, port, use_ssl), where
+ use_ssl is True to use the 'https' scheme, and False to use 'http'.
+ """
+ global _GLANCE_API_SERVER
+
+ if not _GLANCE_API_SERVER:
+ _GLANCE_API_SERVER = _get_api_server_iterator()
+ return six.next(_GLANCE_API_SERVER)
+
+
+def parse_image_ref(image_href):
+ """Parse an image href into composite parts.
+
+ :param image_href: href of an image
+ :returns: a tuple of the form (image_id, host, port, use_ssl)
+
+    :raises: InvalidImageRef if the image href is malformed.
+ """
+ if '/' not in str(image_href):
+ image_id = image_href
+ (glance_host, glance_port, use_ssl) = _get_api_server()
+ return (image_id, glance_host, glance_port, use_ssl)
+ else:
+ try:
+ url = urlparse.urlparse(image_href)
+ if url.scheme == 'glance':
+ (glance_host, glance_port, use_ssl) = _get_api_server()
+ image_id = image_href.split('/')[-1]
+ else:
+ glance_port = url.port or 80
+ glance_host = url.netloc.split(':', 1)[0]
+ image_id = url.path.split('/')[-1]
+ use_ssl = (url.scheme == 'https')
+ return (image_id, glance_host, glance_port, use_ssl)
+ except ValueError:
+ raise exception.InvalidImageRef(image_href=image_href)
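+
+# NOTE: illustrative return values; hosts and ports below are hypothetical
+# and, for bare IDs, depend on the configured glance API servers:
+#
+#   parse_image_ref('<uuid>')           # -> ('<uuid>', cfg host, port, ssl)
+#   parse_image_ref('glance://<uuid>')  # same, resolved via _get_api_server
+#   parse_image_ref('https://img.example.com:9292/v2/images/<uuid>')
+#   # -> ('<uuid>', 'img.example.com', 9292, True)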
+
+
+def extract_query_params(params, version):
+ _params = {}
+ accepted_params = ('filters', 'marker', 'limit',
+ 'sort_key', 'sort_dir')
+ for param in accepted_params:
+ if params.get(param):
+ _params[param] = params.get(param)
+ # ensure filters is a dict
+ _params.setdefault('filters', {})
+
+ # NOTE(vish): don't filter out private images
+    # NOTE(ghe): in v2, omitting visibility doesn't filter private images
+ if version == 1:
+ _params['filters'].setdefault('is_public', 'none')
+
+ return _params
+
+
+def translate_to_glance(image_meta):
+ image_meta = _convert(image_meta, 'to')
+ image_meta = _remove_read_only(image_meta)
+ return image_meta
+
+
+def translate_from_glance(image):
+ image_meta = _extract_attributes(image)
+ image_meta = _convert_timestamps_to_datetimes(image_meta)
+ image_meta = _convert(image_meta, 'from')
+ return image_meta
+
+
+def is_image_available(context, image):
+ """Check image availability.
+
+    This check is needed in case Iotronic and Glance are deployed
+    without authentication turned on.
+ """
+ # The presence of an auth token implies this is an authenticated
+ # request and we need not handle the noauth use-case.
+ if hasattr(context, 'auth_token') and context.auth_token:
+ return True
+ if image.is_public or context.is_admin:
+ return True
+ properties = image.properties
+ if context.project_id and ('owner_id' in properties):
+ return str(properties['owner_id']) == str(context.project_id)
+
+ if context.project_id and ('project_id' in properties):
+ return str(properties['project_id']) == str(context.project_id)
+
+ try:
+ user_id = properties['user_id']
+ except KeyError:
+ return False
+
+ return str(user_id) == str(context.user_id)
+
+
+def is_glance_image(image_href):
+ if not isinstance(image_href, six.string_types):
+ return False
+ return (image_href.startswith('glance://') or
+ uuidutils.is_uuid_like(image_href))
diff --git a/iotronic/common/glance_service/v1/__init__.py b/iotronic/common/glance_service/v1/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iotronic/common/glance_service/v1/image_service.py b/iotronic/common/glance_service/v1/image_service.py
new file mode 100644
index 0000000..a6fcef0
--- /dev/null
+++ b/iotronic/common/glance_service/v1/image_service.py
@@ -0,0 +1,41 @@
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from iotronic.common.glance_service import base_image_service
+from iotronic.common.glance_service import service
+
+
+class GlanceImageService(base_image_service.BaseImageService,
+ service.ImageService):
+
+ def detail(self, **kwargs):
+ return self._detail(method='list', **kwargs)
+
+ def show(self, image_id):
+ return self._show(image_id, method='get')
+
+ def download(self, image_id, data=None):
+ return self._download(image_id, method='data', data=data)
+
+ def create(self, image_meta, data=None):
+ return self._create(image_meta, method='create', data=data)
+
+ def update(self, image_id, image_meta, data=None, purge_props=False):
+ return self._update(image_id, image_meta, data=data, method='update',
+ purge_props=purge_props)
+
+ def delete(self, image_id):
+ return self._delete(image_id, method='delete')
diff --git a/iotronic/common/glance_service/v2/__init__.py b/iotronic/common/glance_service/v2/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iotronic/common/glance_service/v2/image_service.py b/iotronic/common/glance_service/v2/image_service.py
new file mode 100644
index 0000000..0656161
--- /dev/null
+++ b/iotronic/common/glance_service/v2/image_service.py
@@ -0,0 +1,231 @@
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_utils import uuidutils
+from swiftclient import utils as swift_utils
+
+from iotronic.common import exception as exc
+from iotronic.common.glance_service import base_image_service
+from iotronic.common.glance_service import service
+from iotronic.common.glance_service import service_utils
+from iotronic.common.i18n import _
+
+
+glance_opts = [
+ cfg.ListOpt('allowed_direct_url_schemes',
+ default=[],
+ help='A list of URL schemes that can be downloaded directly '
+ 'via the direct_url. Currently supported schemes: '
+ '[file].'),
+ # To upload this key to Swift:
+ # swift post -m Temp-Url-Key:correcthorsebatterystaple
+ cfg.StrOpt('swift_temp_url_key',
+ help='The secret token given to Swift to allow temporary URL '
+ 'downloads. Required for temporary URLs.',
+ secret=True),
+ cfg.IntOpt('swift_temp_url_duration',
+ default=1200,
+ help='The length of time in seconds that the temporary URL '
+ 'will be valid for. Defaults to 20 minutes. If some '
+ 'deploys get a 401 response code when trying to download '
+ 'from the temporary URL, try raising this duration.'),
+ cfg.StrOpt('swift_endpoint_url',
+ help='The "endpoint" (scheme, hostname, optional port) for '
+ 'the Swift URL of the form '
+ '"endpoint_url/api_version/account/container/object_id". '
+ 'Do not include trailing "/". '
+ 'For example, use "https://swift.example.com". '
+ 'Required for temporary URLs.'),
+ cfg.StrOpt('swift_api_version',
+ default='v1',
+ help='The Swift API version to create a temporary URL for. '
+ 'Defaults to "v1". Swift temporary URL format: '
+ '"endpoint_url/api_version/account/container/object_id"'),
+ cfg.StrOpt('swift_account',
+ help='The account that Glance uses to communicate with '
+ 'Swift. The format is "AUTH_uuid". "uuid" is the '
+ 'UUID for the account configured in the glance-api.conf. '
+ 'Required for temporary URLs. For example: '
+ '"AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30". '
+ 'Swift temporary URL format: '
+ '"endpoint_url/api_version/account/container/object_id"'),
+ cfg.StrOpt('swift_container',
+ default='glance',
+ help='The Swift container Glance is configured to store its '
+ 'images in. Defaults to "glance", which is the default '
+ 'in glance-api.conf. '
+ 'Swift temporary URL format: '
+ '"endpoint_url/api_version/account/container/object_id"'),
+ cfg.IntOpt('swift_store_multiple_containers_seed',
+ default=0,
+ help='This should match a config by the same name in the '
+ 'Glance configuration file. When set to 0, a '
+ 'single-tenant store will only use one '
+ 'container to store all images. When set to an integer '
+ 'value between 1 and 32, a single-tenant store will use '
+ 'multiple containers to store images, and this value '
+ 'will determine how many containers are created.'),
+]
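+
+# NOTE: a hypothetical iotronic.conf snippet wiring the options above,
+# reusing the example values from their help strings:
+#
+#   [glance]
+#   swift_temp_url_key = correcthorsebatterystaple
+#   swift_endpoint_url = https://swift.example.com
+#   swift_account = AUTH_a422b2-91f3-2f46-74b7-d7c9e8958f5d30
+#   swift_container = glance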
+
+CONF = cfg.CONF
+CONF.register_opts(glance_opts, group='glance')
+
+
+class GlanceImageService(base_image_service.BaseImageService,
+ service.ImageService):
+
+ def detail(self, **kwargs):
+ return self._detail(method='list', **kwargs)
+
+ def show(self, image_id):
+ return self._show(image_id, method='get')
+
+ def download(self, image_id, data=None):
+ return self._download(image_id, method='data', data=data)
+
+ def create(self, image_meta, data=None):
+ image_id = self._create(image_meta, method='create', data=None)['id']
+ return self.update(image_id, None, data)
+
+ def update(self, image_id, image_meta, data=None, purge_props=False):
+ # NOTE(ghe): purge_props not working until bug 1206472 solved
+ return self._update(image_id, image_meta, data, method='update',
+ purge_props=False)
+
+ def delete(self, image_id):
+ return self._delete(image_id, method='delete')
+
+ def swift_temp_url(self, image_info):
+ """Generate a no-auth Swift temporary URL.
+
+ This function will generate the temporary Swift URL using the image
+ id from Glance and the config options: 'swift_endpoint_url',
+ 'swift_api_version', 'swift_account' and 'swift_container'.
+ The temporary URL will be valid for 'swift_temp_url_duration' seconds.
+ This allows Iotronic to download a Glance image without passing around
+ an auth_token.
+
+ :param image_info: The return from a GET request to Glance for a
+ certain image_id. Should be a dictionary, with keys like 'name' and
+ 'checksum'. See
+ http://docs.openstack.org/developer/glance/glanceapi.html for
+ examples.
+ :returns: A signed Swift URL from which an image can be downloaded,
+ without authentication.
+
+ :raises: InvalidParameterValue if Swift config options are not set
+ correctly.
+ :raises: MissingParameterValue if a required parameter is not set.
+ :raises: ImageUnacceptable if the image info from Glance does not
+            have an image ID.
+ """
+ self._validate_temp_url_config()
+
+ if ('id' not in image_info or not
+ uuidutils.is_uuid_like(image_info['id'])):
+ raise exc.ImageUnacceptable(_(
+ 'The given image info does not have a valid image id: %s')
+ % image_info)
+
+ url_fragments = {
+ 'endpoint_url': CONF.glance.swift_endpoint_url,
+ 'api_version': CONF.glance.swift_api_version,
+ 'account': CONF.glance.swift_account,
+ 'container': self._get_swift_container(image_info['id']),
+ 'object_id': image_info['id']
+ }
+
+ template = '/{api_version}/{account}/{container}/{object_id}'
+ url_path = template.format(**url_fragments)
+ path = swift_utils.generate_temp_url(
+ path=url_path,
+ seconds=CONF.glance.swift_temp_url_duration,
+ key=CONF.glance.swift_temp_url_key,
+ method='GET')
+
+ return '{endpoint_url}{url_path}'.format(
+ endpoint_url=url_fragments['endpoint_url'], url_path=path)
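+
+    # NOTE: a minimal usage sketch (names hypothetical); image_info is the
+    # dict returned by show():
+    #
+    #   url = glance.swift_temp_url(glance.show(image_id))
+    #   # -> "https://swift.example.com/v1/AUTH_.../glance/<id>?temp_url_sig=..."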
+
+ def _validate_temp_url_config(self):
+ """Validate the required settings for a temporary URL."""
+ if not CONF.glance.swift_temp_url_key:
+ raise exc.MissingParameterValue(_(
+ 'Swift temporary URLs require a shared secret to be created. '
+ 'You must provide "swift_temp_url_key" as a config option.'))
+ if not CONF.glance.swift_endpoint_url:
+ raise exc.MissingParameterValue(_(
+ 'Swift temporary URLs require a Swift endpoint URL. '
+ 'You must provide "swift_endpoint_url" as a config option.'))
+ if not CONF.glance.swift_account:
+ raise exc.MissingParameterValue(_(
+ 'Swift temporary URLs require a Swift account string. '
+ 'You must provide "swift_account" as a config option.'))
+ if CONF.glance.swift_temp_url_duration < 0:
+ raise exc.InvalidParameterValue(_(
+ '"swift_temp_url_duration" must be a positive integer.'))
+ seed_num_chars = CONF.glance.swift_store_multiple_containers_seed
+ if (seed_num_chars is None or seed_num_chars < 0
+ or seed_num_chars > 32):
+ raise exc.InvalidParameterValue(_(
+ "An integer value between 0 and 32 is required for"
+ " swift_store_multiple_containers_seed."))
+
+ def _get_swift_container(self, image_id):
+ """Get the Swift container the image is stored in.
+
+ Code based on: https://github.com/openstack/glance_store/blob/3cd690b3
+ 7dc9d935445aca0998e8aec34a3e3530/glance_store/
+ _drivers/swift/store.py#L725
+
+ Returns appropriate container name depending upon value of
+ ``swift_store_multiple_containers_seed``. In single-container mode,
+ which is a seed value of 0, simply returns ``swift_container``.
+ In multiple-container mode, returns ``swift_container`` as the
+        prefix plus a suffix determined by the multiple container seed.
+
+ examples:
+ single-container mode: 'glance'
+ multiple-container mode: 'glance_3a1' for image uuid 3A1xxxxxxx...
+
+ :param image_id: UUID of image
+ :returns: The name of the swift container the image is stored in
+ """
+ seed_num_chars = CONF.glance.swift_store_multiple_containers_seed
+
+ if seed_num_chars > 0:
+ image_id = str(image_id).lower()
+
+ num_dashes = image_id[:seed_num_chars].count('-')
+ num_chars = seed_num_chars + num_dashes
+ name_suffix = image_id[:num_chars]
+ new_container_name = (CONF.glance.swift_container +
+ '_' + name_suffix)
+ return new_container_name
+ else:
+ return CONF.glance.swift_container
+
+ def _get_location(self, image_id):
+ """Get storage URL.
+
+ Returns the direct url representing the backend storage location,
+ or None if this attribute is not shown by Glance.
+ """
+ image_meta = self.call('get', image_id)
+
+ if not service_utils.is_image_available(self.context, image_meta):
+ raise exc.ImageNotFound(image_id=image_id)
+
+ return getattr(image_meta, 'direct_url', None)
diff --git a/iotronic/common/grub_conf.template b/iotronic/common/grub_conf.template
new file mode 100644
index 0000000..2a979d2
--- /dev/null
+++ b/iotronic/common/grub_conf.template
@@ -0,0 +1,8 @@
+set default=0
+set timeout=5
+set hidden_timeout_quiet=false
+
+menuentry "boot_partition" {
+linuxefi {{ linux }} {{ kernel_params }} --
+initrdefi {{ initrd }}
+}
diff --git a/iotronic/common/hash_ring.py b/iotronic/common/hash_ring.py
new file mode 100644
index 0000000..10d52d5
--- /dev/null
+++ b/iotronic/common/hash_ring.py
@@ -0,0 +1,200 @@
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import bisect
+import hashlib
+import threading
+
+from oslo_config import cfg
+import six
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.db import api as dbapi
+
+hash_opts = [
+ cfg.IntOpt('hash_partition_exponent',
+ default=5,
+ help='Exponent to determine number of hash partitions to use '
+ 'when distributing load across conductors. Larger values '
+ 'will result in more even distribution of load and less '
+ 'load when rebalancing the ring, but more memory usage. '
+ 'Number of partitions per conductor is '
+ '(2^hash_partition_exponent). This determines the '
+ 'granularity of rebalancing: given 10 hosts, and an '
+                    'exponent of 2, there are 40 partitions in the ring. '
+ 'A few thousand partitions should make rebalancing '
+ 'smooth in most cases. The default is suitable for up to '
+                    'a few hundred conductors. Too many partitions have a '
+                    'CPU '
+ 'impact.'),
+ cfg.IntOpt('hash_distribution_replicas',
+ default=1,
+ help='[Experimental Feature] '
+ 'Number of hosts to map onto each hash partition. '
+ 'Setting this to more than one will cause additional '
+ 'conductor services to prepare deployment environments '
+ 'and potentially allow the Iotronic cluster to recover '
+ 'more quickly if a conductor instance is terminated.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(hash_opts)
+
+
+class HashRing(object):
+ """A stable hash ring.
+
+ We map item N to a host Y based on the closest lower hash:
+
+ - hash(item) -> partition
+ - hash(host) -> divider
+ - closest lower divider is the host to use
+ - we hash each host many times to spread load more finely
+ as otherwise adding a host gets (on average) 50% of the load of
+ just one other host assigned to it.
+ """
+
+ def __init__(self, hosts, replicas=None):
+ """Create a new hash ring across the specified hosts.
+
+ :param hosts: an iterable of hosts which will be mapped.
+ :param replicas: number of hosts to map to each hash partition,
+                         or len(hosts), whichever is smaller.
+ Default: CONF.hash_distribution_replicas
+
+ """
+ if replicas is None:
+ replicas = CONF.hash_distribution_replicas
+
+ try:
+ self.hosts = set(hosts)
+ self.replicas = replicas if replicas <= len(hosts) else len(hosts)
+ except TypeError:
+ raise exception.Invalid(
+ _("Invalid hosts supplied when building HashRing."))
+
+ self._host_hashes = {}
+ for host in hosts:
+ key = str(host).encode('utf8')
+ key_hash = hashlib.md5(key)
+ for p in range(2 ** CONF.hash_partition_exponent):
+ key_hash.update(key)
+ hashed_key = self._hash2int(key_hash)
+ self._host_hashes[hashed_key] = host
+ # Gather the (possibly colliding) resulting hashes into a bisectable
+ # list.
+ self._partitions = sorted(self._host_hashes.keys())
+
+ def _hash2int(self, key_hash):
+ """Convert the given hash's digest to a numerical value for the ring.
+
+ :returns: An integer equivalent value of the digest.
+ """
+ return int(key_hash.hexdigest(), 16)
+
+ def _get_partition(self, data):
+ try:
+ if six.PY3 and data is not None:
+ data = data.encode('utf-8')
+ key_hash = hashlib.md5(data)
+ hashed_key = self._hash2int(key_hash)
+ position = bisect.bisect(self._partitions, hashed_key)
+ return position if position < len(self._partitions) else 0
+ except TypeError:
+ raise exception.Invalid(
+ _("Invalid data supplied to HashRing.get_hosts."))
+
+ def get_hosts(self, data, ignore_hosts=None):
+ """Get the list of hosts which the supplied data maps onto.
+
+ :param data: A string identifier to be mapped across the ring.
+ :param ignore_hosts: A list of hosts to skip when performing the hash.
+ Useful to temporarily skip down hosts without
+ performing a full rebalance.
+ Default: None.
+ :returns: a list of hosts.
+ The length of this list depends on the number of replicas
+ this `HashRing` was created with. It may be less than this
+ if ignore_hosts is not None.
+ """
+ hosts = []
+ if ignore_hosts is None:
+ ignore_hosts = set()
+ else:
+ ignore_hosts = set(ignore_hosts)
+ ignore_hosts.intersection_update(self.hosts)
+ partition = self._get_partition(data)
+ for replica in range(0, self.replicas):
+ if len(hosts) + len(ignore_hosts) == len(self.hosts):
+ # prevent infinite loop - cannot allocate more fallbacks.
+ break
+ # Linear probing: partition N, then N+1 etc.
+ host = self._get_host(partition)
+ while host in hosts or host in ignore_hosts:
+ partition += 1
+ if partition >= len(self._partitions):
+ partition = 0
+ host = self._get_host(partition)
+ hosts.append(host)
+ return hosts
+
+ def _get_host(self, partition):
+ """Find what host is serving a partition.
+
+ :param partition: The index of the partition in the partition map.
+ e.g. 0 is the first partition, 1 is the second.
+ :return: The host object the ring was constructed with.
+ """
+ return self._host_hashes[self._partitions[partition]]
+
+
+class HashRingManager(object):
+ _hash_rings = None
+ _lock = threading.Lock()
+
+ def __init__(self):
+ self.dbapi = dbapi.get_instance()
+
+ @property
+ def ring(self):
+ # Hot path, no lock
+ if self._hash_rings is not None:
+ return self._hash_rings
+
+ with self._lock:
+ if self._hash_rings is None:
+ rings = self._load_hash_rings()
+ self.__class__._hash_rings = rings
+ return self._hash_rings
+
+ def _load_hash_rings(self):
+ rings = {}
+ d2c = self.dbapi.get_active_driver_dict()
+
+ for driver_name, hosts in d2c.items():
+ rings[driver_name] = HashRing(hosts)
+ return rings
+
+ @classmethod
+ def reset(cls):
+ with cls._lock:
+ cls._hash_rings = None
+
+ def __getitem__(self, driver_name):
+ try:
+ return self.ring[driver_name]
+ except KeyError:
+ raise exception.DriverNotFound(
+ _("The driver '%s' is unknown.") % driver_name)
diff --git a/iotronic/common/i18n.py b/iotronic/common/i18n.py
new file mode 100644
index 0000000..86ab68e
--- /dev/null
+++ b/iotronic/common/i18n.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import oslo_i18n as i18n
+
+_translators = i18n.TranslatorFactory(domain='iotronic')
+
+# The primary translation function using the well-known name "_"
+_ = _translators.primary
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = _translators.log_info
+_LW = _translators.log_warning
+_LE = _translators.log_error
+_LC = _translators.log_critical
diff --git a/iotronic/common/image_service.py b/iotronic/common/image_service.py
new file mode 100644
index 0000000..5b70812
--- /dev/null
+++ b/iotronic/common/image_service.py
@@ -0,0 +1,294 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import abc
+import os
+import shutil
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import importutils
+import requests
+import sendfile
+import six
+import six.moves.urllib.parse as urlparse
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common import keystone
+
+LOG = logging.getLogger(__name__)
+
+IMAGE_CHUNK_SIZE = 1024 * 1024 # 1mb
+
+
+CONF = cfg.CONF
+# Import this opt early so that it is available when registering
+# glance_opts below.
+CONF.import_opt('my_ip', 'iotronic.netconf')
+
+glance_opts = [
+ cfg.StrOpt('glance_host',
+ default='$my_ip',
+ help='Default glance hostname or IP address.'),
+ cfg.IntOpt('glance_port',
+ default=9292,
+ help='Default glance port.'),
+ cfg.StrOpt('glance_protocol',
+ default='http',
+ help='Default protocol to use when connecting to glance. '
+ 'Set to https for SSL.'),
+ cfg.ListOpt('glance_api_servers',
+ help='A list of the glance api servers available to iotronic. '
+ 'Prefix with https:// for SSL-based glance API servers. '
+ 'Format is [hostname|IP]:port.'),
+ cfg.BoolOpt('glance_api_insecure',
+ default=False,
+ help='Allow to perform insecure SSL (https) requests to '
+ 'glance.'),
+ cfg.IntOpt('glance_num_retries',
+ default=0,
+ help='Number of retries when downloading an image from '
+ 'glance.'),
+ cfg.StrOpt('auth_strategy',
+ default='keystone',
+ help='Authentication strategy to use when connecting to '
+ 'glance. Only "keystone" and "noauth" are currently '
+ 'supported by iotronic.'),
+]
+
+CONF.register_opts(glance_opts, group='glance')
+
+
+def import_versioned_module(version, submodule=None):
+ module = 'iotronic.common.glance_service.v%s' % version
+ if submodule:
+ module = '.'.join((module, submodule))
+ return importutils.try_import(module)
+
+
+def GlanceImageService(client=None, version=1, context=None):
+ module = import_versioned_module(version, 'image_service')
+ service_class = getattr(module, 'GlanceImageService')
+ if (context is not None and CONF.glance.auth_strategy == 'keystone'
+ and not context.auth_token):
+ context.auth_token = keystone.get_admin_auth_token()
+ return service_class(client, version, context)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class BaseImageService(object):
+ """Provides retrieval of disk images."""
+
+ @abc.abstractmethod
+ def validate_href(self, image_href):
+ """Validate image reference.
+
+ :param image_href: Image reference.
+ :raises: exception.ImageRefValidationFailed.
+ :returns: Information needed to further operate with an image.
+ """
+
+ @abc.abstractmethod
+ def download(self, image_href, image_file):
+ """Downloads image to specified location.
+
+ :param image_href: Image reference.
+ :param image_file: File object to write data to.
+ :raises: exception.ImageRefValidationFailed.
+ :raises: exception.ImageDownloadFailed.
+ """
+
+ @abc.abstractmethod
+ def show(self, image_href):
+ """Get dictionary of image properties.
+
+ :param image_href: Image reference.
+ :raises: exception.ImageRefValidationFailed.
+ :returns: dictionary of image properties.
+ """
+
+
+class HttpImageService(BaseImageService):
+ """Provides retrieval of disk images using HTTP."""
+
+ def validate_href(self, image_href):
+ """Validate HTTP image reference.
+
+ :param image_href: Image reference.
+ :raises: exception.ImageRefValidationFailed if HEAD request failed or
+ returned response code not equal to 200.
+ :returns: Response to HEAD request.
+ """
+ try:
+ response = requests.head(image_href)
+ if response.status_code != 200:
+ raise exception.ImageRefValidationFailed(
+ image_href=image_href,
+ reason=_("Got HTTP code %s instead of 200 in response to "
+ "HEAD request.") % response.status_code)
+ except requests.RequestException as e:
+ raise exception.ImageRefValidationFailed(image_href=image_href,
+ reason=e)
+ return response
+
+ def download(self, image_href, image_file):
+ """Downloads image to specified location.
+
+ :param image_href: Image reference.
+ :param image_file: File object to write data to.
+ :raises: exception.ImageRefValidationFailed if GET request returned
+ response code not equal to 200.
+ :raises: exception.ImageDownloadFailed if:
+ * IOError happened during file write;
+ * GET request failed.
+ """
+ try:
+ response = requests.get(image_href, stream=True)
+ if response.status_code != 200:
+ raise exception.ImageRefValidationFailed(
+ image_href=image_href,
+ reason=_("Got HTTP code %s instead of 200 in response to "
+ "GET request.") % response.status_code)
+ with response.raw as input_img:
+ shutil.copyfileobj(input_img, image_file, IMAGE_CHUNK_SIZE)
+ except (requests.RequestException, IOError) as e:
+ raise exception.ImageDownloadFailed(image_href=image_href,
+ reason=e)
+
+ def show(self, image_href):
+ """Get dictionary of image properties.
+
+ :param image_href: Image reference.
+ :raises: exception.ImageRefValidationFailed if:
+ * HEAD request failed;
+ * HEAD request returned response code not equal to 200;
+ * Content-Length header not found in response to HEAD request.
+ :returns: dictionary of image properties.
+ """
+ response = self.validate_href(image_href)
+ image_size = response.headers.get('Content-Length')
+ if image_size is None:
+ raise exception.ImageRefValidationFailed(
+ image_href=image_href,
+ reason=_("Cannot determine image size as there is no "
+ "Content-Length header specified in response "
+ "to HEAD request."))
+ return {
+ 'size': int(image_size),
+ 'properties': {}
+ }
+
+
+class FileImageService(BaseImageService):
+ """Provides retrieval of disk images available locally on the conductor."""
+
+ def validate_href(self, image_href):
+ """Validate local image reference.
+
+ :param image_href: Image reference.
+ :raises: exception.ImageRefValidationFailed if source image file
+ doesn't exist.
+ :returns: Path to image file if it exists.
+ """
+ image_path = urlparse.urlparse(image_href).path
+ if not os.path.isfile(image_path):
+ raise exception.ImageRefValidationFailed(
+ image_href=image_href,
+ reason=_("Specified image file not found."))
+ return image_path
+
+ def download(self, image_href, image_file):
+ """Downloads image to specified location.
+
+ :param image_href: Image reference.
+ :param image_file: File object to write data to.
+ :raises: exception.ImageRefValidationFailed if source image file
+ doesn't exist.
+ :raises: exception.ImageDownloadFailed if exceptions were raised while
+ writing to file or creating hard link.
+ """
+ source_image_path = self.validate_href(image_href)
+ dest_image_path = image_file.name
+ local_device = os.stat(dest_image_path).st_dev
+ try:
+ # We should have read and write access to source file to create
+ # hard link to it.
+ if (local_device == os.stat(source_image_path).st_dev and
+ os.access(source_image_path, os.R_OK | os.W_OK)):
+ image_file.close()
+ os.remove(dest_image_path)
+ os.link(source_image_path, dest_image_path)
+ else:
+ filesize = os.path.getsize(source_image_path)
+ with open(source_image_path, 'rb') as input_img:
+ sendfile.sendfile(image_file.fileno(), input_img.fileno(),
+ 0, filesize)
+ except Exception as e:
+ raise exception.ImageDownloadFailed(image_href=image_href,
+ reason=e)
+
+ def show(self, image_href):
+ """Get dictionary of image properties.
+
+ :param image_href: Image reference.
+ :raises: exception.ImageRefValidationFailed if image file specified
+ doesn't exist.
+ :returns: dictionary of image properties.
+ """
+ source_image_path = self.validate_href(image_href)
+ return {
+ 'size': os.path.getsize(source_image_path),
+ 'properties': {}
+ }
+
+
+protocol_mapping = {
+ 'http': HttpImageService,
+ 'https': HttpImageService,
+ 'file': FileImageService,
+ 'glance': GlanceImageService,
+}
+
+
+def get_image_service(image_href, client=None, version=1, context=None):
+ """Get image service instance to download the image.
+
+ :param image_href: String containing href to get image service for.
+ :param client: Glance client to be used for download, used only if
+ image_href is Glance href.
+ :param version: Version of Glance API to use, used only if image_href is
+ Glance href.
+ :param context: request context, used only if image_href is Glance href.
+ :raises: exception.ImageRefValidationFailed if no image service can
+ handle specified href.
+ :returns: Instance of an image service class that is able to download
+ specified image.
+ """
+ scheme = urlparse.urlparse(image_href).scheme.lower()
+ try:
+ cls = protocol_mapping[scheme or 'glance']
+ except KeyError:
+ raise exception.ImageRefValidationFailed(
+ image_href=image_href,
+ reason=_('Image download protocol '
+ '%s is not supported.') % scheme
+ )
+
+ if cls == GlanceImageService:
+ return cls(client, version, context)
+ return cls()
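+
+
+# Illustrative dispatch examples (hrefs are assumptions):
+#
+# get_image_service('http://example.com/img.qcow2') # HttpImageService
+# get_image_service('file:///images/img.qcow2') # FileImageService
+# get_image_service('5f2c7a5e-uuid') # no scheme -> GlanceImageService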
diff --git a/iotronic/common/images.py b/iotronic/common/images.py
new file mode 100644
index 0000000..4a67840
--- /dev/null
+++ b/iotronic/common/images.py
@@ -0,0 +1,577 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handling of VM disk images.
+"""
+
+import os
+import shutil
+
+import jinja2
+from oslo_concurrency import processutils
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from iotronic.common import exception
+from iotronic.common.glance_service import service_utils as glance_utils
+from iotronic.common.i18n import _
+from iotronic.common.i18n import _LE
+from iotronic.common import image_service as service
+from iotronic.common import paths
+from iotronic.common import utils
+from iotronic.openstack.common import fileutils
+from iotronic.openstack.common import imageutils
+
+LOG = logging.getLogger(__name__)
+
+image_opts = [
+ cfg.BoolOpt('force_raw_images',
+ default=True,
+ help='If True, convert backing images to "raw" disk image '
+ 'format.'),
+ cfg.StrOpt('isolinux_bin',
+ default='/usr/lib/syslinux/isolinux.bin',
+ help='Path to isolinux binary file.'),
+ cfg.StrOpt('isolinux_config_template',
+ default=paths.basedir_def('common/isolinux_config.template'),
+ help='Template file for isolinux configuration file.'),
+ cfg.StrOpt('grub_config_template',
+ default=paths.basedir_def('common/grub_conf.template'),
+ help='Template file for grub configuration file.'),
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(image_opts)
+
+
+def _create_root_fs(root_directory, files_info):
+ """Creates a filesystem root in given directory.
+
+ Given a mapping of absolute path of files to their relative paths
+ within the filesystem, this method copies the files to their
+ destination.
+
+ :param root_directory: the filesystem root directory.
+ :param files_info: A dict containing absolute path of file to be copied
+ -> relative path within the vfat image. For example,
+ {
+ '/absolute/path/to/file' -> 'relative/path/within/root'
+ ...
+ }
+ :raises: OSError, if creation of any directory failed.
+ :raises: IOError, if copying any of the files failed.
+ """
+ for src_file, path in files_info.items():
+ target_file = os.path.join(root_directory, path)
+ dirname = os.path.dirname(target_file)
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+
+ shutil.copyfile(src_file, target_file)
+
+
+def _umount_without_raise(mount_dir):
+ """Helper method to umount without raise."""
+ try:
+ utils.umount(mount_dir)
+ except processutils.ProcessExecutionError:
+ pass
+
+
+def create_vfat_image(output_file, files_info=None, parameters=None,
+ parameters_file='parameters.txt', fs_size_kib=100):
+ """Creates the fat fs image on the desired file.
+
+ This method copies the given files to a root directory (optional),
+ writes the parameters specified to the parameters file within the
+ root directory (optional), and then creates a vfat image of the root
+ directory.
+
+ :param output_file: The path to the file where the fat fs image needs
+ to be created.
+ :param files_info: A dict containing absolute path of file to be copied
+ -> relative path within the vfat image. For example,
+ {
+ '/absolute/path/to/file' -> 'relative/path/within/root'
+ ...
+ }
+ :param parameters: A dict containing key-value pairs of parameters.
+ :param parameters_file: The filename for the parameters file.
+ :param fs_size_kib: size of the vfat filesystem in KiB.
+ :raises: ImageCreationFailed, if image creation failed during any of
+ the filesystem manipulation activities, such as creating dirs,
+ mounting, creating the filesystem, or copying files.
+ """
+ try:
+ utils.dd('/dev/zero', output_file, 'count=1', "bs=%dKiB" % fs_size_kib)
+ except processutils.ProcessExecutionError as e:
+ raise exception.ImageCreationFailed(image_type='vfat', error=e)
+
+ with utils.tempdir() as tmpdir:
+
+ try:
+ # The label helps ramdisks to find the partition containing
+ # the parameters (by using /dev/disk/by-label/ir-vfd-dev).
+ # NOTE: FAT filesystem label can be up to 11 characters long.
+ utils.mkfs('vfat', output_file, label="ir-vfd-dev")
+ utils.mount(output_file, tmpdir, '-o', 'umask=0')
+ except processutils.ProcessExecutionError as e:
+ raise exception.ImageCreationFailed(image_type='vfat', error=e)
+
+ try:
+ if files_info:
+ _create_root_fs(tmpdir, files_info)
+
+ if parameters:
+ parameters_file = os.path.join(tmpdir, parameters_file)
+ params_list = ['%(key)s=%(val)s' % {'key': k, 'val': v}
+ for k, v in parameters.items()]
+ file_contents = '\n'.join(params_list)
+ utils.write_to_file(parameters_file, file_contents)
+
+ except Exception as e:
+ LOG.exception(_LE("vfat image creation failed. Error: %s"), e)
+ raise exception.ImageCreationFailed(image_type='vfat', error=e)
+
+ finally:
+ try:
+ utils.umount(tmpdir)
+ except processutils.ProcessExecutionError as e:
+ raise exception.ImageCreationFailed(image_type='vfat', error=e)
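+
+# A minimal usage sketch (path and parameters are assumptions):
+#
+# create_vfat_image('/tmp/params.img',
+# parameters={'root_uuid': 'some-uuid'})
+#
+# creates a 100 KiB vfat image labelled "ir-vfd-dev" whose
+# parameters.txt contains the single line "root_uuid=some-uuid".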
+
+
+def _generate_cfg(kernel_params, template, options):
+ """Generates a isolinux or grub configuration file.
+
+ Given a given a list of strings containing kernel parameters, this method
+ returns the kernel cmdline string.
+ :param kernel_params: a list of strings(each element being a string like
+ 'K=V' or 'K' or combination of them like 'K1=V1 K2 K3=V3') to be added
+ as the kernel cmdline.
+ :param template: the path of the config template file.
+ :param options: a dictionary of keywords which need to be replaced in
+ template file to generate a proper config file.
+ :returns: a string containing the contents of the rendered configuration
+ file.
+ """
+ if not kernel_params:
+ kernel_params = []
+ kernel_params_str = ' '.join(kernel_params)
+
+ tmpl_path, tmpl_file = os.path.split(template)
+ env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
+ template = env.get_template(tmpl_file)
+
+ options.update({'kernel_params': kernel_params_str})
+
+ cfg = template.render(options)
+ return cfg
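+
+# Worked example (values assumed): with kernel_params
+# ['nofb', 'nomodeset'] and the isolinux template shipped in this
+# change, the rendered append line becomes:
+#
+# append initrd=/initrd text nofb nomodeset --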
+
+
+def create_isolinux_image_for_bios(output_file, kernel, ramdisk,
+ kernel_params=None):
+ """Creates an isolinux image on the specified file.
+
+ Copies the provided kernel and ramdisk to a directory, generates the
+ isolinux configuration file using the kernel parameters provided, and
+ then generates a bootable ISO image.
+
+ :param output_file: the path to the file where the iso image needs to be
+ created.
+ :param kernel: the kernel to use.
+ :param ramdisk: the ramdisk to use.
+ :param kernel_params: a list of strings (each element being a string like
+ 'K=V' or 'K' or combination of them like 'K1=V1,K2,...') to be added
+ as the kernel cmdline.
+ :raises: ImageCreationFailed, if image creation failed while copying files
+ or while running command to generate iso.
+ """
+ ISOLINUX_BIN = 'isolinux/isolinux.bin'
+ ISOLINUX_CFG = 'isolinux/isolinux.cfg'
+
+ options = {'kernel': '/vmlinuz', 'ramdisk': '/initrd'}
+
+ with utils.tempdir() as tmpdir:
+ files_info = {
+ kernel: 'vmlinuz',
+ ramdisk: 'initrd',
+ CONF.isolinux_bin: ISOLINUX_BIN,
+ }
+ try:
+ _create_root_fs(tmpdir, files_info)
+ except (OSError, IOError) as e:
+ LOG.exception(_LE("Creating the filesystem root failed."))
+ raise exception.ImageCreationFailed(image_type='iso', error=e)
+
+ cfg = _generate_cfg(kernel_params,
+ CONF.isolinux_config_template, options)
+
+ isolinux_cfg = os.path.join(tmpdir, ISOLINUX_CFG)
+ utils.write_to_file(isolinux_cfg, cfg)
+
+ try:
+ utils.execute('mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO",
+ '-cache-inodes', '-J', '-l', '-no-emul-boot',
+ '-boot-load-size', '4', '-boot-info-table',
+ '-b', ISOLINUX_BIN, '-o', output_file, tmpdir)
+ except processutils.ProcessExecutionError as e:
+ LOG.exception(_LE("Creating ISO image failed."))
+ raise exception.ImageCreationFailed(image_type='iso', error=e)
+
+
+def create_isolinux_image_for_uefi(output_file, deploy_iso, kernel, ramdisk,
+ kernel_params=None):
+ """Creates an isolinux image on the specified file.
+
+ Copies the provided kernel, ramdisk, efiboot.img to a directory, creates
+ the path for grub config file, generates the isolinux configuration file
+ using the kernel parameters provided, generates the grub configuration
+ file using kernel parameters and then generates a bootable ISO image
+ for uefi.
+
+ :param output_file: the path to the file where the iso image needs to be
+ created.
+ :param deploy_iso: deploy iso used to initiate the deploy.
+ :param kernel: the kernel to use.
+ :param ramdisk: the ramdisk to use.
+ :param kernel_params: a list of strings (each element being a string like
+ 'K=V' or 'K' or combination of them like 'K1=V1,K2,...') to be added
+ as the kernel cmdline.
+ :raises: ImageCreationFailed, if image creation failed while copying files
+ or while running command to generate iso.
+ """
+ ISOLINUX_BIN = 'isolinux/isolinux.bin'
+ ISOLINUX_CFG = 'isolinux/isolinux.cfg'
+
+ isolinux_options = {'kernel': '/vmlinuz', 'ramdisk': '/initrd'}
+ grub_options = {'linux': '/vmlinuz', 'initrd': '/initrd'}
+
+ with utils.tempdir() as tmpdir:
+ files_info = {
+ kernel: 'vmlinuz',
+ ramdisk: 'initrd',
+ CONF.isolinux_bin: ISOLINUX_BIN,
+ }
+
+ # Open the deploy iso used to initiate deploy and copy the
+ # efiboot.img i.e. boot loader to the current temporary
+ # directory.
+ with utils.tempdir() as mountdir:
+ uefi_path_info, e_img_rel_path, grub_rel_path = (
+ _mount_deploy_iso(deploy_iso, mountdir))
+
+ # If either of these variables is not initialized, the
+ # uefi efiboot.img cannot be created.
+ files_info.update(uefi_path_info)
+ try:
+ _create_root_fs(tmpdir, files_info)
+ except (OSError, IOError) as e:
+ LOG.exception(_LE("Creating the filesystem root failed."))
+ raise exception.ImageCreationFailed(image_type='iso', error=e)
+ finally:
+ _umount_without_raise(mountdir)
+
+ cfg = _generate_cfg(kernel_params,
+ CONF.isolinux_config_template, isolinux_options)
+
+ isolinux_cfg = os.path.join(tmpdir, ISOLINUX_CFG)
+ utils.write_to_file(isolinux_cfg, cfg)
+
+ # Generate and copy grub config file.
+ grub_cfg = os.path.join(tmpdir, grub_rel_path)
+ grub_conf = _generate_cfg(kernel_params,
+ CONF.grub_config_template, grub_options)
+ utils.write_to_file(grub_cfg, grub_conf)
+
+ # Create the boot_iso.
+ try:
+ utils.execute('mkisofs', '-r', '-V', "VMEDIA_BOOT_ISO",
+ '-cache-inodes', '-J', '-l', '-no-emul-boot',
+ '-boot-load-size', '4', '-boot-info-table',
+ '-b', ISOLINUX_BIN, '-eltorito-alt-boot',
+ '-e', e_img_rel_path, '-no-emul-boot',
+ '-o', output_file, tmpdir)
+ except processutils.ProcessExecutionError as e:
+ LOG.exception(_LE("Creating ISO image failed."))
+ raise exception.ImageCreationFailed(image_type='iso', error=e)
+
+
+def qemu_img_info(path):
+ """Return an object containing the parsed output from qemu-img info."""
+ if not os.path.exists(path):
+ return imageutils.QemuImgInfo()
+
+ out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
+ 'qemu-img', 'info', path)
+ return imageutils.QemuImgInfo(out)
+
+
+def convert_image(source, dest, out_format, run_as_root=False):
+ """Convert image to other format."""
+ cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
+ utils.execute(*cmd, run_as_root=run_as_root)
+
+
+def fetch(context, image_href, path, image_service=None, force_raw=False):
+ # TODO(vish): Improve context handling and add owner and auth data
+ # when it is added to glance. Right now there is no
+ # auth checking in glance, so we assume that access was
+ # checked before we got here.
+ if not image_service:
+ image_service = service.get_image_service(image_href,
+ context=context)
+ LOG.debug("Using %(image_service)s to download image %(image_href)s." %
+ {'image_service': image_service.__class__,
+ 'image_href': image_href})
+
+ with fileutils.remove_path_on_error(path):
+ with open(path, "wb") as image_file:
+ image_service.download(image_href, image_file)
+
+ if force_raw:
+ image_to_raw(image_href, path, "%s.part" % path)
+
+
+def image_to_raw(image_href, path, path_tmp):
+ with fileutils.remove_path_on_error(path_tmp):
+ data = qemu_img_info(path_tmp)
+
+ fmt = data.file_format
+ if fmt is None:
+ raise exception.ImageUnacceptable(
+ reason=_("'qemu-img info' parsing failed."),
+ image_id=image_href)
+
+ backing_file = data.backing_file
+ if backing_file is not None:
+ raise exception.ImageUnacceptable(
+ image_id=image_href,
+ reason=_("fmt=%(fmt)s backed by: %(backing_file)s") %
+ {'fmt': fmt, 'backing_file': backing_file})
+
+ if fmt != "raw":
+ staged = "%s.converted" % path
+ LOG.debug("%(image)s was %(format)s, converting to raw" %
+ {'image': image_href, 'format': fmt})
+ with fileutils.remove_path_on_error(staged):
+ convert_image(path_tmp, staged, 'raw')
+ os.unlink(path_tmp)
+
+ data = qemu_img_info(staged)
+ if data.file_format != "raw":
+ raise exception.ImageConvertFailed(
+ image_id=image_href,
+ reason=_("Converted to raw, but format is "
+ "now %s") % data.file_format)
+
+ os.rename(staged, path)
+ else:
+ os.rename(path_tmp, path)
+
+
+def download_size(context, image_href, image_service=None):
+ if not image_service:
+ image_service = service.get_image_service(image_href, context=context)
+ return image_service.show(image_href)['size']
+
+
+def converted_size(path):
+ """Get size of converted raw image.
+
+ The size of an image converted to raw format can grow up to the
+ virtual size of the image.
+
+ :param path: path to the image file.
+ :returns: virtual size of the image or 0 if conversion not needed.
+
+ """
+ data = qemu_img_info(path)
+ return data.virtual_size
+
+
+def get_image_properties(context, image_href, properties="all"):
+ """Returns the values of several properties of an image
+
+ :param context: context
+ :param image_href: href of the image
+ :param properties: the properties whose values are required.
+ This argument is optional, default value is "all", so if not specified
+ all properties will be returned.
+ :returns: a dict of the values of the properties. A property not on the
+ glance metadata will have a value of None.
+ """
+ img_service = service.get_image_service(image_href, context=context)
+ iproperties = img_service.show(image_href)['properties']
+
+ if properties == "all":
+ return iproperties
+
+ return {p: iproperties.get(p) for p in properties}
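+
+# Illustrative call (property names are assumptions):
+#
+# get_image_properties(ctx, image_href, ['kernel_id', 'ramdisk_id'])
+# # -> {'kernel_id': 'some-id', 'ramdisk_id': None} when ramdisk_id
+# # is not set on the image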
+
+
+def get_temp_url_for_glance_image(context, image_uuid):
+ """Returns the tmp url for a glance image.
+
+ :param context: context
+ :param image_uuid: the UUID of the image in glance
+ :returns: the tmp url for the glance image.
+ """
+ # Glance API version 2 is required for getting direct_url of the image.
+ glance_service = service.GlanceImageService(version=2, context=context)
+ image_properties = glance_service.show(image_uuid)
+ LOG.debug('Got image info: %(info)s for image %(image_uuid)s.',
+ {'info': image_properties, 'image_uuid': image_uuid})
+ return glance_service.swift_temp_url(image_properties)
+
+
+def create_boot_iso(context, output_filename, kernel_href,
+ ramdisk_href, deploy_iso_uuid, root_uuid=None,
+ kernel_params=None, boot_mode=None):
+ """Creates a bootable ISO image for a node.
+
+ Given the hrefs for kernel, ramdisk, root partition's UUID and
+ kernel cmdline arguments, this method fetches the kernel and ramdisk,
+ and builds a bootable ISO image that can be used to boot up the
+ baremetal node.
+
+ :param context: context
+ :param output_filename: the absolute path of the output ISO file
+ :param kernel_href: URL or glance uuid of the kernel to use
+ :param ramdisk_href: URL or glance uuid of the ramdisk to use
+ :param deploy_iso_uuid: URL or glance uuid of the deploy iso used
+ :param root_uuid: uuid of the root filesystem (optional)
+ :param kernel_params: a string containing whitespace-separated kernel
+ cmdline arguments of the form K=V or K (optional).
+ :param boot_mode: the boot mode in which the deploy is to happen.
+ :raises: ImageCreationFailed, if creating boot ISO failed.
+ """
+ with utils.tempdir() as tmpdir:
+ kernel_path = os.path.join(tmpdir, kernel_href.split('/')[-1])
+ ramdisk_path = os.path.join(tmpdir, ramdisk_href.split('/')[-1])
+ fetch(context, kernel_href, kernel_path)
+ fetch(context, ramdisk_href, ramdisk_path)
+
+ params = []
+ if root_uuid:
+ params.append('root=UUID=%s' % root_uuid)
+ if kernel_params:
+ params.append(kernel_params)
+
+ if boot_mode == 'uefi':
+ deploy_iso = os.path.join(tmpdir, deploy_iso_uuid)
+ fetch(context, deploy_iso_uuid, deploy_iso)
+ create_isolinux_image_for_uefi(output_filename,
+ deploy_iso,
+ kernel_path,
+ ramdisk_path,
+ params)
+ else:
+ create_isolinux_image_for_bios(output_filename,
+ kernel_path,
+ ramdisk_path,
+ params)
+
+
+def is_whole_disk_image(ctx, instance_info):
+ """Find out if the image is a partition image or a whole disk image.
+
+ :param ctx: an admin context
+ :param instance_info: a node's instance info dict
+
+ :returns: True for whole disk images, False for partition images,
+ and None if there is no image_source or an error occurs.
+ """
+ image_source = instance_info.get('image_source')
+ if not image_source:
+ return
+
+ is_whole_disk_image = False
+ if glance_utils.is_glance_image(image_source):
+ try:
+ iproperties = get_image_properties(ctx, image_source)
+ except Exception:
+ return
+ is_whole_disk_image = (not iproperties.get('kernel_id') and
+ not iproperties.get('ramdisk_id'))
+ else:
+ # Non glance image ref
+ if (not instance_info.get('kernel') and
+ not instance_info.get('ramdisk')):
+ is_whole_disk_image = True
+
+ return is_whole_disk_image
+
+
+def _mount_deploy_iso(deploy_iso, mountdir):
+ """This function opens up the deploy iso used for deploy.
+
+ :param: deploy_iso: path to the deploy iso where its
+ contents are fetched to.
+ :raises: ImageCreationFailed if mount fails.
+ :returns: a tuple consisting of - 1. a dictionary containing
+ the values as required
+ by create_isolinux_image,
+ 2. efiboot.img relative path, and
+ 3. grub.cfg relative path.
+
+ """
+ e_img_rel_path = None
+ e_img_path = None
+ grub_rel_path = None
+ grub_path = None
+
+ try:
+ utils.mount(deploy_iso, mountdir, '-o', 'loop')
+ except processutils.ProcessExecutionError as e:
+ LOG.exception(_LE("mounting the deploy iso failed."))
+ raise exception.ImageCreationFailed(image_type='iso', error=e)
+
+ try:
+ for (dir, subdir, files) in os.walk(mountdir):
+ if 'efiboot.img' in files:
+ e_img_path = os.path.join(dir, 'efiboot.img')
+ e_img_rel_path = os.path.relpath(e_img_path,
+ mountdir)
+ if 'grub.cfg' in files:
+ grub_path = os.path.join(dir, 'grub.cfg')
+ grub_rel_path = os.path.relpath(grub_path,
+ mountdir)
+ except (OSError, IOError) as e:
+ LOG.exception(_LE("examining the deploy iso failed."))
+ _umount_without_raise(mountdir)
+ raise exception.ImageCreationFailed(image_type='iso', error=e)
+
+ # check if the variables are assigned some values or not during
+ # walk of the mountdir.
+ if not (e_img_path and e_img_rel_path and grub_path and grub_rel_path):
+ error = (_("Deploy iso didn't contain efiboot.img or grub.cfg"))
+ _umount_without_raise(mountdir)
+ raise exception.ImageCreationFailed(image_type='iso', error=error)
+
+ uefi_path_info = {e_img_path: e_img_rel_path,
+ grub_path: grub_rel_path}
+
+ # Returning a tuple as it makes the code simpler and cleaner.
+ # uefi_path_info: is needed by the caller for _create_root_fs to create
+ # appropriate directory structures for uefi boot iso.
+ # grub_rel_path: is needed to copy the new grub.cfg generated using
+ # generate_cfg() to the same directory path structure where it was
+ # present in deploy iso. This path varies for different OS vendors.
+ # e_img_rel_path: is required by mkisofs to generate boot iso.
+ return uefi_path_info, e_img_rel_path, grub_rel_path
diff --git a/iotronic/common/isolinux_config.template b/iotronic/common/isolinux_config.template
new file mode 100644
index 0000000..5adf287
--- /dev/null
+++ b/iotronic/common/isolinux_config.template
@@ -0,0 +1,5 @@
+default boot
+
+label boot
+kernel {{ kernel }}
+append initrd={{ ramdisk }} text {{ kernel_params }} --
diff --git a/iotronic/common/keystone.py b/iotronic/common/keystone.py
new file mode 100644
index 0000000..90829fe
--- /dev/null
+++ b/iotronic/common/keystone.py
@@ -0,0 +1,139 @@
+# coding=utf-8
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystoneclient import exceptions as ksexception
+from oslo_config import cfg
+from six.moves.urllib import parse
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+
+CONF = cfg.CONF
+
+keystone_opts = [
+ cfg.StrOpt('region_name',
+ help='The region used for getting endpoints of OpenStack '
+ 'services.'),
+]
+
+CONF.register_opts(keystone_opts, group='keystone')
+CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token')
+
+
+def _is_apiv3(auth_url, auth_version):
+ """Checks if V3 version of API is being used or not.
+
+ This method inspects auth_url and auth_version, and checks whether V3
+ version of the API is being used or not.
+
+ :param auth_url: a http or https url to be inspected (like
+ 'http://127.0.0.1:9898/').
+ :param auth_version: a string containing the version (like 'v2', 'v3.0')
+ :returns: True if V3 of the API is being used.
+ """
+ return auth_version == 'v3.0' or '/v3' in parse.urlparse(auth_url).path
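+
+# Hedged examples of the check above (URLs are assumptions):
+#
+# _is_apiv3('http://127.0.0.1:5000/v3', '') # True: '/v3' in path
+# _is_apiv3('http://127.0.0.1:5000/', 'v3.0') # True: version string
+# _is_apiv3('http://127.0.0.1:5000/v2.0', 'v2.0') # False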
+
+
+def _get_ksclient(token=None):
+ auth_url = CONF.keystone_authtoken.auth_uri
+ if not auth_url:
+ raise exception.KeystoneFailure(_('Keystone API endpoint is missing'))
+
+ auth_version = CONF.keystone_authtoken.auth_version
+ api_v3 = _is_apiv3(auth_url, auth_version)
+
+ if api_v3:
+ from keystoneclient.v3 import client
+ else:
+ from keystoneclient.v2_0 import client
+
+ auth_url = get_keystone_url(auth_url, auth_version)
+ try:
+ if token:
+ return client.Client(token=token, auth_url=auth_url)
+ else:
+ return client.Client(
+ username=CONF.keystone_authtoken.admin_user,
+ password=CONF.keystone_authtoken.admin_password,
+ tenant_name=CONF.keystone_authtoken.admin_tenant_name,
+ region_name=CONF.keystone.region_name,
+ auth_url=auth_url)
+ except ksexception.Unauthorized:
+ raise exception.KeystoneUnauthorized()
+ except ksexception.AuthorizationFailure as err:
+ raise exception.KeystoneFailure(_('Could not authorize in Keystone:'
+ ' %s') % err)
+
+
+def get_keystone_url(auth_url, auth_version):
+ """Gives an http/https url to contact keystone.
+
+ Given an auth_url and auth_version, this method generates the url in
+ which keystone can be reached.
+
+ :param auth_url: a http or https url to be inspected (like
+ 'http://127.0.0.1:9898/').
+ :param auth_version: a string containing the version (like v2, v3.0, etc)
+ :returns: a string containing the keystone url
+ """
+ api_v3 = _is_apiv3(auth_url, auth_version)
+ api_version = 'v3' if api_v3 else 'v2.0'
+ # NOTE(lucasagomes): Get rid of the trailing '/' otherwise urljoin()
+ # fails to override the version in the URL
+ return parse.urljoin(auth_url.rstrip('/'), api_version)
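+
+# Worked example (URL assumed); the version segment is overridden:
+#
+# get_keystone_url('http://127.0.0.1:5000/v2.0/', 'v3.0')
+# # -> 'http://127.0.0.1:5000/v3'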
+
+
+def get_service_url(service_type='baremetal', endpoint_type='internal'):
+ """Wrapper for get service url from keystone service catalog.
+
+ Given a service_type and an endpoint_type, this method queries keystone
+ service catalog and provides the url for the desired endpoint.
+
+ :param service_type: the keystone service for which url is required.
+ :param endpoint_type: the type of endpoint for the service.
+ :returns: an http/https url for the desired endpoint.
+ """
+ ksclient = _get_ksclient()
+
+ if not ksclient.has_service_catalog():
+ raise exception.KeystoneFailure(_('No Keystone service catalog '
+ 'loaded'))
+
+ try:
+ endpoint = ksclient.service_catalog.url_for(
+ service_type=service_type,
+ endpoint_type=endpoint_type,
+ region_name=CONF.keystone.region_name)
+
+ except ksexception.EndpointNotFound:
+ raise exception.CatalogNotFound(service_type=service_type,
+ endpoint_type=endpoint_type)
+
+ return endpoint
+
+
+def get_admin_auth_token():
+ """Get an admin auth_token from the Keystone."""
+ ksclient = _get_ksclient()
+ return ksclient.auth_token
+
+
+def token_expires_soon(token, duration=None):
+ """Determines if token expiration is about to occur.
+
+ :param token: the authentication token to check.
+ :param duration: time interval in seconds
+ :returns: boolean: True if expiration is within the given duration
+ """
+ ksclient = _get_ksclient(token=token)
+ return ksclient.auth_ref.will_expire_soon(stale_duration=duration)
diff --git a/iotronic/common/network.py b/iotronic/common/network.py
new file mode 100644
index 0000000..de55979
--- /dev/null
+++ b/iotronic/common/network.py
@@ -0,0 +1,30 @@
+# Copyright 2014 Rackspace, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+def get_node_vif_ids(task):
+ """Get all VIF ids for a node.
+
+ This function does not handle multi-node operations.
+
+ :param task: a TaskManager instance.
+ :returns: A dict of the Node's port UUIDs and their associated VIFs
+
+ """
+ port_vifs = {}
+ for port in task.ports:
+ vif = port.extra.get('vif_port_id')
+ if vif:
+ port_vifs[port.uuid] = vif
+ return port_vifs
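+
+# Illustrative return value (UUID and VIF id are assumptions):
+#
+# {'1be26c0b-03f2-4d2e-ae87-c02d7f33c123': 'neutron-vif-id'}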
diff --git a/iotronic/common/paths.py b/iotronic/common/paths.py
new file mode 100644
index 0000000..20b1db7
--- /dev/null
+++ b/iotronic/common/paths.py
@@ -0,0 +1,66 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from oslo_config import cfg
+
+path_opts = [
+ cfg.StrOpt('pybasedir',
+ default=os.path.abspath(os.path.join(os.path.dirname(__file__),
+ '../')),
+ help='Directory where the iotronic python module is installed.'),
+ cfg.StrOpt('bindir',
+ default='$pybasedir/bin',
+ help='Directory where iotronic binaries are installed.'),
+ cfg.StrOpt('state_path',
+ default='$pybasedir',
+ help="Top-level directory for maintaining iotronic's state."),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(path_opts)
+
+
+def basedir_def(*args):
+ """Return an uninterpolated path relative to $pybasedir."""
+ return os.path.join('$pybasedir', *args)
+
+
+def bindir_def(*args):
+ """Return an uninterpolated path relative to $bindir."""
+ return os.path.join('$bindir', *args)
+
+
+def state_path_def(*args):
+ """Return an uninterpolated path relative to $state_path."""
+ return os.path.join('$state_path', *args)
+
+
+def basedir_rel(*args):
+ """Return a path relative to $pybasedir."""
+ return os.path.join(CONF.pybasedir, *args)
+
+
+def bindir_rel(*args):
+ """Return a path relative to $bindir."""
+ return os.path.join(CONF.bindir, *args)
+
+
+def state_path_rel(*args):
+ """Return a path relative to $state_path."""
+ return os.path.join(CONF.state_path, *args)
diff --git a/iotronic/common/policy.py b/iotronic/common/policy.py
new file mode 100644
index 0000000..754782e
--- /dev/null
+++ b/iotronic/common/policy.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Policy Engine For Iotronic."""
+
+from oslo_concurrency import lockutils
+from oslo_config import cfg
+from oslo_policy import policy
+
+_ENFORCER = None
+CONF = cfg.CONF
+
+
+@lockutils.synchronized('policy_enforcer', 'iotronic-')
+def init_enforcer(policy_file=None, rules=None,
+ default_rule=None, use_conf=True):
+ """Synchronously initializes the policy enforcer
+
+ :param policy_file: Custom policy file to use; if none is specified,
+ `CONF.policy_file` will be used.
+ :param rules: Default dictionary of rules to use. It is only
+ considered at the first instantiation.
+ :param default_rule: Default rule to use; CONF.default_rule will
+ be used if none is specified.
+ :param use_conf: Whether to load rules from config file.
+
+ """
+ global _ENFORCER
+
+ if _ENFORCER:
+ return
+
+ _ENFORCER = policy.Enforcer(CONF, policy_file=policy_file,
+ rules=rules,
+ default_rule=default_rule,
+ use_conf=use_conf)
+
+
+def get_enforcer():
+ """Provides access to the single instance of Policy enforcer."""
+
+ if not _ENFORCER:
+ init_enforcer()
+
+ return _ENFORCER
+
+
+def enforce(rule, target, creds, do_raise=False, exc=None, *args, **kwargs):
+ """A shortcut for policy.Enforcer.enforce()
+
+ Checks authorization of a rule against the target and credentials.
+
+ """
+ enforcer = get_enforcer()
+ return enforcer.enforce(rule, target, creds, do_raise=do_raise,
+ exc=exc, *args, **kwargs)
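+
+# Illustrative call (rule name and target are assumptions):
+#
+# allowed = enforce('admin_api', target={}, creds=context.to_dict())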
diff --git a/iotronic/common/pxe_utils.py b/iotronic/common/pxe_utils.py
new file mode 100644
index 0000000..9aa9798
--- /dev/null
+++ b/iotronic/common/pxe_utils.py
@@ -0,0 +1,285 @@
+#
+# Copyright 2014 Rackspace, Inc
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import jinja2
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from iotronic.common import dhcp_factory
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common import utils
+from iotronic.drivers.modules import deploy_utils
+from iotronic.drivers import utils as driver_utils
+from iotronic.openstack.common import fileutils
+
+CONF = cfg.CONF
+
+LOG = logging.getLogger(__name__)
+
+PXE_CFG_DIR_NAME = 'pxelinux.cfg'
+
+
+def get_root_dir():
+ """Returns the directory where the config files and images will live."""
+ if CONF.pxe.ipxe_enabled:
+ return CONF.pxe.http_root
+ else:
+ return CONF.pxe.tftp_root
+
+
+def _ensure_config_dirs_exist(node_uuid):
+ """Ensure that the node's and PXE configuration directories exist.
+
+ :param node_uuid: the UUID of the node.
+
+ """
+ root_dir = get_root_dir()
+ fileutils.ensure_tree(os.path.join(root_dir, node_uuid))
+ fileutils.ensure_tree(os.path.join(root_dir, PXE_CFG_DIR_NAME))
+
+
+def _build_pxe_config(pxe_options, template):
+ """Build the PXE boot configuration file.
+
+ This method builds the PXE boot configuration file by rendering the
+ template with the given parameters.
+
+ :param pxe_options: A dict of values to set on the configuration file.
+ :param template: The PXE configuration template.
+ :returns: A formatted string with the file content.
+
+ """
+ tmpl_path, tmpl_file = os.path.split(template)
+ env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
+ template = env.get_template(tmpl_file)
+ return template.render({'pxe_options': pxe_options,
+ 'ROOT': '{{ ROOT }}',
+ 'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}',
+ })
+
+
+def _link_mac_pxe_configs(task):
+ """Link each MAC address with the PXE configuration file.
+
+ :param task: A TaskManager instance.
+
+ """
+
+ def create_link(mac_path):
+ utils.unlink_without_raise(mac_path)
+ utils.create_link_without_raise(pxe_config_file_path, mac_path)
+
+ pxe_config_file_path = get_pxe_config_file_path(task.node.uuid)
+ for mac in driver_utils.get_node_mac_addresses(task):
+ create_link(_get_pxe_mac_path(mac))
+ # TODO(lucasagomes): Backward compatibility with :hexraw,
+ # to be removed in M.
+ # see: https://bugs.launchpad.net/iotronic/+bug/1441710
+ if CONF.pxe.ipxe_enabled:
+ create_link(_get_pxe_mac_path(mac, delimiter=''))
+
+
+def _link_ip_address_pxe_configs(task):
+ """Link each IP address with the PXE configuration file.
+
+ :param task: A TaskManager instance.
+ :raises: FailedToGetIPAddressOnPort
+ :raises: InvalidIPv4Address
+
+ """
+ pxe_config_file_path = get_pxe_config_file_path(task.node.uuid)
+
+ api = dhcp_factory.DHCPFactory().provider
+ ip_addrs = api.get_ip_addresses(task)
+ if not ip_addrs:
+ raise exception.FailedToGetIPAddressOnPort(_(
+ "Failed to get IP address for any port on node %s.") %
+ task.node.uuid)
+ for port_ip_address in ip_addrs:
+ ip_address_path = _get_pxe_ip_address_path(port_ip_address)
+ utils.unlink_without_raise(ip_address_path)
+ utils.create_link_without_raise(pxe_config_file_path,
+ ip_address_path)
+
+
+def _get_pxe_mac_path(mac, delimiter=None):
+ """Convert a MAC address into a PXE config file name.
+
+ :param mac: A MAC address string in the format xx:xx:xx:xx:xx:xx.
+ :param delimiter: The MAC address delimiter. Defaults to dash ('-').
+ :returns: the path to the config file.
+
+ """
+ if delimiter is None:
+ delimiter = '-'
+
+ mac_file_name = mac.replace(':', delimiter).lower()
+ if not CONF.pxe.ipxe_enabled:
+ mac_file_name = '01-' + mac_file_name
+
+ return os.path.join(get_root_dir(), PXE_CFG_DIR_NAME, mac_file_name)
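+
+# Worked example (MAC assumed): with iPXE disabled,
+# '00:1B:44:11:3A:B7' maps to
+# '<tftp_root>/pxelinux.cfg/01-00-1b-44-11-3a-b7'.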
+
+
+def _get_pxe_ip_address_path(ip_address):
+ """Convert an ipv4 address into a PXE config file name.
+
+ :param ip_address: A valid IPv4 address string in the format 'n.n.n.n'.
+ :returns: the path to the config file.
+
+ """
+ ip = ip_address.split('.')
+ hex_ip = '{0:02X}{1:02X}{2:02X}{3:02X}'.format(*map(int, ip))
+
+ return os.path.join(
+ CONF.pxe.tftp_root, hex_ip + ".conf"
+ )
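+
+# Worked example (address assumed): '10.0.0.5' hex-encodes to
+# '0A000005', so the config path becomes '<tftp_root>/0A000005.conf'.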
+
+
+def get_deploy_kr_info(node_uuid, driver_info):
+ """Get href and tftp path for deploy kernel and ramdisk.
+
+ Note: driver_info should be validated outside of this method.
+ """
+ root_dir = get_root_dir()
+ image_info = {}
+ for label in ('deploy_kernel', 'deploy_ramdisk'):
+ image_info[label] = (
+ str(driver_info[label]),
+ os.path.join(root_dir, node_uuid, label)
+ )
+ return image_info
+
+
+def get_pxe_config_file_path(node_uuid):
+ """Generate the path for the node's PXE configuration file.
+
+ :param node_uuid: the UUID of the node.
+ :returns: The path to the node's PXE configuration file.
+
+ """
+ return os.path.join(get_root_dir(), node_uuid, 'config')
+
+
+def create_pxe_config(task, pxe_options, template=None):
+ """Generate PXE configuration file and MAC address links for it.
+
+ This method will generate the PXE configuration file for the task's
+ node under a directory named with the UUID of that node. For each
+ MAC address (port) of that node, a symlink for the configuration file
+ will be created under the PXE configuration directory, so regardless
+ of which port boots first they'll get the same PXE configuration.
+
+ :param task: A TaskManager instance.
+ :param pxe_options: A dictionary with the PXE configuration
+ parameters.
+ :param template: The PXE configuration template. If no template is
+ given the CONF.pxe.pxe_config_template will be used.
+
+ """
+ LOG.debug("Building PXE config for node %s", task.node.uuid)
+
+ if template is None:
+ template = CONF.pxe.pxe_config_template
+
+ _ensure_config_dirs_exist(task.node.uuid)
+
+ pxe_config_file_path = get_pxe_config_file_path(task.node.uuid)
+ pxe_config = _build_pxe_config(pxe_options, template)
+ utils.write_to_file(pxe_config_file_path, pxe_config)
+
+ if deploy_utils.get_boot_mode_for_deploy(task.node) == 'uefi':
+ _link_ip_address_pxe_configs(task)
+ else:
+ _link_mac_pxe_configs(task)
+
+
+def clean_up_pxe_config(task):
+ """Clean up the TFTP environment for the task's node.
+
+ :param task: A TaskManager instance.
+
+ """
+ LOG.debug("Cleaning up PXE config for node %s", task.node.uuid)
+
+ if deploy_utils.get_boot_mode_for_deploy(task.node) == 'uefi':
+ api = dhcp_factory.DHCPFactory().provider
+ ip_addresses = api.get_ip_addresses(task)
+ if not ip_addresses:
+ return
+
+ for port_ip_address in ip_addresses:
+ try:
+ ip_address_path = _get_pxe_ip_address_path(port_ip_address)
+ except exception.InvalidIPv4Address:
+ continue
+ utils.unlink_without_raise(ip_address_path)
+ else:
+ for mac in driver_utils.get_node_mac_addresses(task):
+ utils.unlink_without_raise(_get_pxe_mac_path(mac))
+ # TODO(lucasagomes): Backward compatibility with :hexraw,
+ # to be removed in M.
+ # see: https://bugs.launchpad.net/iotronic/+bug/1441710
+ if CONF.pxe.ipxe_enabled:
+ utils.unlink_without_raise(_get_pxe_mac_path(mac,
+ delimiter=''))
+
+ utils.rmtree_without_raise(os.path.join(get_root_dir(),
+ task.node.uuid))
+
+
+def dhcp_options_for_instance(task):
+ """Retrieves the DHCP PXE boot options.
+
+ :param task: A TaskManager instance.
+ """
+ dhcp_opts = []
+ if CONF.pxe.ipxe_enabled:
+ script_name = os.path.basename(CONF.pxe.ipxe_boot_script)
+ ipxe_script_url = '/'.join([CONF.pxe.http_url, script_name])
+ dhcp_provider_name = dhcp_factory.CONF.dhcp.dhcp_provider
+ # If the request comes from dumb firmware, send it the iPXE
+ # boot script.
+ if dhcp_provider_name == 'neutron':
+ # Neutron uses dnsmasq as its default DHCP agent; add the
+ # extra config "dhcp-match=set:ipxe,175" to neutron and use
+ # the options below.
+ dhcp_opts.append({'opt_name': 'tag:!ipxe,bootfile-name',
+ 'opt_value': CONF.pxe.pxe_bootfile_name})
+ dhcp_opts.append({'opt_name': 'tag:ipxe,bootfile-name',
+ 'opt_value': ipxe_script_url})
+ else:
+ # !175 == non-iPXE.
+ # http://ipxe.org/howto/dhcpd#ipxe-specific_options
+ dhcp_opts.append({'opt_name': '!175,bootfile-name',
+ 'opt_value': CONF.pxe.pxe_bootfile_name})
+ dhcp_opts.append({'opt_name': 'bootfile-name',
+ 'opt_value': ipxe_script_url})
+ else:
+ if deploy_utils.get_boot_mode_for_deploy(task.node) == 'uefi':
+ boot_file = CONF.pxe.uefi_pxe_bootfile_name
+ else:
+ boot_file = CONF.pxe.pxe_bootfile_name
+
+ dhcp_opts.append({'opt_name': 'bootfile-name',
+ 'opt_value': boot_file})
+
+ dhcp_opts.append({'opt_name': 'server-ip-address',
+ 'opt_value': CONF.pxe.tftp_server})
+ dhcp_opts.append({'opt_name': 'tftp-server',
+ 'opt_value': CONF.pxe.tftp_server})
+ return dhcp_opts
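+
+
+# Illustrative result (values assumed) for the plain-PXE BIOS path:
+#
+# [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'},
+# {'opt_name': 'server-ip-address', 'opt_value': '192.168.0.1'},
+# {'opt_name': 'tftp-server', 'opt_value': '192.168.0.1'}]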
diff --git a/iotronic/common/rpc.py b/iotronic/common/rpc.py
new file mode 100644
index 0000000..20ec8e1
--- /dev/null
+++ b/iotronic/common/rpc.py
@@ -0,0 +1,150 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+__all__ = [
+ 'init',
+ 'cleanup',
+ 'set_defaults',
+ 'add_extra_exmods',
+ 'clear_extra_exmods',
+ 'get_allowed_exmods',
+ 'RequestContextSerializer',
+ 'get_client',
+ 'get_server',
+ 'get_notifier',
+ 'TRANSPORT_ALIASES',
+]
+
+from oslo_config import cfg
+import oslo_messaging as messaging
+from oslo_serialization import jsonutils
+
+from iotronic.common import context as iotronic_context
+from iotronic.common import exception
+
+
+CONF = cfg.CONF
+TRANSPORT = None
+NOTIFIER = None
+
+ALLOWED_EXMODS = [
+ exception.__name__,
+]
+EXTRA_EXMODS = []
+
+# NOTE(lucasagomes): The iotronic.openstack.common.rpc entries are for
+# backwards compat with IceHouse rpc_backend configuration values.
+TRANSPORT_ALIASES = {
+ 'iotronic.openstack.common.rpc.impl_kombu': 'rabbit',
+ 'iotronic.openstack.common.rpc.impl_qpid': 'qpid',
+ 'iotronic.openstack.common.rpc.impl_zmq': 'zmq',
+ 'iotronic.rpc.impl_kombu': 'rabbit',
+ 'iotronic.rpc.impl_qpid': 'qpid',
+ 'iotronic.rpc.impl_zmq': 'zmq',
+}
+
+
+def init(conf):
+ global TRANSPORT, NOTIFIER
+ exmods = get_allowed_exmods()
+ TRANSPORT = messaging.get_transport(conf,
+ allowed_remote_exmods=exmods,
+ aliases=TRANSPORT_ALIASES)
+ serializer = RequestContextSerializer(JsonPayloadSerializer())
+ NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)
+
+
+def cleanup():
+ global TRANSPORT, NOTIFIER
+ assert TRANSPORT is not None
+ assert NOTIFIER is not None
+ TRANSPORT.cleanup()
+ TRANSPORT = NOTIFIER = None
+
+
+def set_defaults(control_exchange):
+ messaging.set_transport_defaults(control_exchange)
+
+
+def add_extra_exmods(*args):
+ EXTRA_EXMODS.extend(args)
+
+
+def clear_extra_exmods():
+ del EXTRA_EXMODS[:]
+
+
+def get_allowed_exmods():
+ return ALLOWED_EXMODS + EXTRA_EXMODS
+
+
+class JsonPayloadSerializer(messaging.NoOpSerializer):
+ @staticmethod
+ def serialize_entity(context, entity):
+ return jsonutils.to_primitive(entity, convert_instances=True)
+
+
+class RequestContextSerializer(messaging.Serializer):
+
+ def __init__(self, base):
+ self._base = base
+
+ def serialize_entity(self, context, entity):
+ if not self._base:
+ return entity
+ return self._base.serialize_entity(context, entity)
+
+ def deserialize_entity(self, context, entity):
+ if not self._base:
+ return entity
+ return self._base.deserialize_entity(context, entity)
+
+ def serialize_context(self, context):
+ return context.to_dict()
+
+ def deserialize_context(self, context):
+ return iotronic_context.RequestContext.from_dict(context)
+
+
+def get_transport_url(url_str=None):
+ return messaging.TransportURL.parse(CONF, url_str, TRANSPORT_ALIASES)
+
+
+def get_client(target, version_cap=None, serializer=None):
+ assert TRANSPORT is not None
+ serializer = RequestContextSerializer(serializer)
+ return messaging.RPCClient(TRANSPORT,
+ target,
+ version_cap=version_cap,
+ serializer=serializer)
+
+
+def get_server(target, endpoints, serializer=None):
+ assert TRANSPORT is not None
+ serializer = RequestContextSerializer(serializer)
+ return messaging.get_rpc_server(TRANSPORT,
+ target,
+ endpoints,
+ executor='eventlet',
+ serializer=serializer)
+
+
+def get_notifier(service=None, host=None, publisher_id=None):
+ assert NOTIFIER is not None
+ if not publisher_id:
+ publisher_id = "%s.%s" % (service, host or CONF.host)
+ return NOTIFIER.prepare(publisher_id=publisher_id)
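+
+# Illustrative (assumed) wiring of this module; the topic string, version
+# cap, and the context/node_obj placeholders below are example values only:
+#
+#   import oslo_messaging as messaging
+#   from oslo_config import cfg
+#   from iotronic.common import rpc
+#
+#   rpc.init(cfg.CONF)
+#   target = messaging.Target(topic='iotronic.conductor_manager',
+#                             version='1.0')
+#   client = rpc.get_client(target, version_cap='1.0')
+#   client.call(context, 'update_node', node_obj=node_obj)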
diff --git a/iotronic/common/safe_utils.py b/iotronic/common/safe_utils.py
new file mode 100644
index 0000000..0d4572e
--- /dev/null
+++ b/iotronic/common/safe_utils.py
@@ -0,0 +1,53 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utilities and helper functions that won't produce circular imports."""
+
+import inspect
+
+
+def getcallargs(function, *args, **kwargs):
+ """This is a simplified inspect.getcallargs (2.7+).
+
+ It should be replaced when python >= 2.7 is standard.
+ """
+ keyed_args = {}
+ argnames, varargs, keywords, defaults = inspect.getargspec(function)
+
+ keyed_args.update(kwargs)
+
+ # NOTE(alaski) the implicit 'self' or 'cls' argument shows up in
+ # argnames but not in args or kwargs. Uses 'in' rather than '==' because
+ # some tests use 'self2'.
+ if 'self' in argnames[0] or 'cls' == argnames[0]:
+ # The function may not actually be a method or have __self__.
+ # Typically seen when it's stubbed with mox.
+ if inspect.ismethod(function) and hasattr(function, '__self__'):
+ keyed_args[argnames[0]] = function.__self__
+ else:
+ keyed_args[argnames[0]] = None
+
+ remaining_argnames = filter(lambda x: x not in keyed_args, argnames)
+ keyed_args.update(dict(zip(remaining_argnames, args)))
+
+ if defaults:
+ num_defaults = len(defaults)
+ for argname, value in zip(argnames[-num_defaults:], defaults):
+ if argname not in keyed_args:
+ keyed_args[argname] = value
+
+ return keyed_args
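+
+# Illustrative (assumed) example of the mapping this builds:
+#
+#   def deploy(node, timeout=60):
+#       pass
+#
+#   getcallargs(deploy, 'node-1') == {'node': 'node-1', 'timeout': 60}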
diff --git a/iotronic/common/service.py b/iotronic/common/service.py
new file mode 100644
index 0000000..cab51b2
--- /dev/null
+++ b/iotronic/common/service.py
@@ -0,0 +1,138 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright © 2012 eNovance
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import signal
+import socket
+
+from oslo_config import cfg
+from oslo_context import context
+from oslo_log import log
+import oslo_messaging as messaging
+from oslo_utils import importutils
+
+from iotronic.common import config
+from iotronic.common.i18n import _LE
+from iotronic.common.i18n import _LI
+from iotronic.common import rpc
+from iotronic.objects import base as objects_base
+from iotronic.openstack.common import service
+
+
+service_opts = [
+ cfg.IntOpt('periodic_interval',
+ default=60,
+ help='Seconds between running periodic tasks.'),
+ cfg.StrOpt('host',
+ default=socket.getfqdn(),
+ help='Name of this node. This can be an opaque identifier. '
+ 'It is not necessarily a hostname, FQDN, or IP address. '
+ 'However, the node name must be valid within '
+ 'an AMQP key, and if using ZeroMQ, a valid '
+ 'hostname, FQDN, or IP address.'),
+]
+
+cfg.CONF.register_opts(service_opts)
+
+LOG = log.getLogger(__name__)
+
+
+class RPCService(service.Service):
+
+ def __init__(self, host, manager_module, manager_class):
+ super(RPCService, self).__init__()
+ self.host = host
+ manager_module = importutils.try_import(manager_module)
+ manager_class = getattr(manager_module, manager_class)
+ self.manager = manager_class(host, manager_module.MANAGER_TOPIC)
+ self.topic = self.manager.topic
+ self.rpcserver = None
+ self.deregister = True
+
+ def start(self):
+ super(RPCService, self).start()
+ admin_context = context.RequestContext('admin', 'admin', is_admin=True)
+
+ target = messaging.Target(topic=self.topic, server=self.host)
+ endpoints = [self.manager]
+ serializer = objects_base.IotronicObjectSerializer()
+ self.rpcserver = rpc.get_server(target, endpoints, serializer)
+ self.rpcserver.start()
+
+ self.handle_signal()
+ self.manager.init_host()
+ self.tg.add_dynamic_timer(
+ self.manager.periodic_tasks,
+ periodic_interval_max=cfg.CONF.periodic_interval,
+ context=admin_context)
+
+ LOG.info(_LI('Created RPC server for service %(service)s on host '
+ '%(host)s.'),
+ {'service': self.topic, 'host': self.host})
+
+ def stop(self):
+ try:
+ self.rpcserver.stop()
+ self.rpcserver.wait()
+ except Exception as e:
+ LOG.exception(_LE('Service error occurred when stopping the '
+ 'RPC server. Error: %s'), e)
+ try:
+ self.manager.del_host(deregister=self.deregister)
+ except Exception as e:
+ LOG.exception(_LE('Service error occurred when cleaning up '
+ 'the RPC manager. Error: %s'), e)
+
+ super(RPCService, self).stop(graceful=True)
+ LOG.info(_LI('Stopped RPC server for service %(service)s on host '
+ '%(host)s.'),
+ {'service': self.topic, 'host': self.host})
+
+ def _handle_signal(self, signo, frame):
+ LOG.info(_LI('Got signal SIGUSR1. Not deregistering on next shutdown '
+ 'of service %(service)s on host %(host)s.'),
+ {'service': self.topic, 'host': self.host})
+ self.deregister = False
+
+ def handle_signal(self):
+ """Add a signal handler for SIGUSR1.
+
+ The handler ensures that the manager is not deregistered when the
+ service is shut down.
+ """
+ signal.signal(signal.SIGUSR1, self._handle_signal)
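+ # Illustrative (assumed) operator usage: sending SIGUSR1 to a running
+ # conductor, e.g. "kill -USR1 <pid>", makes the next shutdown keep
+ # the registration.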
+
+
+def prepare_service(argv=[]):
+ log.register_options(cfg.CONF)
+
+ log.set_defaults(default_log_levels=['amqp=WARN',
+ 'amqplib=WARN',
+ 'qpid.messaging=INFO',
+ 'oslo.messaging=INFO',
+ 'sqlalchemy=WARN',
+ 'keystoneclient=INFO',
+ 'stevedore=INFO',
+ 'eventlet.wsgi.server=WARN',
+ 'iso8601=WARN',
+ 'paramiko=WARN',
+ 'requests=WARN',
+ 'neutronclient=WARN',
+ 'glanceclient=WARN',
+ 'iotronic.openstack.common=WARN',
+ 'urllib3.connectionpool=WARN',
+ ])
+ config.parse_args(argv)
+ log.setup(cfg.CONF, 'iotronic')
diff --git a/iotronic/common/states.py b/iotronic/common/states.py
new file mode 100644
index 0000000..8632767
--- /dev/null
+++ b/iotronic/common/states.py
@@ -0,0 +1,298 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Mapping of bare metal node states.
+
+Setting the node `power_state` is handled by the conductor's power
+synchronization thread. Based on the power state retrieved from the driver
+for the node, the state is set to POWER_ON or POWER_OFF, accordingly.
+Should this fail, the `power_state` value is left unchanged, and the node
+is placed into maintenance mode.
+
+The `power_state` can also be set manually via the API. A failure to change
+the state leaves the current state unchanged. The node is NOT placed into
+maintenance mode in this case.
+"""
+
+from oslo_log import log as logging
+
+from iotronic.common import fsm
+
+LOG = logging.getLogger(__name__)
+
+#####################
+# Provisioning states
+#####################
+
+# TODO(deva): add add'l state mappings here
+VERBS = {
+ 'active': 'deploy',
+ 'deleted': 'delete',
+ 'manage': 'manage',
+ 'provide': 'provide',
+ 'inspect': 'inspect',
+}
+""" Mapping of state-changing events that are PUT to the REST API
+
+This is a mapping of target states which are PUT to the API, eg,
+ PUT /v1/node/states/provision {'target': 'active'}
+
+The dict format is:
+ {target string used by the API: internal verb}
+
+This provides a reference set of supported actions, and in the future
+may be used to support renaming these actions.
+"""
+
+NOSTATE = None
+""" No state information.
+
+This state is used with power_state to represent a lack of knowledge of
+power state, and in target_*_state fields when there is no target.
+"""
+
+MANAGEABLE = 'manageable'
+""" Node is in a manageable state.
+
+This state indicates that Iotronic has verified, at least once, that it had
+sufficient information to manage the hardware. While in this state, the node
+is not available for provisioning (it must be in the AVAILABLE state for that).
+"""
+
+AVAILABLE = 'available'
+""" Node is available for use and scheduling.
+
+This state is replacing the NOSTATE state used prior to Kilo.
+"""
+
+ACTIVE = 'active'
+""" Node is successfully deployed and associated with an instance. """
+
+DEPLOYWAIT = 'wait call-back'
+""" Node is waiting to be deployed.
+
+This will be the node `provision_state` while the node is waiting for
+the driver to finish deployment.
+"""
+
+DEPLOYING = 'deploying'
+""" Node is ready to receive a deploy request, or is currently being deployed.
+
+A node will have its `provision_state` set to DEPLOYING briefly before it
+receives its initial deploy request. It will also move to this state from
+DEPLOYWAIT after the callback is triggered and deployment is continued
+(disk partitioning and image copying).
+"""
+
+DEPLOYFAIL = 'deploy failed'
+""" Node deployment failed. """
+
+DEPLOYDONE = 'deploy complete'
+""" Node was successfully deployed.
+
+This is mainly a target provision state used during deployment. A successfully
+deployed node should go to ACTIVE status.
+"""
+
+DELETING = 'deleting'
+""" Node is actively being torn down. """
+
+DELETED = 'deleted'
+""" Node tear down was successful.
+
+In Juno, target_provision_state was set to this value during node tear down.
+
+In Kilo, this will be a transitory value of provision_state, and never
+represented in target_provision_state.
+"""
+
+CLEANING = 'cleaning'
+""" Node is being automatically cleaned to prepare it for provisioning. """
+
+CLEANFAIL = 'clean failed'
+""" Node failed cleaning. This requires operator intervention to resolve. """
+
+ERROR = 'error'
+""" An error occurred during node processing.
+
+The `last_error` attribute of the node details should contain an error message.
+"""
+
+REBUILD = 'rebuild'
+""" Node is to be rebuilt.
+
+This is not used as a state, but rather as a "verb" when changing the node's
+provision_state via the REST API.
+"""
+
+INSPECTING = 'inspecting'
+""" Node is under inspection.
+
+This is the provision state used when inspection is started. A successfully
+inspected node shall transition to MANAGEABLE status.
+"""
+
+
+INSPECTFAIL = 'inspect failed'
+""" Node inspection failed. """
+
+
+UPDATE_ALLOWED_STATES = (DEPLOYFAIL, INSPECTING, INSPECTFAIL, CLEANFAIL)
+"""Transitional states in which we allow updating a node."""
+
+
+##############
+# Power states
+##############
+
+POWER_ON = 'power on'
+""" Node is powered on. """
+
+POWER_OFF = 'power off'
+""" Node is powered off. """
+
+REBOOT = 'rebooting'
+""" Node is rebooting. """
+
+
+#####################
+# State machine model
+#####################
+def on_exit(old_state, event):
+ """Used to log when a state is exited."""
+ LOG.debug("Exiting old state '%s' in response to event '%s'",
+ old_state, event)
+
+
+def on_enter(new_state, event):
+ """Used to log when entering a state."""
+ LOG.debug("Entering new state '%s' in response to event '%s'",
+ new_state, event)
+
+watchers = {}
+watchers['on_exit'] = on_exit
+watchers['on_enter'] = on_enter
+
+machine = fsm.FSM()
+
+# Add stable states
+machine.add_state(MANAGEABLE, stable=True, **watchers)
+machine.add_state(AVAILABLE, stable=True, **watchers)
+machine.add_state(ACTIVE, stable=True, **watchers)
+machine.add_state(ERROR, stable=True, **watchers)
+
+# Add deploy* states
+# NOTE(deva): Juno shows a target_provision_state of DEPLOYDONE
+# this is changed in Kilo to ACTIVE
+machine.add_state(DEPLOYING, target=ACTIVE, **watchers)
+machine.add_state(DEPLOYWAIT, target=ACTIVE, **watchers)
+machine.add_state(DEPLOYFAIL, target=ACTIVE, **watchers)
+
+# Add clean* states
+machine.add_state(CLEANING, target=AVAILABLE, **watchers)
+machine.add_state(CLEANFAIL, target=AVAILABLE, **watchers)
+
+# Add delete* states
+machine.add_state(DELETING, target=AVAILABLE, **watchers)
+
+# From AVAILABLE, a deployment may be started
+machine.add_transition(AVAILABLE, DEPLOYING, 'deploy')
+
+# Add inspect* states.
+machine.add_state(INSPECTING, target=MANAGEABLE, **watchers)
+machine.add_state(INSPECTFAIL, target=MANAGEABLE, **watchers)
+
+# A deployment may fail
+machine.add_transition(DEPLOYING, DEPLOYFAIL, 'fail')
+
+# A failed deployment may be retried
+# iotronic/conductor/manager.py:do_node_deploy()
+machine.add_transition(DEPLOYFAIL, DEPLOYING, 'rebuild')
+# NOTE(deva): Juno allows a client to send "active" to initiate a rebuild
+machine.add_transition(DEPLOYFAIL, DEPLOYING, 'deploy')
+
+# A deployment may also wait on external callbacks
+machine.add_transition(DEPLOYING, DEPLOYWAIT, 'wait')
+machine.add_transition(DEPLOYWAIT, DEPLOYING, 'resume')
+
+# A deployment waiting on callback may time out
+machine.add_transition(DEPLOYWAIT, DEPLOYFAIL, 'fail')
+
+# A deployment may complete
+machine.add_transition(DEPLOYING, ACTIVE, 'done')
+
+# An active instance may be re-deployed
+# iotronic/conductor/manager.py:do_node_deploy()
+machine.add_transition(ACTIVE, DEPLOYING, 'rebuild')
+
+# An active instance may be deleted
+# iotronic/conductor/manager.py:do_node_tear_down()
+machine.add_transition(ACTIVE, DELETING, 'delete')
+
+# While a deployment is waiting, it may be deleted
+# iotronic/conductor/manager.py:do_node_tear_down()
+machine.add_transition(DEPLOYWAIT, DELETING, 'delete')
+
+# A failed deployment may also be deleted
+# iotronic/conductor/manager.py:do_node_tear_down()
+machine.add_transition(DEPLOYFAIL, DELETING, 'delete')
+
+# This state can also transition to error
+machine.add_transition(DELETING, ERROR, 'error')
+
+# When finished deleting, a node will begin cleaning
+machine.add_transition(DELETING, CLEANING, 'clean')
+
+# If cleaning succeeds, it becomes available for scheduling
+machine.add_transition(CLEANING, AVAILABLE, 'done')
+
+# If cleaning fails, wait for operator intervention
+machine.add_transition(CLEANING, CLEANFAIL, 'fail')
+
+# An operator may want to move a CLEANFAIL node to MANAGEABLE, to perform
+# other actions like zapping
+machine.add_transition(CLEANFAIL, MANAGEABLE, 'manage')
+
+# From MANAGEABLE, a node may move to available after going through cleaning
+machine.add_transition(MANAGEABLE, CLEANING, 'provide')
+
+# From AVAILABLE, a node may be made unavailable by managing it
+machine.add_transition(AVAILABLE, MANAGEABLE, 'manage')
+
+# An errored instance can be rebuilt
+# iotronic/conductor/manager.py:do_node_deploy()
+machine.add_transition(ERROR, DEPLOYING, 'rebuild')
+# or deleted
+# iotronic/conductor/manager.py:do_node_tear_down()
+machine.add_transition(ERROR, DELETING, 'delete')
+
+# Added transitions for inspection.
+# Initiate inspection.
+machine.add_transition(MANAGEABLE, INSPECTING, 'inspect')
+
+# iotronic/conductor/manager.py:inspect_hardware().
+machine.add_transition(INSPECTING, MANAGEABLE, 'done')
+
+# Inspection may fail.
+machine.add_transition(INSPECTING, INSPECTFAIL, 'fail')
+
+# Move the node to manageable state for any other
+# action.
+machine.add_transition(INSPECTFAIL, MANAGEABLE, 'manage')
+
+# Reinitiate the inspect after inspectfail.
+machine.add_transition(INSPECTFAIL, INSPECTING, 'inspect')
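+
+# Illustrative (assumed) walk through the machine above, presuming the
+# fsm.FSM API mirrors Ironic's (copy()/initialize()/process_event(),
+# applied to a per-node copy of the module-level machine):
+#
+#   m = machine.copy()
+#   m.initialize(AVAILABLE)
+#   m.process_event('deploy')   # -> DEPLOYING
+#   m.process_event('done')     # -> ACTIVE
+#   m.process_event('delete')   # -> DELETING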
diff --git a/iotronic/common/swift.py b/iotronic/common/swift.py
new file mode 100644
index 0000000..9a0df29
--- /dev/null
+++ b/iotronic/common/swift.py
@@ -0,0 +1,191 @@
+#
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from six.moves.urllib import parse
+from swiftclient import client as swift_client
+from swiftclient import exceptions as swift_exceptions
+from swiftclient import utils as swift_utils
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common import keystone
+
+swift_opts = [
+ cfg.IntOpt('swift_max_retries',
+ default=2,
+ help='Maximum number of times to retry a Swift request, '
+ 'before failing.')
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(swift_opts, group='swift')
+
+CONF.import_opt('admin_user', 'keystonemiddleware.auth_token',
+ group='keystone_authtoken')
+CONF.import_opt('admin_tenant_name', 'keystonemiddleware.auth_token',
+ group='keystone_authtoken')
+CONF.import_opt('admin_password', 'keystonemiddleware.auth_token',
+ group='keystone_authtoken')
+CONF.import_opt('auth_uri', 'keystonemiddleware.auth_token',
+ group='keystone_authtoken')
+CONF.import_opt('auth_version', 'keystonemiddleware.auth_token',
+ group='keystone_authtoken')
+CONF.import_opt('insecure', 'keystonemiddleware.auth_token',
+ group='keystone_authtoken')
+CONF.import_opt('cafile', 'keystonemiddleware.auth_token',
+ group='keystone_authtoken')
+
+LOG = logging.getLogger(__name__)
+
+
+class SwiftAPI(object):
+ """API for communicating with Swift."""
+
+ def __init__(self,
+ user=CONF.keystone_authtoken.admin_user,
+ tenant_name=CONF.keystone_authtoken.admin_tenant_name,
+ key=CONF.keystone_authtoken.admin_password,
+ auth_url=CONF.keystone_authtoken.auth_uri,
+ auth_version=CONF.keystone_authtoken.auth_version):
+ """Constructor for creating a SwiftAPI object.
+
+ :param user: the name of the user for Swift account
+ :param tenant_name: the name of the tenant for Swift account
+ :param key: the 'password' or key to authenticate with
+ :param auth_url: the url for authentication
+ :param auth_version: the version of api to use for authentication
+ """
+ auth_url = keystone.get_keystone_url(auth_url, auth_version)
+ params = {'retries': CONF.swift.swift_max_retries,
+ 'insecure': CONF.keystone_authtoken.insecure,
+ 'cacert': CONF.keystone_authtoken.cafile,
+ 'user': user,
+ 'tenant_name': tenant_name,
+ 'key': key,
+ 'authurl': auth_url,
+ 'auth_version': auth_version}
+
+ self.connection = swift_client.Connection(**params)
+
+ def create_object(self, container, object, filename,
+ object_headers=None):
+ """Uploads a given file to Swift.
+
+ :param container: The name of the container for the object.
+ :param object: The name of the object in Swift
+ :param filename: The file to upload, as the object data
+ :param object_headers: the headers for the object to pass to Swift
+ :returns: The Swift UUID of the object
+ :raises: SwiftOperationError, if any operation with Swift fails.
+ """
+ try:
+ self.connection.put_container(container)
+ except swift_exceptions.ClientException as e:
+ operation = _("put container")
+ raise exception.SwiftOperationError(operation=operation, error=e)
+
+ with open(filename, "r") as fileobj:
+
+ try:
+ obj_uuid = self.connection.put_object(container,
+ object,
+ fileobj,
+ headers=object_headers)
+ except swift_exceptions.ClientException as e:
+ operation = _("put object")
+ raise exception.SwiftOperationError(operation=operation,
+ error=e)
+
+ return obj_uuid
+
+ def get_temp_url(self, container, object, timeout):
+ """Returns the temp url for the given Swift object.
+
+ :param container: The name of the container in which Swift object
+ is placed.
+ :param object: The name of the Swift object.
+ :param timeout: The timeout in seconds after which the generated url
+ should expire.
+ :returns: The temp url for the object.
+ :raises: SwiftOperationError, if any operation with Swift fails.
+ """
+ try:
+ account_info = self.connection.head_account()
+ except swift_exceptions.ClientException as e:
+ operation = _("head account")
+ raise exception.SwiftOperationError(operation=operation,
+ error=e)
+
+ storage_url, token = self.connection.get_auth()
+ parse_result = parse.urlparse(storage_url)
+ swift_object_path = '/'.join((parse_result.path, container, object))
+ temp_url_key = account_info['x-account-meta-temp-url-key']
+ url_path = swift_utils.generate_temp_url(swift_object_path, timeout,
+ temp_url_key, 'GET')
+ return parse.urlunparse((parse_result.scheme,
+ parse_result.netloc,
+ url_path,
+ None,
+ None,
+ None))
+
+ def delete_object(self, container, object):
+ """Deletes the given Swift object.
+
+ :param container: The name of the container in which Swift object
+ is placed.
+ :param object: The name of the object in Swift to be deleted.
+ :raises: SwiftOperationError, if operation with Swift fails.
+ """
+ try:
+ self.connection.delete_object(container, object)
+ except swift_exceptions.ClientException as e:
+ operation = _("delete object")
+ raise exception.SwiftOperationError(operation=operation, error=e)
+
+ def head_object(self, container, object):
+ """Retrieves the information about the given Swift object.
+
+ :param container: The name of the container in which Swift object
+ is placed.
+ :param object: The name of the object in Swift
+ :returns: The information about the object as returned by
+ Swift client's head_object call.
+ :raises: SwiftOperationError, if operation with Swift fails.
+ """
+ try:
+ return self.connection.head_object(container, object)
+ except swift_exceptions.ClientException as e:
+ operation = _("head object")
+ raise exception.SwiftOperationError(operation=operation, error=e)
+
+ def update_object_meta(self, container, object, object_headers):
+ """Update the metadata of a given Swift object.
+
+ :param container: The name of the container in which Swift object
+ is placed.
+ :param object: The name of the object in Swift
+ :param object_headers: the headers for the object to pass to Swift
+ :raises: SwiftOperationError, if operation with Swift fails.
+ """
+ try:
+ self.connection.post_object(container, object, object_headers)
+ except swift_exceptions.ClientException as e:
+ operation = _("post object")
+ raise exception.SwiftOperationError(operation=operation, error=e)
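+
+# Illustrative (assumed) usage; container and object names are examples:
+#
+#   swift_api = SwiftAPI()
+#   obj_uuid = swift_api.create_object('iotronic', 'configdrive-1234',
+#                                      '/tmp/configdrive')
+#   url = swift_api.get_temp_url('iotronic', 'configdrive-1234',
+#                                timeout=3600)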
diff --git a/iotronic/common/utils.py b/iotronic/common/utils.py
new file mode 100644
index 0000000..1fb268a
--- /dev/null
+++ b/iotronic/common/utils.py
@@ -0,0 +1,599 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utilities and helper functions."""
+
+import contextlib
+import errno
+import hashlib
+import os
+import random
+import re
+import shutil
+import tempfile
+
+import netaddr
+from oslo_concurrency import processutils
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+import paramiko
+import six
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common.i18n import _LE
+from iotronic.common.i18n import _LW
+
+utils_opts = [
+ cfg.StrOpt('rootwrap_config',
+ default="/etc/iotronic/rootwrap.conf",
+ help='Path to the rootwrap configuration file to use for '
+ 'running commands as root.'),
+ cfg.StrOpt('tempdir',
+ help='Explicitly specify the temporary working directory.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(utils_opts)
+
+LOG = logging.getLogger(__name__)
+
+
+def _get_root_helper():
+ return 'sudo iotronic-rootwrap %s' % CONF.rootwrap_config
+
+
+def execute(*cmd, **kwargs):
+ """Convenience wrapper around oslo's execute() method.
+
+ :param cmd: Passed to processutils.execute.
+ :param use_standard_locale: True | False. Defaults to False. If set to
+ True, execute command with standard locale
+ added to environment variables.
+ :returns: (stdout, stderr) from process execution
+ :raises: UnknownArgumentError
+ :raises: ProcessExecutionError
+ """
+
+ use_standard_locale = kwargs.pop('use_standard_locale', False)
+ if use_standard_locale:
+ env = kwargs.pop('env_variables', os.environ.copy())
+ env['LC_ALL'] = 'C'
+ kwargs['env_variables'] = env
+ if kwargs.get('run_as_root') and 'root_helper' not in kwargs:
+ kwargs['root_helper'] = _get_root_helper()
+ result = processutils.execute(*cmd, **kwargs)
+ LOG.debug('Execution completed, command line is "%s"',
+ ' '.join(map(str, cmd)))
+ LOG.debug('Command stdout is: "%s"' % result[0])
+ LOG.debug('Command stderr is: "%s"' % result[1])
+ return result
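+
+# Illustrative (assumed) usage; the command and device are examples only:
+#   out, err = execute('blockdev', '--getsz', '/dev/sda',
+#                      run_as_root=True, use_standard_locale=True)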
+
+
+def trycmd(*args, **kwargs):
+ """Convenience wrapper around oslo's trycmd() method."""
+ if kwargs.get('run_as_root') and 'root_helper' not in kwargs:
+ kwargs['root_helper'] = _get_root_helper()
+ return processutils.trycmd(*args, **kwargs)
+
+
+def ssh_connect(connection):
+ """Method to connect to a remote system using ssh protocol.
+
+ :param connection: a dict of connection parameters.
+ :returns: paramiko.SSHClient -- an active ssh connection.
+ :raises: SSHConnectFailed
+
+ """
+ try:
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ key_contents = connection.get('key_contents')
+ if key_contents:
+ data = six.moves.StringIO(key_contents)
+ if "BEGIN RSA PRIVATE" in key_contents:
+ pkey = paramiko.RSAKey.from_private_key(data)
+ elif "BEGIN DSA PRIVATE" in key_contents:
+ pkey = paramiko.DSSKey.from_private_key(data)
+ else:
+ # Can't include the key contents - secure material.
+ raise ValueError(_("Invalid private key"))
+ else:
+ pkey = None
+ ssh.connect(connection.get('host'),
+ username=connection.get('username'),
+ password=connection.get('password'),
+ port=connection.get('port', 22),
+ pkey=pkey,
+ key_filename=connection.get('key_filename'),
+ timeout=connection.get('timeout', 10))
+
+ # send TCP keepalive packets every 20 seconds
+ ssh.get_transport().set_keepalive(20)
+ except Exception as e:
+ LOG.debug("SSH connect failed: %s" % e)
+ raise exception.SSHConnectFailed(host=connection.get('host'))
+
+ return ssh
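+
+# Illustrative (assumed) connection dict; host and key path are examples:
+#   ssh = ssh_connect({'host': '192.0.2.20', 'username': 'root',
+#                      'key_filename': '/etc/iotronic/ssh_key'})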
+
+
+def generate_uid(topic, size=8):
+ characters = '01234567890abcdefghijklmnopqrstuvwxyz'
+ choices = [random.choice(characters) for _x in range(size)]
+ return '%s-%s' % (topic, ''.join(choices))
+
+
+def random_alnum(size=32):
+ characters = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+ return ''.join(random.choice(characters) for _ in range(size))
+
+
+def delete_if_exists(pathname):
+ """delete a file, but ignore file not found error."""
+
+ try:
+ os.unlink(pathname)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ return
+ else:
+ raise
+
+
+def is_valid_boolstr(val):
+ """Check if the provided string is a valid bool string or not."""
+ boolstrs = ('true', 'false', 'yes', 'no', 'y', 'n', '1', '0')
+ return str(val).lower() in boolstrs
+
+
+def is_valid_mac(address):
+ """Verify the format of a MAC address.
+
+ Check if a MAC address is valid and contains six octets. Accepts
+ colon-separated format only.
+
+ :param address: MAC address to be validated.
+ :returns: True if valid. False if not.
+
+ """
+ m = "[0-9a-f]{2}(:[0-9a-f]{2}){5}$"
+ return (isinstance(address, six.string_types) and
+ re.match(m, address.lower()))
+
+
+def is_hostname_safe(hostname):
+ """Determine if the supplied hostname is RFC compliant.
+
+ Check that the supplied hostname conforms to:
+ * http://en.wikipedia.org/wiki/Hostname
+ * http://tools.ietf.org/html/rfc952
+ * http://tools.ietf.org/html/rfc1123
+ Allowing for hostnames, and hostnames + domains.
+
+ :param hostname: The hostname to be validated.
+ :returns: True if valid. False if not.
+
+ """
+ if not isinstance(hostname, six.string_types) or len(hostname) > 255:
+ return False
+
+ # Periods on the end of a hostname are ok, but complicates the
+ # regex so we'll do this manually
+ if hostname.endswith('.'):
+ hostname = hostname[:-1]
+
+ host = '[a-z0-9]([a-z0-9\-]{0,61}[a-z0-9])?'
+ domain = '[a-z0-9\-_]{0,62}[a-z0-9]'
+
+ m = '^' + host + '(\.' + domain + ')*$'
+
+ return re.match(m, hostname) is not None
+
+
+def validate_and_normalize_mac(address):
+ """Validate a MAC address and return normalized form.
+
+ Checks whether the supplied MAC address is formally correct and
+ normalizes it to all lower case.
+
+ :param address: MAC address to be validated and normalized.
+ :returns: Normalized and validated MAC address.
+ :raises: InvalidMAC If the MAC address is not valid.
+
+ """
+ if not is_valid_mac(address):
+ raise exception.InvalidMAC(mac=address)
+ return address.lower()
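+
+# Illustrative examples (assumed):
+#   validate_and_normalize_mac('AA:BB:CC:DD:EE:FF')  # -> 'aa:bb:cc:dd:ee:ff'
+#   validate_and_normalize_mac('aabb.ccdd.eeff')     # raises InvalidMAC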
+
+
+def is_valid_ipv6_cidr(address):
+ try:
+ str(netaddr.IPNetwork(address, version=6).cidr)
+ return True
+ except Exception:
+ return False
+
+
+def get_shortened_ipv6(address):
+ addr = netaddr.IPAddress(address, version=6)
+ return str(addr.ipv6())
+
+
+def get_shortened_ipv6_cidr(address):
+ net = netaddr.IPNetwork(address, version=6)
+ return str(net.cidr)
+
+
+def is_valid_cidr(address):
+ """Check if the provided ipv4 or ipv6 address is a valid CIDR address."""
+ try:
+ # Validate the correct CIDR Address
+ netaddr.IPNetwork(address)
+ except netaddr.core.AddrFormatError:
+ return False
+ except UnboundLocalError:
+ # NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in
+ # https://github.com/drkjam/netaddr/issues/2)
+ return False
+
+ # Prior validation partially verify /xx part
+ # Verify it here
+ ip_segment = address.split('/')
+
+ if (len(ip_segment) <= 1 or
+ ip_segment[1] == ''):
+ return False
+
+ return True
+
+
+def get_ip_version(network):
+ """Returns the IP version of a network (IPv4 or IPv6).
+
+ :raises: AddrFormatError if invalid network.
+ """
+ if netaddr.IPNetwork(network).version == 6:
+ return "IPv6"
+ elif netaddr.IPNetwork(network).version == 4:
+ return "IPv4"
+
+
+def convert_to_list_dict(lst, label):
+ """Convert a value or list into a list of dicts."""
+ if not lst:
+ return None
+ if not isinstance(lst, list):
+ lst = [lst]
+ return [{label: x} for x in lst]
+
+
+def sanitize_hostname(hostname):
+ """Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
+ if isinstance(hostname, six.text_type):
+ hostname = hostname.encode('latin-1', 'ignore')
+
+ hostname = re.sub(b'[ _]', b'-', hostname)
+ hostname = re.sub(b'[^\w.-]+', b'', hostname)
+ hostname = hostname.lower()
+ hostname = hostname.strip(b'.-')
+
+ return hostname
+
+
+def read_cached_file(filename, cache_info, reload_func=None):
+ """Read from a file if it has been modified.
+
+ :param cache_info: dictionary to hold opaque cache.
+ :param reload_func: optional function to be called with data when
+ file is reloaded due to a modification.
+
+ :returns: data from file
+
+ """
+ mtime = os.path.getmtime(filename)
+ if not cache_info or mtime != cache_info.get('mtime'):
+ LOG.debug("Reloading cached file %s" % filename)
+ with open(filename) as fap:
+ cache_info['data'] = fap.read()
+ cache_info['mtime'] = mtime
+ if reload_func:
+ reload_func(cache_info['data'])
+ return cache_info['data']
+
+
+def file_open(*args, **kwargs):
+ """Open file
+
+ see built-in file() documentation for more details
+
+ Note: The reason this is kept in a separate module is to easily
+ be able to provide a stub module that doesn't alter system
+ state at all (for unit tests)
+ """
+ return file(*args, **kwargs)
+
+
+def hash_file(file_like_object):
+ """Generate a hash for the contents of a file."""
+ checksum = hashlib.sha1()
+ for chunk in iter(lambda: file_like_object.read(32768), b''):
+ checksum.update(chunk)
+ return checksum.hexdigest()
+
+
+@contextlib.contextmanager
+def temporary_mutation(obj, **kwargs):
+ """Temporarily change object attribute.
+
+ Temporarily set the attr on a particular object to a given value then
+ revert when finished.
+
+ One use of this is to temporarily set the read_deleted flag on a context
+ object:
+
+ with temporary_mutation(context, read_deleted="yes"):
+ do_something_that_needed_deleted_objects()
+ """
+ def is_dict_like(thing):
+ return hasattr(thing, 'has_key')
+
+ def get(thing, attr, default):
+ if is_dict_like(thing):
+ return thing.get(attr, default)
+ else:
+ return getattr(thing, attr, default)
+
+ def set_value(thing, attr, val):
+ if is_dict_like(thing):
+ thing[attr] = val
+ else:
+ setattr(thing, attr, val)
+
+ def delete(thing, attr):
+ if is_dict_like(thing):
+ del thing[attr]
+ else:
+ delattr(thing, attr)
+
+ NOT_PRESENT = object()
+
+ old_values = {}
+ for attr, new_value in kwargs.items():
+ old_values[attr] = get(obj, attr, NOT_PRESENT)
+ set_value(obj, attr, new_value)
+
+ try:
+ yield
+ finally:
+ for attr, old_value in old_values.items():
+ if old_value is NOT_PRESENT:
+ delete(obj, attr)
+ else:
+ set_value(obj, attr, old_value)
+
+
+@contextlib.contextmanager
+def tempdir(**kwargs):
+ tempfile.tempdir = CONF.tempdir
+ tmpdir = tempfile.mkdtemp(**kwargs)
+ try:
+ yield tmpdir
+ finally:
+ try:
+ shutil.rmtree(tmpdir)
+ except OSError as e:
+ LOG.error(_LE('Could not remove tmpdir: %s'), e)
+
+
+def mkfs(fs, path, label=None):
+ """Format a file or block device
+
+ :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4'
+ 'btrfs', etc.)
+ :param path: Path to file or block device to format
+ :param label: Volume label to use
+ """
+ if fs == 'swap':
+ args = ['mkswap']
+ else:
+ args = ['mkfs', '-t', fs]
+ # add -F to force no interactive execute on non-block device.
+ if fs in ('ext3', 'ext4'):
+ args.extend(['-F'])
+ if label:
+ if fs in ('msdos', 'vfat'):
+ label_opt = '-n'
+ else:
+ label_opt = '-L'
+ args.extend([label_opt, label])
+ args.append(path)
+ try:
+ execute(*args, run_as_root=True, use_standard_locale=True)
+ except processutils.ProcessExecutionError as e:
+ with excutils.save_and_reraise_exception() as ctx:
+ if os.strerror(errno.ENOENT) in e.stderr:
+ ctx.reraise = False
+ LOG.exception(_LE('Failed to make file system. '
+ 'File system %s is not supported.'), fs)
+ raise exception.FileSystemNotSupported(fs=fs)
+ else:
+ LOG.exception(_LE('Failed to create a file system '
+ 'in %(path)s. Error: %(error)s'),
+ {'path': path, 'error': e})
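+
+# Illustrative (assumed) invocation: mkfs('ext4', '/dev/sdb1', label='data')
+# builds "mkfs -t ext4 -F -L data /dev/sdb1" and runs it through rootwrap.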
+
+
+def unlink_without_raise(path):
+ try:
+ os.unlink(path)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ return
+ else:
+ LOG.warn(_LW("Failed to unlink %(path)s, error: %(e)s"),
+ {'path': path, 'e': e})
+
+
+def rmtree_without_raise(path):
+ try:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ except OSError as e:
+ LOG.warn(_LW("Failed to remove dir %(path)s, error: %(e)s"),
+ {'path': path, 'e': e})
+
+
+def write_to_file(path, contents):
+ with open(path, 'w') as f:
+ f.write(contents)
+
+
+def create_link_without_raise(source, link):
+ try:
+ os.symlink(source, link)
+ except OSError as e:
+ if e.errno == errno.EEXIST:
+ return
+ else:
+ LOG.warn(_LW("Failed to create symlink from %(source)s to %(link)s"
+ ", error: %(e)s"),
+ {'source': source, 'link': link, 'e': e})
+
+
+def safe_rstrip(value, chars=None):
+ """Removes trailing characters from a string if that does not make it empty
+
+ :param value: A string value that will be stripped.
+ :param chars: Characters to remove.
+ :return: Stripped value.
+
+ """
+ if not isinstance(value, six.string_types):
+ LOG.warn(_LW("Failed to remove trailing character. Returning original "
+ "object. Supplied object is not a string: %s,"), value)
+ return value
+
+ return value.rstrip(chars) or value
+
+
+def mount(src, dest, *args):
+ """Mounts a device/image file on specified location.
+
+ :param src: the path to the source file for mounting
+ :param dest: the path where it needs to be mounted.
+ :param args: a tuple containing the arguments to be
+ passed to mount command.
+ :raises: processutils.ProcessExecutionError if it failed
+ to run the process.
+ """
+ args = ('mount', ) + args + (src, dest)
+ execute(*args, run_as_root=True, check_exit_code=[0])
+
+
+def umount(loc, *args):
+ """Umounts a mounted location.
+
+ :param loc: the path to be unmounted.
+ :param args: a tuple containing the arguments to be
+ passed to the umount command.
+ :raises: processutils.ProcessExecutionError if it failed
+ to run the process.
+ """
+ args = ('umount', ) + args + (loc, )
+ execute(*args, run_as_root=True, check_exit_code=[0])
+
+
+def dd(src, dst, *args):
+ """Execute dd from src to dst.
+
+ :param src: the input file for dd command.
+ :param dst: the output file for dd command.
+ :param args: a tuple containing the arguments to be
+ passed to dd command.
+ :raises: processutils.ProcessExecutionError if it failed
+ to run the process.
+ """
+ LOG.debug("Starting dd process.")
+ execute('dd', 'if=%s' % src, 'of=%s' % dst, *args,
+ run_as_root=True, check_exit_code=[0])
+
+
+def is_http_url(url):
+ url = url.lower()
+ return url.startswith('http://') or url.startswith('https://')
+
+
+def check_dir(directory_to_check=None, required_space=1):
+ """Check a directory is usable.
+
+ This function can be used by drivers to check that directories
+ they need to write to are usable. This should be called from the
+ drivers init function. This function checks that the directory
+ exists and then calls check_dir_writable and check_dir_free_space.
+ If directory_to_check is not provided the default is to use the
+ temp directory.
+
+ :param directory_to_check: the directory to check.
+ :param required_space: amount of space to check for in MiB.
+ :raises: PathNotFound if directory can not be found
+ :raises: DirectoryNotWritable if user is unable to write to the
+ directory
+ :raises InsufficientDiskSpace: if free space is < required space
+ """
+ # check if directory_to_check is passed in, if not set to tempdir
+ if directory_to_check is None:
+ directory_to_check = (tempfile.gettempdir() if CONF.tempdir
+ is None else CONF.tempdir)
+
+ LOG.debug("checking directory: %s", directory_to_check)
+
+ if not os.path.exists(directory_to_check):
+ raise exception.PathNotFound(dir=directory_to_check)
+
+ _check_dir_writable(directory_to_check)
+ _check_dir_free_space(directory_to_check, required_space)
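+
+# Illustrative (assumed) usage from a driver's __init__(); the path is an
+# example only:
+#   check_dir('/var/lib/iotronic/images', required_space=50)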
+
+
+def _check_dir_writable(chk_dir):
+ """Check that the chk_dir is able to be written to.
+
+ :param chk_dir: Directory to check
+ :raises: DirectoryNotWritable if user is unable to write to the
+ directory
+ """
+ is_writable = os.access(chk_dir, os.W_OK)
+ if not is_writable:
+ raise exception.DirectoryNotWritable(dir=chk_dir)
+
+
+def _check_dir_free_space(chk_dir, required_space=1):
+ """Check that directory has some free space.
+
+ :param chk_dir: Directory to check
+ :param required_space: amount of space to check for in MiB.
+ :raises InsufficientDiskSpace: if free space is < required space
+ """
+ # check that we have some free space
+ stat = os.statvfs(chk_dir)
+ # get dir free space in MiB.
+ free_space = float(stat.f_bsize * stat.f_bavail) / 1024 / 1024
+ # check for at least required_space MiB free
+ if free_space < required_space:
+ raise exception.InsufficientDiskSpace(path=chk_dir,
+ required=required_space,
+ actual=free_space)
diff --git a/iotronic/conductor/__init__.py b/iotronic/conductor/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iotronic/conductor/__old/manager.py b/iotronic/conductor/__old/manager.py
new file mode 100644
index 0000000..050c365
--- /dev/null
+++ b/iotronic/conductor/__old/manager.py
@@ -0,0 +1,2195 @@
+# coding=utf-8
+
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# Copyright 2013 International Business Machines Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Conduct all activity related to bare-metal deployments.
+
+A single instance of :py:class:`iotronic.conductor.manager.ConductorManager` is
+created within the *iotronic-conductor* process, and is responsible for
+performing all actions on bare metal resources (Chassis, Nodes, and Ports).
+Commands are received via RPCs. The conductor service also performs periodic
+tasks, eg. to monitor the status of active deployments.
+
+Drivers are loaded via entrypoints by the
+:py:class:`iotronic.common.driver_factory` class. Each driver is instantiated
+only once, when the ConductorManager service starts. In this way, a single
+ConductorManager may use multiple drivers, and manage heterogeneous hardware.
+
+When multiple :py:class:`ConductorManager` are run on different hosts, they are
+all active and cooperatively manage all nodes in the deployment. Nodes are
+locked by each conductor when performing actions which change the state of that
+node; these locks are represented by the
+:py:class:`iotronic.conductor.task_manager.TaskManager` class.
+
+A :py:class:`iotronic.common.hash_ring.HashRing` is used to distribute nodes
+across the set of active conductors which support each node's driver.
+Rebalancing this ring can trigger various actions by each conductor, such as
+building or tearing down the TFTP environment for a node, notifying Neutron of
+a change, etc.
+"""
+'''
+import collections
+import datetime
+import inspect
+import tempfile
+
+
+from iotronic.common import dhcp_factory
+
+
+from iotronic.common.glance_service import service_utils as glance_utils
+
+from iotronic.common import images
+from iotronic.common import rpc
+from iotronic.common import states
+from iotronic.common import swift
+from iotronic.iotconductor import task_manager
+from iotronic.iotconductor import utils
+
+from iotronic import objects
+from iotronic.openstack.common import periodic_task
+'''
+import threading
+import eventlet
+from eventlet import greenpool
+
+from iotronic.db import api as dbapi
+from oslo_config import cfg
+from oslo_db import exception as db_exception
+from oslo_concurrency import lockutils
+from oslo_log import log
+import oslo_messaging as messaging
+from oslo_utils import excutils
+from oslo_utils import uuidutils
+
+from iotronic.common import hash_ring as hash
+from iotronic.common.i18n import _
+from iotronic.common.i18n import _LC
+from iotronic.common.i18n import _LE
+from iotronic.common.i18n import _LI
+from iotronic.common.i18n import _LW
+#from iotronic.common import driver_factory
+
+from iotronic.conductor import task_manager
+
+from iotronic.openstack.common import periodic_task
+from iotronic.common import exception
+
+MANAGER_TOPIC = 'iotronic.conductor_manager'
+WORKER_SPAWN_LOCK = "conductor_worker_spawn"
+
+LOG = log.getLogger(__name__)
+
+conductor_opts = [
+ cfg.StrOpt('api_url',
+ help=('URL of Iotronic API service. If not set iotronic can '
+ 'get the current value from the keystone service '
+ 'catalog.')),
+ cfg.IntOpt('heartbeat_interval',
+ default=10,
+ help='Seconds between conductor heart beats.'),
+ cfg.IntOpt('heartbeat_timeout',
+ default=60,
+ help='Maximum time (in seconds) since the last check-in '
+ 'of a conductor. A conductor is considered inactive '
+ 'when this time has been exceeded.'),
+ cfg.IntOpt('sync_power_state_interval',
+ default=60,
+ help='Interval between syncing the node power state to the '
+ 'database, in seconds.'),
+ cfg.IntOpt('check_provision_state_interval',
+ default=60,
+ help='Interval between checks of provision timeouts, '
+ 'in seconds.'),
+ cfg.IntOpt('deploy_callback_timeout',
+ default=1800,
+ help='Timeout (seconds) to wait for a callback from '
+ 'a deploy ramdisk. Set to 0 to disable timeout.'),
+ cfg.BoolOpt('force_power_state_during_sync',
+ default=True,
+ help='During sync_power_state, should the hardware power '
+ 'state be set to the state recorded in the database '
+ '(True) or should the database be updated based on '
+ 'the hardware state (False).'),
+ cfg.IntOpt('power_state_sync_max_retries',
+ default=3,
+ help='During sync_power_state failures, limit the '
+ 'number of times Iotronic should try syncing the '
+ 'hardware node power state with the node power state '
+ 'in DB'),
+ cfg.IntOpt('periodic_max_workers',
+ default=8,
+ help='Maximum number of worker threads that can be started '
+ 'simultaneously by a periodic task. Should be less '
+ 'than RPC thread pool size.'),
+ cfg.IntOpt('workers_pool_size',
+ default=100,
+ help='The size of the workers greenthread pool.'),
+ cfg.IntOpt('node_locked_retry_attempts',
+ default=3,
+ help='Number of attempts to grab a node lock.'),
+ cfg.IntOpt('node_locked_retry_interval',
+ default=1,
+ help='Seconds to sleep between node lock attempts.'),
+ cfg.BoolOpt('send_sensor_data',
+ default=False,
+ help='Enable sending sensor data message via the '
+ 'notification bus'),
+ cfg.IntOpt('send_sensor_data_interval',
+ default=600,
+ help='Seconds between conductor sending sensor data message'
+ ' to ceilometer via the notification bus.'),
+ cfg.ListOpt('send_sensor_data_types',
+ default=['ALL'],
+ help='List of comma separated meter types which need to be'
+ ' sent to Ceilometer. The default value, "ALL", is a '
+ 'special value meaning send all the sensor data.'),
+ cfg.IntOpt('sync_local_state_interval',
+ default=180,
+ help='When conductors join or leave the cluster, existing '
+ 'conductors may need to update any persistent '
+ 'local state as nodes are moved around the cluster. '
+ 'This option controls how often, in seconds, each '
+ 'conductor will check for nodes that it should '
+ '"take over". Set it to a negative value to disable '
+ 'the check entirely.'),
+ cfg.BoolOpt('configdrive_use_swift',
+ default=False,
+ help='Whether to upload the config drive to Swift.'),
+ cfg.StrOpt('configdrive_swift_container',
+ default='iotronic_configdrive_container',
+ help='Name of the Swift container to store config drive '
+ 'data. Used when configdrive_use_swift is True.'),
+ cfg.IntOpt('inspect_timeout',
+ default=1800,
+ help='Timeout (seconds) for waiting for node inspection. '
+ '0 - unlimited.'),
+ cfg.BoolOpt('clean_nodes',
+ default=True,
+ help='Cleaning is a configurable set of steps, such as '
+ 'erasing disk drives, that are performed on the node '
+ 'to ensure it is in a baseline state and ready to be '
+ 'deployed to. '
+ 'This is done after instance deletion, and during '
+ 'the transition from a "managed" to "available" '
+ 'state. When enabled, the particular steps '
+ 'performed to clean a node depend on which driver '
+ 'that node is managed by; see the individual '
+ 'driver\'s documentation for details. '
+ 'NOTE: The introduction of the cleaning operation '
+ 'causes instance deletion to take significantly '
+ 'longer. In an environment where all tenants are '
+ 'trusted (eg, because there is only one tenant), '
+ 'this option could be safely disabled.'),
+]
+CONF = cfg.CONF
+CONF.register_opts(conductor_opts, 'conductor')
+
+CLEANING_INTERFACE_PRIORITY = {
+ # When two clean steps have the same priority, their order is determined
+ # by which interface is implementing the clean step. The clean step of the
+ # interface with the highest value here, will be executed first in that
+ # case.
+ 'power': 3,
+ 'management': 2,
+ 'deploy': 1
+}
+
+
+class ConductorManager(periodic_task.PeriodicTasks):
+ """Iotronic Conductor manager main class."""
+
+ # NOTE(rloo): This must be in sync with rpcapi.ConductorAPI's.
+ RPC_API_VERSION = '1.0'
+
+ target = messaging.Target(version=RPC_API_VERSION)
+
+ def __init__(self, host, topic):
+ super(ConductorManager, self).__init__()
+ if not host:
+ host = CONF.host
+ self.host = host
+ self.topic = topic
+ #self.power_state_sync_count = collections.defaultdict(int)
+ #self.notifier = rpc.get_notifier()
+ '''
+ def _get_driver(self, driver_name):
+ """Get the driver.
+
+ :param driver_name: name of the driver.
+ :returns: the driver; an instance of a class which implements
+ :class:`iotronic.drivers.base.BaseDriver`.
+ :raises: DriverNotFound if the driver is not loaded.
+
+ """
+ try:
+ return self._driver_factory[driver_name].obj
+ except KeyError:
+ raise exception.DriverNotFound(driver_name=driver_name)
+ '''
+ def init_host(self):
+ self.dbapi = dbapi.get_instance()
+
+ self._keepalive_evt = threading.Event()
+ """Event for the keepalive thread."""
+
+ self._worker_pool = greenpool.GreenPool(
+ size=CONF.conductor.workers_pool_size)
+ """GreenPool of background workers for performing tasks async."""
+
+ self.ring_manager = hash.HashRingManager()
+ """Consistent hash ring which maps drivers to conductors."""
+
+ # NOTE(deva): instantiating DriverFactory may raise DriverLoadError
+ # or DriverNotFound
+ #self._driver_factory = driver_factory.DriverFactory()
+ #"""Driver factory loads all enabled drivers."""
+
+ #self.drivers = self._driver_factory.names
+ """List of driver names which this conductor supports."""
+ '''
+ if not self.drivers:
+ msg = _LE("Conductor %s cannot be started because no drivers "
+ "were loaded. This could be because no drivers were "
+ "specified in 'enabled_drivers' config option.")
+ LOG.error(msg, self.host)
+ raise exception.NoDriversLoaded(conductor=self.host)
+
+ # Collect driver-specific periodic tasks
+ for driver_obj in driver_factory.drivers().values():
+ self._collect_periodic_tasks(driver_obj)
+ for iface_name in (driver_obj.core_interfaces +
+ driver_obj.standard_interfaces +
+ ['vendor']):
+ iface = getattr(driver_obj, iface_name, None)
+ if iface:
+ self._collect_periodic_tasks(iface)
+ '''
+ # clear all locks held by this conductor before registering
+ self.dbapi.clear_node_reservations_for_conductor(self.host)
+ try:
+ # Register this conductor with the cluster
+ cdr = self.dbapi.register_conductor({'hostname': self.host,
+ 'drivers': ['fake']})
+ except exception.ConductorAlreadyRegistered:
+ # This conductor was already registered and did not shut down
+ # properly, so log a warning and update the record.
+ LOG.warn(_LW("A conductor with hostname %(hostname)s "
+ "was previously registered. Updating registration"),
+ {'hostname': self.host})
+ # NOTE: self.drivers is never populated while driver loading is
+ # disabled above, so re-register the same placeholder list used at
+ # first registration.
+ cdr = self.dbapi.register_conductor({'hostname': self.host,
+ 'drivers': ['fake']},
+ update_existing=True)
+ self.conductor = cdr
+
+ # Spawn a dedicated greenthread for the keepalive
+ try:
+ self._spawn_worker(self._conductor_service_record_keepalive)
+ LOG.info(_LI('Successfully started conductor with hostname '
+ '%(hostname)s.'),
+ {'hostname': self.host})
+ except exception.NoFreeConductorWorker:
+ with excutils.save_and_reraise_exception():
+ LOG.critical(_LC('Failed to start keepalive'))
+ self.del_host()
+
+ def _collect_periodic_tasks(self, obj):
+ for n, method in inspect.getmembers(obj, inspect.ismethod):
+ if getattr(method, '_periodic_enabled', False):
+ self.add_periodic_task(method)
+
+ def del_host(self, deregister=True):
+ self._keepalive_evt.set()
+ if deregister:
+ try:
+ # Inform the cluster that this conductor is shutting down.
+ # Note that rebalancing will not occur immediately, but when
+ # the periodic sync takes place.
+ self.dbapi.unregister_conductor(self.host)
+ LOG.info(_LI('Successfully stopped conductor with hostname '
+ '%(hostname)s.'),
+ {'hostname': self.host})
+ except exception.ConductorNotFound:
+ pass
+ else:
+ LOG.info(_LI('Not deregistering conductor with hostname '
+ '%(hostname)s.'),
+ {'hostname': self.host})
+ # Waiting here to give workers the chance to finish. This has the
+ # benefit of releasing locks workers placed on nodes, as well as
+ # having work complete normally.
+ self._worker_pool.waitall()
+
+ def periodic_tasks(self, context, raise_on_error=False):
+ """Periodic tasks are run at pre-specified interval."""
+ return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
+
+ @lockutils.synchronized(WORKER_SPAWN_LOCK, 'iotronic-')
+ def _spawn_worker(self, func, *args, **kwargs):
+
+ """Create a greenthread to run func(*args, **kwargs).
+
+ Spawns a greenthread if there are free slots in pool, otherwise raises
+ exception. Execution control returns immediately to the caller.
+
+ :returns: GreenThread object.
+ :raises: NoFreeConductorWorker if worker pool is currently full.
+
+ """
+ if self._worker_pool.free():
+ return self._worker_pool.spawn(func, *args, **kwargs)
+ else:
+ raise exception.NoFreeConductorWorker()
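+
+ # Illustrative (assumed) call pattern, mirroring the RPC methods below:
+ #   thread = self._spawn_worker(utils.node_power_action, task, new_state)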
+
+ def _conductor_service_record_keepalive(self):
+ while not self._keepalive_evt.is_set():
+ try:
+ self.dbapi.touch_conductor(self.host)
+ except db_exception.DBConnectionError:
+ LOG.warning(_LW('Conductor could not connect to database '
+ 'while heartbeating.'))
+ self._keepalive_evt.wait(CONF.conductor.heartbeat_interval)
+
+ @messaging.expected_exceptions(exception.InvalidParameterValue,
+ exception.MissingParameterValue,
+ exception.NodeLocked)
+ def update_node(self, context, node_obj):
+ """Update a node with the supplied data.
+
+ This method is the main "hub" for PUT and PATCH requests in the API.
+ It ensures that the requested change is safe to perform,
+ validates the parameters with the node's driver, if necessary.
+
+ :param context: an admin context
+ :param node_obj: a changed (but not saved) node object.
+
+ """
+ node_id = node_obj.uuid
+ LOG.debug("RPC update_node called for node %s.", node_id)
+
+ # NOTE(jroll) clear maintenance_reason if node.update sets
+ # maintenance to False for backwards compatibility, for tools
+ # not using the maintenance endpoint.
+ delta = node_obj.obj_what_changed()
+ if 'maintenance' in delta and not node_obj.maintenance:
+ node_obj.maintenance_reason = None
+
+ driver_name = node_obj.driver if 'driver' in delta else None
+ with task_manager.acquire(context, node_id, shared=False,
+ driver_name=driver_name):
+ node_obj.save()
+
+ return node_obj
+
+ @messaging.expected_exceptions(exception.InvalidParameterValue,
+ exception.MissingParameterValue,
+ exception.NoFreeConductorWorker,
+ exception.NodeLocked)
+ def change_node_power_state(self, context, node_id, new_state):
+ """RPC method to encapsulate changes to a node's state.
+
+ Perform actions such as power on, power off. The validation is
+ performed synchronously, and if successful, the power action is
+ started in the background (asynchronously). Once the power action
+ is finished and successful, it updates the power_state for the
+ node with the new power state.
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :param new_state: the desired power state of the node.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+
+ """
+ LOG.debug("RPC change_node_power_state called for node %(node)s. "
+ "The desired new state is %(state)s.",
+ {'node': node_id, 'state': new_state})
+
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ task.driver.power.validate(task)
+ # Set the target_power_state and clear any last_error, since we're
+ # starting a new operation. This will expose to other processes
+ # and clients that work is in progress.
+ if new_state == states.REBOOT:
+ task.node.target_power_state = states.POWER_ON
+ else:
+ task.node.target_power_state = new_state
+ task.node.last_error = None
+ task.node.save()
+ task.set_spawn_error_hook(power_state_error_handler,
+ task.node, task.node.power_state)
+ task.spawn_after(self._spawn_worker, utils.node_power_action,
+ task, new_state)
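+ # The spawned utils.node_power_action call performs the actual
+ # driver power change and records the resulting power_state
+ # (a descriptive note on the flow above, not new behavior).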
+
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.InvalidParameterValue,
+ exception.UnsupportedDriverExtension,
+ exception.MissingParameterValue)
+ def vendor_passthru(self, context, node_id, driver_method,
+ http_method, info):
+ """RPC method to encapsulate vendor action.
+
+ Synchronously validate driver-specific info or get driver status,
+ and if successful invoke the vendor method. If the method mode
+ is 'async', the conductor starts a background worker to perform
+ the vendor action.
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :param driver_method: the name of the vendor method.
+ :param http_method: the HTTP method used for the request.
+ :param info: vendor method args.
+ :raises: InvalidParameterValue if supplied info is not valid.
+ :raises: MissingParameterValue if required supplied info is missing.
+ :raises: UnsupportedDriverExtension if current driver does not have
+ vendor interface or method is unsupported.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+ :raises: NodeLocked if node is locked by another conductor.
+ :returns: A tuple containing the response of the invoked method
+ and a boolean value indicating whether the method was
+ invoked asynchronously (True) or synchronously (False).
+ If invoked asynchronously, the response field will
+ always be None.
+ """
+ LOG.debug("RPC vendor_passthru called for node %s.", node_id)
+ # NOTE(max_lobur): Even though not all vendor_passthru calls may
+ # require an exclusive lock, we need to do so to guarantee that the
+ # state doesn't unexpectedly change between doing a vendor.validate
+ # and vendor.vendor_passthru.
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ if not getattr(task.driver, 'vendor', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver,
+ extension='vendor interface')
+
+ vendor_iface = task.driver.vendor
+
+ # NOTE(lucasagomes): Before, the vendor_passthru() method was
+ # a self-contained method and each driver implemented its own
+ # version of it; now we have a common mechanism that drivers
+ # should use to expose their vendor methods. If a driver still
+ # has its own vendor_passthru() method, we call it for backward
+ # compatibility. This code should be removed once L opens.
+ if hasattr(vendor_iface, 'vendor_passthru'):
+ LOG.warning(_LW("Drivers implementing their own version "
+ "of vendor_passthru() are deprecated. "
+ "Please update the code to use the "
+ "@passthru decorator."))
+ vendor_iface.validate(task, method=driver_method,
+ **info)
+ task.spawn_after(self._spawn_worker,
+ vendor_iface.vendor_passthru, task,
+ method=driver_method, **info)
+ # NodeVendorPassthru was always async
+ return (None, True)
+
+ try:
+ vendor_opts = vendor_iface.vendor_routes[driver_method]
+ vendor_func = vendor_opts['func']
+ except KeyError:
+ raise exception.InvalidParameterValue(
+ _('No handler for method %s') % driver_method)
+
+ http_method = http_method.upper()
+ if http_method not in vendor_opts['http_methods']:
+ raise exception.InvalidParameterValue(
+ _('The method %(method)s does not support HTTP %(http)s') %
+ {'method': driver_method, 'http': http_method})
+
+ vendor_iface.validate(task, method=driver_method,
+ http_method=http_method, **info)
+
+ # Inform the vendor method which HTTP method it was invoked with
+ info['http_method'] = http_method
+
+ # Invoke the vendor method according to its mode
+ is_async = vendor_opts['async']
+ ret = None
+ if is_async:
+ task.spawn_after(self._spawn_worker, vendor_func, task, **info)
+ else:
+ ret = vendor_func(task, **info)
+
+ return (ret, is_async)
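+ # A vendor method routed through this RPC is typically declared
+ # with the @passthru decorator, which fills vendor_routes.
+ # Illustrative sketch (assumed names):
+ #
+ #     class MyVendor(base.VendorInterface):
+ #         @base.passthru(['POST'], async=False)
+ #         def do_something(self, task, **kwargs):
+ #             return {'result': 'ok'}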
+
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue,
+ exception.UnsupportedDriverExtension,
+ exception.DriverNotFound)
+ def driver_vendor_passthru(self, context, driver_name, driver_method,
+ http_method, info):
+ """Handle top-level vendor actions.
+
+ RPC method which handles driver-level vendor passthru calls. These
+ calls don't require a node UUID and are executed on a random
+ conductor with the specified driver. If the method mode is
+ async the conductor will start background worker to perform
+ vendor action.
+
+ :param context: an admin context.
+ :param driver_name: name of the driver on which to call the method.
+ :param driver_method: name of the vendor method, for use by the driver.
+ :param http_method: the HTTP method used for the request.
+ :param info: user-supplied data to pass through to the driver.
+ :raises: MissingParameterValue if required supplied info is missing.
+ :raises: InvalidParameterValue if supplied info is not valid.
+ :raises: UnsupportedDriverExtension if current driver does not have
+ vendor interface, if the vendor interface does not implement
+ driver-level vendor passthru or if the passthru method is
+ unsupported.
+ :raises: DriverNotFound if the supplied driver is not loaded.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+ :returns: A tuple containing the response of the invoked method
+ and a boolean value indicating whether the method was
+ invoked asynchronously (True) or synchronously (False).
+ If invoked asynchronously, the response field will
+ always be None.
+ """
+ # Any locking in a top-level vendor action will need to be done by the
+ # implementation, as there is little we could reasonably lock on here.
+ LOG.debug("RPC driver_vendor_passthru for driver %s.", driver_name)
+ driver = self._get_driver(driver_name)
+ if not getattr(driver, 'vendor', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=driver_name,
+ extension='vendor interface')
+
+ # NOTE(lucasagomes): Before, the driver_vendor_passthru()
+ # method was a self-contained method and each driver implemented
+ # its own version of it; now we have a common mechanism that
+ # drivers should use to expose their vendor methods. If a driver
+ # still has its own driver_vendor_passthru() method, we call
+ # it for backward compatibility. This code should be removed
+ # once L opens.
+ if hasattr(driver.vendor, 'driver_vendor_passthru'):
+ LOG.warning(_LW("Drivers implementing their own version "
+ "of driver_vendor_passthru() are "
+ "deprecated. Please update the code to use "
+ "the @driver_passthru decorator."))
+
+ driver.vendor.driver_validate(method=driver_method, **info)
+ ret = driver.vendor.driver_vendor_passthru(
+ context, method=driver_method, **info)
+ # DriverVendorPassthru was always sync
+ return (ret, False)
+
+ try:
+ vendor_opts = driver.vendor.driver_routes[driver_method]
+ vendor_func = vendor_opts['func']
+ except KeyError:
+ raise exception.InvalidParameterValue(
+ _('No handler for method %s') % driver_method)
+
+ http_method = http_method.upper()
+ if http_method not in vendor_opts['http_methods']:
+ raise exception.InvalidParameterValue(
+ _('The method %(method)s does not support HTTP %(http)s') %
+ {'method': driver_method, 'http': http_method})
+
+ # Inform the vendor method which HTTP method it was invoked with
+ info['http_method'] = http_method
+
+ # Invoke the vendor method according to its mode
+ is_async = vendor_opts['async']
+ ret = None
+ driver.vendor.driver_validate(method=driver_method, **info)
+
+ if is_async:
+ self._spawn_worker(vendor_func, context, **info)
+ else:
+ ret = vendor_func(context, **info)
+
+ return (ret, is_async)
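+ # Driver-level methods are declared analogously with the
+ # @driver_passthru decorator, which fills driver_routes; they
+ # receive a context instead of a task. Sketch (assumed names):
+ #
+ #     @base.driver_passthru(['POST'], async=False)
+ #     def lookup(self, context, **kwargs):
+ #         return {'found': True}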
+
+ @messaging.expected_exceptions(exception.UnsupportedDriverExtension)
+ def get_node_vendor_passthru_methods(self, context, node_id):
+ """Retrieve information about vendor methods of the given node.
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :returns: dictionary of <method name>:<method metadata> entries.
+
+ """
+ LOG.debug("RPC get_node_vendor_passthru_methods called for node %s",
+ node_id)
+ with task_manager.acquire(context, node_id, shared=True) as task:
+ if not getattr(task.driver, 'vendor', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver,
+ extension='vendor interface')
+
+ return get_vendor_passthru_metadata(
+ task.driver.vendor.vendor_routes)
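+ # The returned metadata has the shape (illustrative):
+ #     {'do_something': {'http_methods': ['POST'], 'async': False,
+ #                       'description': '...'}}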
+
+ @messaging.expected_exceptions(exception.UnsupportedDriverExtension,
+ exception.DriverNotFound)
+ def get_driver_vendor_passthru_methods(self, context, driver_name):
+ """Retrieve information about vendor methods of the given driver.
+
+ :param context: an admin context.
+ :param driver_name: name of the driver.
+ :returns: dictionary of <method name>:<method metadata> entries.
+
+ """
+ # Any locking in a top-level vendor action will need to be done by the
+ # implementation, as there is little we could reasonably lock on here.
+ LOG.debug("RPC get_driver_vendor_passthru_methods for driver %s",
+ driver_name)
+ driver = self._get_driver(driver_name)
+ if not getattr(driver, 'vendor', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=driver_name,
+ extension='vendor interface')
+
+ return get_vendor_passthru_metadata(driver.vendor.driver_routes)
+
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.NodeInMaintenance,
+ exception.InstanceDeployFailure,
+ exception.InvalidStateRequested)
+ def do_node_deploy(self, context, node_id, rebuild=False,
+ configdrive=None):
+ """RPC method to initiate deployment to a node.
+
+ Initiate the deployment of a node. Validations are done
+ synchronously and the actual deploy work is performed in
+ background (asynchronously).
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :param rebuild: True if this is a rebuild request. A rebuild will
+ recreate the instance on the same node, overwriting
+ all disk contents. The ephemeral partition, if it
+ exists, can optionally be preserved.
+ :param configdrive: Optional. A gzipped and base64 encoded configdrive.
+ :raises: InstanceDeployFailure
+ :raises: NodeInMaintenance if the node is in maintenance mode.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+ :raises: InvalidStateRequested when the requested state is not a valid
+ target from the current state.
+
+ """
+ LOG.debug("RPC do_node_deploy called for node %s.", node_id)
+
+ # NOTE(comstud): If the _sync_power_states() periodic task happens
+ # to have locked this node, we'll fail to acquire the lock. The
+ # client should perhaps retry in this case unless we decide we
+ # want to add retries or extra synchronization here.
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ node = task.node
+ if node.maintenance:
+ raise exception.NodeInMaintenance(op=_('provisioning'),
+ node=node.uuid)
+
+ if rebuild:
+ event = 'rebuild'
+
+ # Note(gilliard) Clear these to force the driver to
+ # check whether they have been changed in glance
+ # NOTE(vdrok): If image_source is not from Glance we should
+ # not clear kernel and ramdisk as they're input manually
+ if glance_utils.is_glance_image(
+ node.instance_info.get('image_source')):
+ instance_info = node.instance_info
+ instance_info.pop('kernel', None)
+ instance_info.pop('ramdisk', None)
+ node.instance_info = instance_info
+ else:
+ event = 'deploy'
+
+ driver_internal_info = node.driver_internal_info
+ # Infer the image type to make sure the deploy driver
+ # validates only the necessary variables for different
+ # image types.
+ # NOTE(sirushtim): The iwdi variable can be None. It's up to
+ # the deploy driver to validate this.
+ iwdi = images.is_whole_disk_image(context, node.instance_info)
+ driver_internal_info['is_whole_disk_image'] = iwdi
+ node.driver_internal_info = driver_internal_info
+ node.save()
+
+ try:
+ task.driver.power.validate(task)
+ task.driver.deploy.validate(task)
+ except (exception.InvalidParameterValue,
+ exception.MissingParameterValue) as e:
+ raise exception.InstanceDeployFailure(_(
+ "RPC do_node_deploy failed to validate deploy or "
+ "power info. Error: %(msg)s") % {'msg': e})
+
+ LOG.debug("do_node_deploy calling event: %(event)s for node: "
+ "%(node)s", {'event': event, 'node': node.uuid})
+ try:
+ task.process_event(event,
+ callback=self._spawn_worker,
+ call_args=(do_node_deploy, task,
+ self.conductor.id,
+ configdrive),
+ err_handler=provisioning_error_handler)
+ except exception.InvalidState:
+ raise exception.InvalidStateRequested(
+ action=event, node=task.node.uuid,
+ state=task.node.provision_state)
+
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.InstanceDeployFailure,
+ exception.InvalidStateRequested)
+ def do_node_tear_down(self, context, node_id):
+ """RPC method to tear down an existing node deployment.
+
+ Validate driver specific information synchronously, and then
+ spawn a background worker to tear down the node asynchronously.
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :raises: InstanceDeployFailure
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task
+ :raises: InvalidStateRequested when the requested state is not a valid
+ target from the current state.
+
+ """
+ LOG.debug("RPC do_node_tear_down called for node %s.", node_id)
+
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ try:
+ # NOTE(ghe): Valid power driver values are needed to perform
+ # a tear-down. Deploy info is useful to purge the cache but not
+ # required for this method.
+ task.driver.power.validate(task)
+ except (exception.InvalidParameterValue,
+ exception.MissingParameterValue) as e:
+ raise exception.InstanceDeployFailure(_(
+ "Failed to validate power driver interface. "
+ "Cannot delete instance. Error: %(msg)s") % {'msg': e})
+
+ try:
+ task.process_event('delete',
+ callback=self._spawn_worker,
+ call_args=(self._do_node_tear_down, task),
+ err_handler=provisioning_error_handler)
+ except exception.InvalidState:
+ raise exception.InvalidStateRequested(
+ action='delete', node=task.node.uuid,
+ state=task.node.provision_state)
+
+ def _do_node_tear_down(self, task):
+ """Internal RPC method to tear down an existing node deployment."""
+ node = task.node
+ try:
+ task.driver.deploy.clean_up(task)
+ task.driver.deploy.tear_down(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_LE('Error in tear_down of node %(node)s: '
+ '%(err)s'),
+ {'node': node.uuid, 'err': e})
+ node.last_error = _("Failed to tear down. Error: %s") % e
+ task.process_event('error')
+ else:
+ # NOTE(deva): When tear_down finishes, the deletion is done,
+ # cleaning will start next
+ LOG.info(_LI('Successfully unprovisioned node %(node)s with '
+ 'instance %(instance)s.'),
+ {'node': node.uuid, 'instance': node.instance_uuid})
+ finally:
+ # NOTE(deva): there is no need to unset conductor_affinity
+ # because it is a reference to the most recent conductor which
+ # deployed a node, and does not limit any future actions.
+ # But we do need to clear the instance_info
+ node.instance_info = {}
+ node.save()
+
+ # Begin cleaning
+ try:
+ task.process_event('clean')
+ except exception.InvalidState:
+ raise exception.InvalidStateRequested(
+ action='clean', node=node.uuid,
+ state=node.provision_state)
+ self._do_node_clean(task)
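+ # Assumed state flow here: tear down runs in DELETING, the 'clean'
+ # event moves the node to CLEANING, and _do_node_clean eventually
+ # fires 'done' to reach AVAILABLE.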
+
+ def continue_node_clean(self, context, node_id):
+ """RPC method to continue cleaning a node.
+
+ This is useful for cleaning tasks that are async. When they complete,
+ they call back via RPC, a new worker and lock are set up, and cleaning
+ continues. This can also be used to resume cleaning on take_over.
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :raises: InvalidStateRequested if the node is not in CLEANING state
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: NodeNotFound if the node no longer appears in the database
+
+ """
+ LOG.debug("RPC continue_node_clean called for node %s.", node_id)
+
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ if task.node.provision_state != states.CLEANING:
+ raise exception.InvalidStateRequested(_(
+ 'Cannot continue cleaning on %(node)s, node is in '
+ '%(state)s state, should be %(clean_state)s') %
+ {'node': task.node.uuid,
+ 'state': task.node.provision_state,
+ 'clean_state': states.CLEANING})
+ task.set_spawn_error_hook(cleaning_error_handler, task.node,
+ 'Failed to run next clean step')
+ task.spawn_after(
+ self._spawn_worker,
+ self._do_next_clean_step,
+ task,
+ task.node.driver_internal_info.get('clean_steps', []),
+ task.node.clean_step)
+
+ def _do_node_clean(self, task):
+ """Internal RPC method to perform automated cleaning of a node."""
+ node = task.node
+ LOG.debug('Starting cleaning for node %s', node.uuid)
+
+ if not CONF.conductor.clean_nodes:
+ # Skip cleaning, move to AVAILABLE.
+ node.clean_step = None
+ node.save()
+
+ task.process_event('done')
+ LOG.info(_LI('Cleaning is disabled, node %s has been successfully '
+ 'moved to AVAILABLE state.'), node.uuid)
+ return
+
+ try:
+ # NOTE(ghe): Valid power driver values are needed to perform
+ # a cleaning.
+ task.driver.power.validate(task)
+ except (exception.InvalidParameterValue,
+ exception.MissingParameterValue) as e:
+ msg = (_('Failed to validate power driver interface. '
+ 'Cannot clean node %(node)s. Error: %(msg)s') %
+ {'node': node.uuid, 'msg': e})
+ return cleaning_error_handler(task, msg)
+
+ # Allow the deploy driver to set up the ramdisk again (necessary for
+ # IPA cleaning/zapping)
+ try:
+ prepare_result = task.driver.deploy.prepare_cleaning(task)
+ except Exception as e:
+ msg = (_('Failed to prepare node %(node)s for cleaning: %(e)s')
+ % {'node': node.uuid, 'e': e})
+ LOG.exception(msg)
+ return cleaning_error_handler(task, msg)
+ if prepare_result == states.CLEANING:
+ # Prepare is asynchronous, the deploy driver will need to
+ # set node.driver_internal_info['clean_steps'] and
+ # node.clean_step and then make an RPC call to
+ # continue_node_clean to start cleaning.
+ return
+
+ set_node_cleaning_steps(task)
+ self._do_next_clean_step(
+ task,
+ node.driver_internal_info.get('clean_steps', []),
+ node.clean_step)
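+ # Each entry in driver_internal_info['clean_steps'] is a dict like
+ # (illustrative):
+ #     {'interface': 'deploy', 'step': 'erase_devices', 'priority': 10}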
+
+ def _do_next_clean_step(self, task, steps, last_step):
+ """Start executing cleaning/zapping steps from the last step (if any).
+
+ :param task: a TaskManager instance with an exclusive lock
+ :param steps: The complete list of steps that need to be executed
+ on the node
+ :param last_step: The last step that was executed. {} will start
+ from the beginning
+ """
+ node = task.node
+ # Trim already executed steps
+ if last_step:
+ try:
+ # Trim off last_step (now finished) and all previous steps.
+ steps = steps[steps.index(last_step) + 1:]
+ except ValueError:
+ msg = (_('Node %(node)s got an invalid last step for '
+ '%(state)s: %(step)s.') %
+ {'node': node.uuid, 'step': last_step,
+ 'state': node.provision_state})
+ LOG.exception(msg)
+ return cleaning_error_handler(task, msg)
+
+ LOG.info(_LI('Executing %(state)s on node %(node)s, remaining steps: '
+ '%(steps)s'), {'node': node.uuid, 'steps': steps,
+ 'state': node.provision_state})
+ # Execute each step until we hit an async step or run out of steps
+ for step in steps:
+ # Save which step we're about to start so we can restart
+ # if necessary
+ node.clean_step = step
+ node.save()
+ interface = getattr(task.driver, step.get('interface'))
+ LOG.info(_LI('Executing %(step)s on node %(node)s'),
+ {'step': step, 'node': node.uuid})
+ try:
+ result = interface.execute_clean_step(task, step)
+ except Exception as e:
+ msg = (_('Node %(node)s failed step %(step)s: '
+ '%(exc)s') %
+ {'node': node.uuid, 'exc': e,
+ 'step': node.clean_step})
+ LOG.exception(msg)
+ cleaning_error_handler(task, msg)
+ return
+
+ # Check if the step is done or not. The step should return
+ # states.CLEANING if the step is still being executed, or
+ # None if the step is done.
+ if result == states.CLEANING:
+ # Kill this worker, the async step will make an RPC call to
+ # continue_node_clean to continue cleaning
+ LOG.info(_LI('Clean step %(step)s on node %(node)s being '
+ 'executed asynchronously, waiting for driver.'),
+ {'node': node.uuid, 'step': step})
+ return
+ elif result is not None:
+ msg = (_('While executing step %(step)s on node '
+ '%(node)s, step returned invalid value: %(val)s')
+ % {'step': step, 'node': node.uuid, 'val': result})
+ LOG.error(msg)
+ return cleaning_error_handler(task, msg)
+ LOG.info(_LI('Node %(node)s finished clean step %(step)s'),
+ {'node': node.uuid, 'step': step})
+
+ # Clear clean_step
+ node.clean_step = None
+ driver_internal_info = node.driver_internal_info
+ driver_internal_info['clean_steps'] = None
+ node.driver_internal_info = driver_internal_info
+ try:
+ task.driver.deploy.tear_down_cleaning(task)
+ except Exception as e:
+ msg = (_('Failed to tear down from cleaning for node %s')
+ % node.uuid)
+ LOG.exception(msg)
+ return cleaning_error_handler(task, msg, tear_down_cleaning=False)
+
+ LOG.info(_LI('Node %s cleaning complete'), node.uuid)
+ task.process_event('done')
+
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue,
+ exception.InvalidStateRequested)
+ def do_provisioning_action(self, context, node_id, action):
+ """RPC method to initiate certain provisioning state transitions.
+
+ Initiate a provisioning state change through the state machine,
+ rather than through an RPC call to do_node_deploy / do_node_tear_down
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :param action: an action. One of iotronic.common.states.VERBS
+ :raises: InvalidParameterValue
+ :raises: InvalidStateRequested
+ :raises: NoFreeConductorWorker
+
+ """
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ if (action == states.VERBS['provide'] and
+ task.node.provision_state == states.MANAGEABLE):
+ task.process_event('provide',
+ callback=self._spawn_worker,
+ call_args=(self._do_node_clean, task),
+ err_handler=provisioning_error_handler)
+ else:
+ try:
+ task.process_event(action)
+ except exception.InvalidState:
+ raise exception.InvalidStateRequested(
+ action=action, node=task.node.uuid,
+ state=task.node.provision_state)
+
+ @periodic_task.periodic_task(
+ spacing=CONF.conductor.sync_power_state_interval)
+ def _sync_power_states(self, context):
+ """Periodic task to sync power states for the nodes.
+
+ Attempt to grab a lock and sync only if the following
+ conditions are met:
+
+ 1) Node is mapped to this conductor.
+ 2) Node is not in maintenance mode.
+ 3) Node is not in DEPLOYWAIT provision state.
+ 4) Node doesn't have a reservation
+
+ NOTE: Grabbing a lock here can cause other methods to fail to
+ grab it. We want to avoid trying to grab a lock while a
+ node is in the DEPLOYWAIT state so we don't unnecessarily
+ cause a deploy callback to fail. There's not much we can do
+ here to avoid failing a brand new deploy to a node that we've
+ locked here, though.
+ """
+ # FIXME(comstud): Since our initial state checks are outside
+ # of the lock (to try to avoid the lock), some checks are
+ # repeated after grabbing the lock so we can unlock quickly.
+ # The node mapping is not re-checked because it doesn't much
+ # matter if things happened to re-balance.
+ #
+ # This is inefficient and racy. We end up calling the DB API's
+ # get_node() twice (once here, and once in acquire()). Ideally we
+ # add a way to pass constraints to task_manager.acquire()
+ # (through to its DB API call) so that we can eliminate our call
+ # and first set of checks below.
+
+ filters = {'reserved': False, 'maintenance': False}
+ node_iter = self.iter_nodes(fields=['id'], filters=filters)
+ for (node_uuid, driver, node_id) in node_iter:
+ try:
+ # NOTE(deva): we should not acquire a lock on a node in
+ # DEPLOYWAIT, as this could cause an error within
+ # a deploy ramdisk POSTing back at the same time.
+ # TODO(deva): refactor this check, because it needs to be done
+ # in every periodic task, not just this one.
+ node = objects.Node.get_by_id(context, node_id)
+ if (node.provision_state == states.DEPLOYWAIT or
+ node.maintenance or node.reservation is not None):
+ continue
+
+ with task_manager.acquire(context, node_uuid) as task:
+ if (task.node.provision_state == states.DEPLOYWAIT or
+ task.node.maintenance):
+ continue
+ count = do_sync_power_state(
+ task, self.power_state_sync_count[node_uuid])
+ if count:
+ self.power_state_sync_count[node_uuid] = count
+ else:
+ # don't bloat the dict with non-failing nodes
+ del self.power_state_sync_count[node_uuid]
+ except exception.NodeNotFound:
+ LOG.info(_LI("During sync_power_state, node %(node)s was not "
+ "found and presumed deleted by another process."),
+ {'node': node_uuid})
+ except exception.NodeLocked:
+ LOG.info(_LI("During sync_power_state, node %(node)s was "
+ "already locked by another process. Skip."),
+ {'node': node_uuid})
+ finally:
+ # Yield on every iteration
+ eventlet.sleep(0)
+
+ @periodic_task.periodic_task(
+ spacing=CONF.conductor.check_provision_state_interval)
+ def _check_deploy_timeouts(self, context):
+ """Periodically checks whether a deploy RPC call has timed out.
+
+ If a deploy call has timed out, the deploy failed and we clean up.
+
+ :param context: request context.
+ """
+ callback_timeout = CONF.conductor.deploy_callback_timeout
+ if not callback_timeout:
+ return
+
+ filters = {'reserved': False,
+ 'provision_state': states.DEPLOYWAIT,
+ 'maintenance': False,
+ 'provisioned_before': callback_timeout}
+ sort_key = 'provision_updated_at'
+ callback_method = utils.cleanup_after_timeout
+ err_handler = provisioning_error_handler
+ self._fail_if_in_state(context, filters, states.DEPLOYWAIT,
+ sort_key, callback_method, err_handler)
+
+ def _do_takeover(self, task):
+ """Take over this node.
+
+ Prepares a node for takeover by this conductor, performs the takeover,
+ and changes the conductor associated with the node. The node with the
+ new conductor affiliation is saved to the DB.
+
+ :param task: a TaskManager instance
+ """
+ LOG.debug('Conductor %(cdr)s taking over node %(node)s',
+ {'cdr': self.host, 'node': task.node.uuid})
+ task.driver.deploy.prepare(task)
+ task.driver.deploy.take_over(task)
+ # NOTE(lucasagomes): Set the ID of the new conductor managing
+ # this node
+ task.node.conductor_affinity = self.conductor.id
+ task.node.save()
+
+ @periodic_task.periodic_task(
+ spacing=CONF.conductor.sync_local_state_interval)
+ def _sync_local_state(self, context):
+ """Perform any actions necessary to sync local state.
+
+ This is called periodically to refresh the conductor's copy of the
+ consistent hash ring. If any mappings have changed, this method then
+ determines which, if any, nodes need to be "taken over".
+ The ensuing actions could include preparing a PXE environment,
+ updating the DHCP server, and so on.
+ """
+ self.ring_manager.reset()
+ filters = {'reserved': False,
+ 'maintenance': False,
+ 'provision_state': states.ACTIVE}
+ node_iter = self.iter_nodes(fields=['id', 'conductor_affinity'],
+ filters=filters)
+
+ workers_count = 0
+ for node_uuid, driver, node_id, conductor_affinity in node_iter:
+ if conductor_affinity == self.conductor.id:
+ continue
+
+ # Node is mapped here, but not updated by this conductor last
+ try:
+ with task_manager.acquire(context, node_uuid) as task:
+ # NOTE(deva): now that we have the lock, check again to
+ # avoid racing with deletes and other state changes
+ node = task.node
+ if (node.maintenance or
+ node.conductor_affinity == self.conductor.id or
+ node.provision_state != states.ACTIVE):
+ continue
+
+ task.spawn_after(self._spawn_worker,
+ self._do_takeover, task)
+
+ except exception.NoFreeConductorWorker:
+ break
+ except (exception.NodeLocked, exception.NodeNotFound):
+ continue
+ workers_count += 1
+ if workers_count == CONF.conductor.periodic_max_workers:
+ break
+
+ def _mapped_to_this_conductor(self, node_uuid, driver):
+ """Check that node is mapped to this conductor.
+
+ Note that because mappings are eventually consistent, it is possible
+ for two conductors to simultaneously believe that a node is mapped to
+ them. Any operation that depends on exclusive control of a node should
+ take out a lock.
+ """
+ try:
+ ring = self.ring_manager[driver]
+ except exception.DriverNotFound:
+ return False
+
+ return self.host in ring.get_hosts(node_uuid)
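+ # The hash ring maps a node UUID to the small set of conductor
+ # hosts registered for its driver; this conductor handles the node
+ # only if self.host is in that set (descriptive note).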
+
+ def iter_nodes(self, fields=None, **kwargs):
+ """Iterate over nodes mapped to this conductor.
+
+ Requests the node set from the database and filters out nodes
+ that are not mapped to this conductor.
+
+ Yields tuples (node_uuid, driver, ...) where ... is derived from
+ fields argument, e.g.: fields=None means yielding ('uuid', 'driver'),
+ fields=['foo'] means yielding ('uuid', 'driver', 'foo').
+
+ :param fields: list of fields to fetch in addition to uuid and driver
+ :param kwargs: additional arguments to pass to dbapi when looking for
+ nodes
+ :return: generator yielding tuples of requested fields
+ """
+ columns = ['uuid', 'driver'] + list(fields or ())
+ node_list = self.dbapi.get_nodeinfo_list(columns=columns, **kwargs)
+ for result in node_list:
+ if self._mapped_to_this_conductor(*result[:2]):
+ yield result
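+ # Illustrative usage (mirroring the periodic tasks above):
+ #     for node_uuid, driver, node_id in self.iter_nodes(fields=['id']):
+ #         ...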
+
+ @messaging.expected_exceptions(exception.NodeLocked)
+ def validate_driver_interfaces(self, context, node_id):
+ """Validate the `core` and `standardized` interfaces for drivers.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :returns: a dictionary containing the results of each
+ interface validation.
+
+ """
+ LOG.debug('RPC validate_driver_interfaces called for node %s.',
+ node_id)
+ ret_dict = {}
+ with task_manager.acquire(context, node_id, shared=True) as task:
+ # NOTE(sirushtim): the is_whole_disk_image variable is needed by
+ # deploy drivers for doing their validate(). Since the deploy
+ # isn't being done yet and the driver information could change in
+ # the meantime, we don't know if the is_whole_disk_image value will
+ # change or not. It isn't saved to the DB, but only used with this
+ # node instance for the current validations.
+ iwdi = images.is_whole_disk_image(context,
+ task.node.instance_info)
+ task.node.driver_internal_info['is_whole_disk_image'] = iwdi
+ for iface_name in (task.driver.core_interfaces +
+ task.driver.standard_interfaces):
+ iface = getattr(task.driver, iface_name, None)
+ result = reason = None
+ if iface:
+ try:
+ iface.validate(task)
+ result = True
+ except (exception.InvalidParameterValue,
+ exception.UnsupportedDriverExtension,
+ exception.MissingParameterValue) as e:
+ result = False
+ reason = str(e)
+ else:
+ reason = _('not supported')
+
+ ret_dict[iface_name] = {}
+ ret_dict[iface_name]['result'] = result
+ if reason is not None:
+ ret_dict[iface_name]['reason'] = reason
+ return ret_dict
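+ # Example result shape (illustrative):
+ #     {'power': {'result': True},
+ #      'deploy': {'result': False, 'reason': 'MAC address required'}}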
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.NodeAssociated,
+ exception.InvalidState)
+ def destroy_node(self, context, node_id):
+ """Delete a node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: NodeAssociated if the node contains an instance
+ associated with it.
+ :raises: InvalidState if the node is in the wrong provision
+ state to perform deletion.
+
+ """
+ with task_manager.acquire(context, node_id) as task:
+ node = task.node
+ node.destroy()
+ LOG.info(_LI('Successfully deleted node %(node)s.'),
+ {'node': node.uuid})
+ #if node.instance_uuid is not None:
+ # raise exception.NodeAssociated(node=node.uuid,
+ # instance=node.instance_uuid)
+
+ # TODO(lucasagomes): We should add ENROLLED once it's part of our
+ # state machine
+ # NOTE(lucasagomes): For the *FAIL states, users should
+ # move the node to a safe state prior to deletion. This is because we
+ # should try to avoid deleting a node in a dirty/whacky state,
+ # e.g: A node in DEPLOYFAIL, if deleted without passing through
+ # tear down/cleaning may leave data from the previous tenant
+ # in the disk. So nodes in *FAIL states should first be moved to:
+ # CLEANFAIL -> MANAGEABLE
+ # INSPECTIONFAIL -> MANAGEABLE
+ # DEPLOYFAIL -> DELETING
+ # ZAPFAIL -> MANAGEABLE (in the future)
+ '''
+ valid_states = (states.AVAILABLE, states.NOSTATE,
+ states.MANAGEABLE)
+ if node.provision_state not in valid_states:
+ msg = (_('Can not delete node "%(node)s" while it is in '
+ 'provision state "%(state)s". Valid provision states '
+ 'to perform deletion are: "%(valid_states)s"') %
+ {'node': node.uuid, 'state': node.provision_state,
+ 'valid_states': valid_states})
+ raise exception.InvalidState(msg)
+ if node.console_enabled:
+ try:
+ task.driver.console.stop_console(task)
+ except Exception as err:
+ LOG.error(_LE('Failed to stop console while deleting '
+ 'the node %(node)s: %(err)s.'),
+ {'node': node.uuid, 'err': err})
+ node.destroy()
+ LOG.info(_LI('Successfully deleted node %(node)s.'),
+ {'node': node.uuid})
+ '''
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.NodeNotFound)
+ def destroy_port(self, context, port):
+ """Delete a port.
+
+ :param context: request context.
+ :param port: port object
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: NodeNotFound if the node associated with the port does not
+ exist.
+
+ """
+ LOG.debug('RPC destroy_port called for port %(port)s',
+ {'port': port.uuid})
+ with task_manager.acquire(context, port.node_id) as task:
+ port.destroy()
+ LOG.info(_LI('Successfully deleted port %(port)s. '
+ 'The node associated with the port was '
+ '%(node)s'),
+ {'port': port.uuid, 'node': task.node.uuid})
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.UnsupportedDriverExtension,
+ exception.NodeConsoleNotEnabled,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue)
+ def get_console_information(self, context, node_id):
+ """Get connection information about the console.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support console.
+ :raises: NodeConsoleNotEnabled if the console is not enabled.
+ :raises: InvalidParameterValue when the wrong driver info is specified.
+ :raises: MissingParameterValue if required supplied info is missing.
+ """
+ LOG.debug('RPC get_console_information called for node %s', node_id)
+
+ with task_manager.acquire(context, node_id, shared=True) as task:
+ node = task.node
+
+ if not getattr(task.driver, 'console', None):
+ raise exception.UnsupportedDriverExtension(driver=node.driver,
+ extension='console')
+ if not node.console_enabled:
+ raise exception.NodeConsoleNotEnabled(node=node_id)
+
+ task.driver.console.validate(task)
+ return task.driver.console.get_console(task)
+
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.UnsupportedDriverExtension,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue)
+ def set_console_mode(self, context, node_id, enabled):
+ """Enable/Disable the console.
+
+ Validate driver specific information synchronously, and then
+ spawn a background worker to set console mode asynchronously.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param enabled: Boolean value; whether the console is enabled or
+ disabled.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support console.
+ :raises: InvalidParameterValue when the wrong driver info is specified.
+ :raises: MissingParameterValue if required supplied info is missing.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task
+ """
+ LOG.debug('RPC set_console_mode called for node %(node)s with '
+ 'enabled %(enabled)s', {'node': node_id,
+ 'enabled': enabled})
+
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ node = task.node
+ if not getattr(task.driver, 'console', None):
+ raise exception.UnsupportedDriverExtension(driver=node.driver,
+ extension='console')
+
+ task.driver.console.validate(task)
+
+ if enabled == node.console_enabled:
+ op = _('enabled') if enabled else _('disabled')
+ LOG.info(_LI("No console action was triggered because the "
+ "console is already %s"), op)
+ task.release_resources()
+ else:
+ node.last_error = None
+ node.save()
+ task.spawn_after(self._spawn_worker,
+ self._set_console_mode, task, enabled)
+
+ def _set_console_mode(self, task, enabled):
+ """Internal method to set console mode on a node."""
+ node = task.node
+ try:
+ if enabled:
+ task.driver.console.start_console(task)
+ # TODO(deva): We should be updating conductor_affinity here
+ # but there is no support for console sessions in
+ # take_over() right now.
+ else:
+ task.driver.console.stop_console(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ op = _('enabling') if enabled else _('disabling')
+ msg = (_('Error %(op)s the console on node %(node)s. '
+ 'Reason: %(error)s') % {'op': op,
+ 'node': node.uuid,
+ 'error': e})
+ node.last_error = msg
+ else:
+ node.console_enabled = enabled
+ node.last_error = None
+ finally:
+ node.save()
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.FailedToUpdateMacOnPort,
+ exception.MACAlreadyExists)
+ def update_port(self, context, port_obj):
+ """Update a port.
+
+ :param context: request context.
+ :param port_obj: a changed (but not saved) port object.
+ :raises: DHCPLoadError if the dhcp_provider cannot be loaded.
+ :raises: FailedToUpdateMacOnPort if MAC address changed and update
+ failed.
+ :raises: MACAlreadyExists if the update is setting a MAC which is
+ registered on another port already.
+ """
+ port_uuid = port_obj.uuid
+ LOG.debug("RPC update_port called for port %s.", port_uuid)
+
+ with task_manager.acquire(context, port_obj.node_id) as task:
+ node = task.node
+ if 'address' in port_obj.obj_what_changed():
+ vif = port_obj.extra.get('vif_port_id')
+ if vif:
+ api = dhcp_factory.DHCPFactory()
+ api.provider.update_port_address(vif, port_obj.address,
+ token=context.auth_token)
+ # Log warning if there is no vif_port_id and an instance
+ # is associated with the node.
+ elif node.instance_uuid:
+ LOG.warning(_LW(
+ "No VIF found for instance %(instance)s "
+ "port %(port)s when attempting to update port MAC "
+ "address."),
+ {'port': port_uuid, 'instance': node.instance_uuid})
+
+ port_obj.save()
+
+ return port_obj
+
+ @messaging.expected_exceptions(exception.DriverNotFound)
+ def get_driver_properties(self, context, driver_name):
+ """Get the properties of the driver.
+
+ :param context: request context.
+ :param driver_name: name of the driver.
+ :returns: a dictionary with <property name>:<property description>
+ entries.
+ :raises: DriverNotFound if the driver is not loaded.
+
+ """
+ LOG.debug("RPC get_driver_properties called for driver %s.",
+ driver_name)
+ driver = self._get_driver(driver_name)
+ return driver.get_properties()
+
+ @periodic_task.periodic_task(
+ spacing=CONF.conductor.send_sensor_data_interval)
+ def _send_sensor_data(self, context):
+ """Periodically sends sensor data to Ceilometer."""
+ # do nothing if send_sensor_data option is False
+ if not CONF.conductor.send_sensor_data:
+ return
+
+ filters = {'associated': True}
+ node_iter = self.iter_nodes(fields=['instance_uuid'],
+ filters=filters)
+
+ for (node_uuid, driver, instance_uuid) in node_iter:
+ # populate the message which will be sent to ceilometer
+ message = {'message_id': uuidutils.generate_uuid(),
+ 'instance_uuid': instance_uuid,
+ 'node_uuid': node_uuid,
+ 'timestamp': datetime.datetime.utcnow(),
+ 'event_type': 'hardware.ipmi.metrics.update'}
+
+ try:
+ with task_manager.acquire(context,
+ node_uuid,
+ shared=True) as task:
+ task.driver.management.validate(task)
+ sensors_data = task.driver.management.get_sensors_data(
+ task)
+ except NotImplementedError:
+ LOG.warning(_LW(
+ 'get_sensors_data is not implemented for driver'
+ ' %(driver)s, node_uuid is %(node)s'),
+ {'node': node_uuid, 'driver': driver})
+ except exception.FailedToParseSensorData as fps:
+ LOG.warning(_LW(
+ "During get_sensors_data, could not parse "
+ "sensor data for node %(node)s. Error: %(err)s."),
+ {'node': node_uuid, 'err': str(fps)})
+ except exception.FailedToGetSensorData as fgs:
+ LOG.warning(_LW(
+ "During get_sensors_data, could not get "
+ "sensor data for node %(node)s. Error: %(err)s."),
+ {'node': node_uuid, 'err': str(fgs)})
+ except exception.NodeNotFound:
+ LOG.warning(_LW(
+ "During send_sensor_data, node %(node)s was not "
+ "found and presumed deleted by another process."),
+ {'node': node_uuid})
+ except Exception as e:
+ LOG.warning(_LW(
+ "Failed to get sensor data for node %(node)s. "
+ "Error: %(error)s"), {'node': node_uuid, 'error': str(e)})
+ else:
+ message['payload'] = (
+ self._filter_out_unsupported_types(sensors_data))
+ if message['payload']:
+ self.notifier.info(context, "hardware.ipmi.metrics",
+ message)
+ finally:
+ # Yield on every iteration
+ eventlet.sleep(0)
+
+ def _filter_out_unsupported_types(self, sensors_data):
+ """Filters out sensor data types that aren't specified in the config.
+
+ Removes sensor data types that aren't specified in
+ CONF.conductor.send_sensor_data_types.
+
+ :param sensors_data: dict containing sensor types and the associated
+ data
+ :returns: dict with unsupported sensor types removed
+ """
+ allowed = set(x.lower() for x in CONF.conductor.send_sensor_data_types)
+
+ if 'all' in allowed:
+ return sensors_data
+
+ return dict((sensor_type, sensor_value) for (sensor_type, sensor_value)
+ in sensors_data.items() if sensor_type.lower() in allowed)
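+ # E.g. with send_sensor_data_types = ['Temperature'] (illustrative),
+ # only 'Temperature' entries of sensors_data survive this filter.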
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.UnsupportedDriverExtension,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue)
+ def set_boot_device(self, context, node_id, device, persistent=False):
+ """Set the boot device for a node.
+
+ Set the boot device to use on next reboot of the node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param device: the boot device, one of
+ :mod:`iotronic.common.boot_devices`.
+ :param persistent: Whether to set next-boot, or make the change
+ permanent. Default: False.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support management.
+ :raises: InvalidParameterValue when the wrong driver info is
+ specified or an invalid boot device is specified.
+ :raises: MissingParameterValue if required supplied info is missing.
+ """
+ LOG.debug('RPC set_boot_device called for node %(node)s with '
+ 'device %(device)s', {'node': node_id, 'device': device})
+ with task_manager.acquire(context, node_id) as task:
+ node = task.node
+ if not getattr(task.driver, 'management', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=node.driver, extension='management')
+ task.driver.management.validate(task)
+ task.driver.management.set_boot_device(task, device,
+ persistent=persistent)
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.UnsupportedDriverExtension,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue)
+ def get_boot_device(self, context, node_id):
+ """Get the current boot device.
+
+ Returns the current boot device of a node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support management.
+ :raises: InvalidParameterValue when the wrong driver info is
+ specified.
+ :raises: MissingParameterValue if required supplied info is missing.
+ :returns: a dictionary containing:
+
+ :boot_device: the boot device, one of
+ :mod:`iotronic.common.boot_devices` or None if it is unknown.
+ :persistent: Whether the boot device will persist to all
+ future boots or not, None if it is unknown.
+
+ """
+ LOG.debug('RPC get_boot_device called for node %s', node_id)
+ with task_manager.acquire(context, node_id) as task:
+ if not getattr(task.driver, 'management', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='management')
+ task.driver.management.validate(task)
+ return task.driver.management.get_boot_device(task)
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.UnsupportedDriverExtension,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue)
+ def get_supported_boot_devices(self, context, node_id):
+ """Get the list of supported devices.
+
+ Returns the list of supported boot devices of a node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support management.
+ :raises: InvalidParameterValue when the wrong driver info is
+ specified.
+ :raises: MissingParameterValue if required supplied info is missing.
+ :returns: A list with the supported boot devices defined
+ in :mod:`iotronic.common.boot_devices`.
+
+ """
+ LOG.debug('RPC get_supported_boot_devices called for node %s', node_id)
+ with task_manager.acquire(context, node_id, shared=True) as task:
+ if not getattr(task.driver, 'management', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='management')
+ return task.driver.management.get_supported_boot_devices()
+
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.HardwareInspectionFailure,
+ exception.InvalidStateRequested,
+ exception.UnsupportedDriverExtension)
+ def inspect_hardware(self, context, node_id):
+ """Inspect hardware to obtain hardware properties.
+
+ Initiate the inspection of a node. Validations are done
+ synchronously and the actual inspection work is performed in
+ background (asynchronously).
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support inspect.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task
+ :raises: HardwareInspectionFailure when unable to get
+ essential scheduling properties from hardware.
+ :raises: InvalidStateRequested if 'inspect' is not a
+ valid action to do in the current state.
+
+ """
+ LOG.debug('RPC inspect_hardware called for node %s', node_id)
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ if not getattr(task.driver, 'inspect', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='inspect')
+
+ try:
+ task.driver.power.validate(task)
+ task.driver.inspect.validate(task)
+ except (exception.InvalidParameterValue,
+ exception.MissingParameterValue) as e:
+ error = (_("RPC inspect_hardware failed to validate "
+ "inspection or power info. Error: %(msg)s")
+ % {'msg': e})
+ raise exception.HardwareInspectionFailure(error=error)
+
+ try:
+ task.process_event('inspect',
+ callback=self._spawn_worker,
+ call_args=(_do_inspect_hardware, task),
+ err_handler=provisioning_error_handler)
+
+ except exception.InvalidState:
+ raise exception.InvalidStateRequested(
+ action='inspect', node=task.node.uuid,
+ state=task.node.provision_state)
+
+ @periodic_task.periodic_task(
+ spacing=CONF.conductor.check_provision_state_interval)
+ def _check_inspect_timeouts(self, context):
+ """Periodically checks inspect_timeout and fails upon reaching it.
+
+ :param context: request context
+
+ """
+ callback_timeout = CONF.conductor.inspect_timeout
+ if not callback_timeout:
+ return
+
+ filters = {'reserved': False,
+ 'provision_state': states.INSPECTING,
+ 'inspection_started_before': callback_timeout}
+ sort_key = 'inspection_started_at'
+ last_error = _("timeout reached while inspecting the node")
+ self._fail_if_in_state(context, filters, states.INSPECTING,
+ sort_key, last_error=last_error)
+
+ def _fail_if_in_state(self, context, filters, provision_state,
+ sort_key, callback_method=None,
+ err_handler=None, last_error=None):
+ """Fail nodes that are in specified state.
+
+ Retrieves nodes that satisfy the criteria in 'filters'.
+ If any of these nodes is in 'provision_state', it has failed
+ in whatever provisioning activity it was currently doing.
+ That failure is processed here.
+
+ :param context: request context
+ :param filters: criteria (as a dictionary) to get the desired
+ list of nodes that satisfy the filter constraints.
+ For example, if filters['provisioned_before'] = 60,
+ this would process nodes whose provision_updated_at
+ field value was 60 or more seconds before 'now'.
+ :param provision_state: provision_state that the node is in,
+ for the provisioning activity to have failed.
+ :param sort_key: the nodes are sorted based on this key.
+ :param callback_method: the callback method to be invoked in a
+ spawned thread, for a failed node. This
+ method must take a :class:`TaskManager` as
+ the first (and only required) parameter.
+ :param err_handler: for a failed node, the error handler to invoke
+ if an error occurs trying to spawn a thread
+ to do the callback_method.
+ :param last_error: the error message to be updated in node.last_error
+
+ """
+ node_iter = self.iter_nodes(filters=filters,
+ sort_key=sort_key,
+ sort_dir='asc')
+
+ workers_count = 0
+ for node_uuid, driver in node_iter:
+ try:
+ with task_manager.acquire(context, node_uuid) as task:
+ if (task.node.maintenance or
+ task.node.provision_state != provision_state):
+ continue
+
+ # timeout has been reached - process the event 'fail'
+ if callback_method:
+ task.process_event('fail',
+ callback=self._spawn_worker,
+ call_args=(callback_method, task),
+ err_handler=err_handler)
+ else:
+ task.node.last_error = last_error
+ task.process_event('fail')
+ except exception.NoFreeConductorWorker:
+ break
+ except (exception.NodeLocked, exception.NodeNotFound):
+ continue
+ workers_count += 1
+ if workers_count >= CONF.conductor.periodic_max_workers:
+ break
+
+
+def get_vendor_passthru_metadata(route_dict):
+ d = {}
+ for method, metadata in route_dict.items():
+ # 'func' is the vendor method reference, ignore it
+ d[method] = {k: metadata[k] for k in metadata if k != 'func'}
+ return d
+
+
+def power_state_error_handler(e, node, power_state):
+ """Set the node's power states if error occurs.
+
+ This hook gets called upon an exception being raised when spawning
+ the worker thread to change the power state of a node.
+
+ :param e: the exception object that was raised.
+ :param node: an Iotronic node object.
+ :param power_state: the power state to set on the node.
+
+ """
+ if isinstance(e, exception.NoFreeConductorWorker):
+ node.power_state = power_state
+ node.target_power_state = states.NOSTATE
+ node.last_error = _("No free conductor workers available")
+ node.save()
+ LOG.warning(_LW("No free conductor workers available to perform "
+ "an action on node %(node)s, setting node's "
+ "power state back to %(power_state)s."),
+ {'node': node.uuid, 'power_state': power_state})
+
+
+def provisioning_error_handler(e, node, provision_state,
+ target_provision_state):
+ """Set the node's provisioning states if error occurs.
+
+ This hook gets called upon an exception being raised when spawning
+ the worker to do the deployment or tear down of a node.
+
+ :param e: the exception object that was raised.
+ :param node: an Iotronic node object.
+ :param provision_state: the provision state to be set on
+ the node.
+ :param target_provision_state: the target provision state to be
+ set on the node.
+
+ """
+ if isinstance(e, exception.NoFreeConductorWorker):
+ # NOTE(deva): there is no need to clear conductor_affinity
+ # because it isn't updated on a failed deploy
+ node.provision_state = provision_state
+ node.target_provision_state = target_provision_state
+ node.last_error = _("No free conductor workers available")
+ node.save()
+ LOG.warning(_LW("No free conductor workers available to perform "
+ "an action on node %(node)s, setting node's "
+ "provision_state back to %(prov_state)s and "
+ "target_provision_state to %(tgt_prov_state)s."),
+ {'node': node.uuid, 'prov_state': provision_state,
+ 'tgt_prov_state': target_provision_state})
+
+
+def _get_configdrive_obj_name(node):
+ """Generate the object name for the config drive."""
+ return 'configdrive-%s' % node.uuid
+
+
+def _store_configdrive(node, configdrive):
+ """Handle the storage of the config drive.
+
+ If configured, the config drive data are uploaded to Swift. The Node's
+ instance_info is updated to include either the temporary Swift URL
+ from the upload, or if no upload, the actual config drive data.
+
+ :param node: an Iotronic node object.
+ :param configdrive: A gzipped and base64 encoded configdrive.
+ :raises: SwiftOperationError if an error occurs when uploading the
+ config drive to Swift.
+
+ """
+ if CONF.conductor.configdrive_use_swift:
+ # NOTE(lucasagomes): No reason to use a different timeout than
+ # the one used for deploying the node
+ timeout = CONF.conductor.deploy_callback_timeout
+ container = CONF.conductor.configdrive_swift_container
+ object_name = _get_configdrive_obj_name(node)
+
+ object_headers = {'X-Delete-After': timeout}
+
+ with tempfile.NamedTemporaryFile() as fileobj:
+ fileobj.write(configdrive)
+ fileobj.flush()
+
+ swift_api = swift.SwiftAPI()
+ swift_api.create_object(container, object_name, fileobj.name,
+ object_headers=object_headers)
+ configdrive = swift_api.get_temp_url(container, object_name,
+ timeout)
+
+ i_info = node.instance_info
+ i_info['configdrive'] = configdrive
+ node.instance_info = i_info
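+ # When configdrive_use_swift is enabled, instance_info['configdrive']
+ # ends up holding a temporary Swift URL rather than the raw data,
+ # e.g. (illustrative):
+ #     https://swift.example.com/v1/AUTH_xyz/<container>/configdrive-<uuid>?temp_url_sig=...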
+
+
+def do_node_deploy(task, conductor_id, configdrive=None):
+ """Prepare the environment and deploy a node."""
+ node = task.node
+
+ def handle_failure(e, task, logmsg, errmsg):
+ # NOTE(deva): there is no need to clear conductor_affinity
+ task.process_event('fail')
+ args = {'node': task.node.uuid, 'err': e}
+ LOG.warning(logmsg, args)
+ node.last_error = errmsg % e
+
+ try:
+ try:
+ if configdrive:
+ _store_configdrive(node, configdrive)
+ except exception.SwiftOperationError as e:
+ with excutils.save_and_reraise_exception():
+ handle_failure(
+ e, task,
+ _LW('Error while uploading the configdrive for '
+ '%(node)s to Swift'),
+ _('Failed to upload the configdrive to Swift. '
+ 'Error: %s'))
+
+ try:
+ task.driver.deploy.prepare(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ handle_failure(
+ e, task,
+ _LW('Error while preparing to deploy to node %(node)s: '
+ '%(err)s'),
+ _("Failed to prepare to deploy. Error: %s"))
+
+ try:
+ new_state = task.driver.deploy.deploy(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ handle_failure(
+ e, task,
+ _LW('Error in deploy of node %(node)s: %(err)s'),
+ _("Failed to deploy. Error: %s"))
+
+ # Update conductor_affinity to reference this conductor's ID
+ # since there may be local persistent state
+ node.conductor_affinity = conductor_id
+
+ # NOTE(deva): Some drivers may return states.DEPLOYWAIT
+ # eg. if they are waiting for a callback
+ if new_state == states.DEPLOYDONE:
+ task.process_event('done')
+ LOG.info(_LI('Successfully deployed node %(node)s with '
+ 'instance %(instance)s.'),
+ {'node': node.uuid, 'instance': node.instance_uuid})
+ elif new_state == states.DEPLOYWAIT:
+ task.process_event('wait')
+ else:
+ LOG.error(_LE('Unexpected state %(state)s returned while '
+ 'deploying node %(node)s.'),
+ {'state': new_state, 'node': node.uuid})
+ finally:
+ node.save()
+
+
+def handle_sync_power_state_max_retries_exceeded(task,
+ actual_power_state):
+ """Handles power state sync exceeding the max retries.
+
+ When power state synchronization for a node has exceeded the maximum
+ number of retries, change the DB power state to the actual node power
+ state and place the node in maintenance.
+
+ :param task: a TaskManager instance with an exclusive lock
+ :param actual_power_state: the actual power state of the node; a power
+ state from iotronic.common.states
+ """
+ node = task.node
+ msg = (_("During sync_power_state, max retries exceeded "
+ "for node %(node)s, node state %(actual)s "
+ "does not match expected state '%(state)s'. "
+ "Updating DB state to '%(actual)s' "
+ "Switching node to maintenance mode.") %
+ {'node': node.uuid, 'actual': actual_power_state,
+ 'state': node.power_state})
+ node.power_state = actual_power_state
+ node.last_error = msg
+ node.maintenance = True
+ node.maintenance_reason = msg
+ node.save()
+ LOG.error(msg)
+
+
+def do_sync_power_state(task, count):
+ """Sync the power state for this node, incrementing the counter on failure.
+
+ When the limit of power_state_sync_max_retries is reached, the node is put
+ into maintenance mode and the error recorded.
+
+ :param task: a TaskManager instance with an exclusive lock
+ :param count: number of times this node has previously failed a sync
+ :returns: Count of failed attempts.
+ On success, the counter is set to 0.
+ On failure, the count is incremented by one
+ """
+ node = task.node
+ power_state = None
+ count += 1
+
+ max_retries = CONF.conductor.power_state_sync_max_retries
+ # If power driver info cannot be validated, and the node has no prior
+ # state,
+ # do not attempt to sync the node's power state.
+ if node.power_state is None:
+ try:
+ task.driver.power.validate(task)
+ except (exception.InvalidParameterValue,
+ exception.MissingParameterValue):
+ return 0
+
+ try:
+ # The driver may raise an exception, or may return ERROR.
+ # Handle both the same way.
+ power_state = task.driver.power.get_power_state(task)
+ if power_state == states.ERROR:
+ raise exception.PowerStateFailure(
+ _("Power driver returned ERROR state "
+ "while trying to sync power state."))
+ except Exception as e:
+ # Stop if any exception is raised when getting the power state
+ if count > max_retries:
+ handle_sync_power_state_max_retries_exceeded(task, power_state)
+ else:
+ LOG.warning(_LW("During sync_power_state, could not get power "
+ "state for node %(node)s, attempt %(attempt)s of "
+ "%(retries)s. Error: %(err)s."),
+ {'node': node.uuid, 'attempt': count,
+ 'retries': max_retries, 'err': e})
+ return count
+ else:
+ # If node has no prior state AND we successfully got a state,
+ # simply record that.
+ if node.power_state is None:
+ LOG.info(_LI("During sync_power_state, node %(node)s has no "
+ "previous known state. Recording current state "
+ "'%(state)s'."),
+ {'node': node.uuid, 'state': power_state})
+ node.power_state = power_state
+ node.save()
+ return 0
+
+ # If the node is now in the expected state, reset the counter
+ # otherwise, if we've exceeded the retry limit, stop here
+ if node.power_state == power_state:
+ return 0
+ else:
+ if count > max_retries:
+ handle_sync_power_state_max_retries_exceeded(task, power_state)
+ return count
+
+ if CONF.conductor.force_power_state_during_sync:
+ LOG.warning(_LW("During sync_power_state, node %(node)s state "
+ "'%(actual)s' does not match expected state. "
+ "Changing hardware state to '%(state)s'."),
+ {'node': node.uuid, 'actual': power_state,
+ 'state': node.power_state})
+ try:
+ # node_power_action will update the node record
+ # so don't do that again here.
+ utils.node_power_action(task, node.power_state)
+ except Exception as e:
+ LOG.error(_LE(
+ "Failed to change power state of node %(node)s "
+ "to '%(state)s', attempt %(attempt)s of %(retries)s."),
+ {'node': node.uuid,
+ 'state': node.power_state,
+ 'attempt': count,
+ 'retries': max_retries})
+ else:
+ LOG.warning(_LW("During sync_power_state, node %(node)s state "
+ "does not match expected state '%(state)s'. "
+ "Updating recorded state to '%(actual)s'."),
+ {'node': node.uuid, 'actual': power_state,
+ 'state': node.power_state})
+ node.power_state = power_state
+ node.save()
+
+ return count
+
+
+def _do_inspect_hardware(task):
+ """Initiates inspection.
+
+ :param task: a TaskManager instance with an exclusive lock
+ on its node.
+ :raises: HardwareInspectionFailure if the driver doesn't
+ return the state as states.MANAGEABLE or
+ states.INSPECTING.
+
+ """
+ node = task.node
+
+ def handle_failure(e):
+ node.last_error = e
+ task.process_event('fail')
+ LOG.error(_LE("Failed to inspect node %(node)s: %(err)s"),
+ {'node': node.uuid, 'err': e})
+
+ try:
+ new_state = task.driver.inspect.inspect_hardware(task)
+
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ error = str(e)
+ handle_failure(error)
+
+ if new_state == states.MANAGEABLE:
+ task.process_event('done')
+ LOG.info(_LI('Successfully inspected node %(node)s'),
+ {'node': node.uuid})
+ elif new_state != states.INSPECTING:
+ error = (_("During inspection, driver returned unexpected "
+ "state %(state)s") % {'state': new_state})
+ handle_failure(error)
+ raise exception.HardwareInspectionFailure(error=error)
+
+
+def cleaning_error_handler(task, msg, tear_down_cleaning=True):
+ """Put a failed node in CLEANFAIL or ZAPFAIL and maintenance."""
+ # Reset clean step, msg should include current step
+ if task.node.provision_state == states.CLEANING:
+ task.node.clean_step = {}
+ task.node.last_error = msg
+ task.node.maintenance = True
+ task.node.maintenance_reason = msg
+ task.node.save()
+ if tear_down_cleaning:
+ try:
+ task.driver.deploy.tear_down_cleaning(task)
+ except Exception as e:
+ LOG.exception(_LE('Failed to tear down cleaning on node %(uuid)s, '
+ 'reason: %(err)s'), {'err': e, 'uuid': task.node.uuid})
+
+ task.process_event('fail')
+
+
+def _step_key(step):
+ """Sort by priority, then interface priority in event of tie.
+
+ :param step: cleaning step dict to get priority for.
+ """
+ return (step.get('priority'),
+ CLEANING_INTERFACE_PRIORITY[step.get('interface')])
+
+
+def _get_cleaning_steps(task, enabled=False):
+ """Get sorted cleaning steps for task.node
+
+ :param task: A TaskManager object
+ :param enabled: If True, returns only enabled (priority > 0) steps. If
+ False, returns all clean steps.
+ :returns: A list of clean step dictionaries, sorted with the largest
+ priority as the first item.
+ """
+ # Iterate interfaces and get clean steps from each
+ steps = list()
+ for interface in CLEANING_INTERFACE_PRIORITY:
+ interface = getattr(task.driver, interface)
+ if interface:
+ interface_steps = [x for x in interface.get_clean_steps(task)
+ if not enabled or x['priority'] > 0]
+ steps.extend(interface_steps)
+ # Sort the steps from higher priority to lower priority
+ return sorted(steps, key=_step_key, reverse=True)
+
+
+def set_node_cleaning_steps(task):
+ """Get the list of clean steps, save them to the node."""
+ # Get the prioritized steps, store them.
+ node = task.node
+ driver_internal_info = node.driver_internal_info
+ driver_internal_info['clean_steps'] = _get_cleaning_steps(task,
+ enabled=True)
+ node.driver_internal_info = driver_internal_info
+ node.clean_step = {}
+ node.save()
diff --git a/iotronic/conductor/__old/task_manager.py b/iotronic/conductor/__old/task_manager.py
new file mode 100644
index 0000000..4e6724b
--- /dev/null
+++ b/iotronic/conductor/__old/task_manager.py
@@ -0,0 +1,362 @@
+# coding=utf-8
+
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A context manager to perform a series of tasks on a set of resources.
+
+:class:`TaskManager` is a context manager, created on-demand to allow
+synchronized access to a node and its resources.
+
+The :class:`TaskManager` will, by default, acquire an exclusive lock on
+a node for the duration that the TaskManager instance exists. You may
+create a TaskManager instance without locking by passing "shared=True"
+when creating it, but certain operations on the resources held by such
+an instance of TaskManager will not be possible. Requiring this exclusive
+lock guards against parallel operations interfering with each other.
+
+A shared lock is useful when performing non-interfering operations,
+such as validating the driver interfaces.
+
+An exclusive lock is stored in the database to coordinate between
+:class:`iotronic.conductor.manager` instances, which are typically
+deployed on different hosts.
+
+:class:`TaskManager` methods, as well as driver methods, may be decorated to
+determine whether their invocation requires an exclusive lock.
+
+The TaskManager instance exposes certain node resources and properties as
+attributes that you may access:
+
+ task.context
+ The context passed to TaskManager()
+ task.shared
+ False if Node is locked, True if it is not locked. (The
+ 'shared' kwarg of TaskManager())
+ task.node
+ The Node object
+ task.ports
+ Ports belonging to the Node
+ task.driver
+ The Driver for the Node, or the Driver based on the
+ 'driver_name' kwarg of TaskManager().
+
+Example usage:
+
+::
+
+ with task_manager.acquire(context, node_id) as task:
+ task.driver.power.power_on(task.node)
+
+If you need to execute task-requiring code in a background thread, the
+TaskManager instance provides an interface to handle this for you, making
+sure to release resources when the thread finishes (successfully or if
+an exception occurs). Common use of this is within the Manager like so:
+
+::
+
+ with task_manager.acquire(context, node_id) as task:
+
+ task.spawn_after(self._spawn_worker,
+ utils.node_power_action, task, new_state)
+
+All exceptions that occur in the current GreenThread as part of the
+spawn handling are re-raised. You can specify a hook to execute custom
+code when such exceptions occur. For example, the hook is a more elegant
+solution than wrapping the "with task_manager.acquire()" with a
+try..except block. (Note that this hook does not handle exceptions
+raised in the background thread.):
+
+::
+
+ def on_error(e):
+ if isinstance(e, Exception):
+ ...
+
+ with task_manager.acquire(context, node_id) as task:
+
+ task.set_spawn_error_hook(on_error)
+ task.spawn_after(self._spawn_worker,
+ utils.node_power_action, task, new_state)
+
+"""
+
+import functools
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+import retrying
+
+from iotronic.common import driver_factory
+from iotronic.common import exception
+from iotronic.common.i18n import _LW
+from iotronic.common import states
+from iotronic import objects
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
+def require_exclusive_lock(f):
+ """Decorator to require an exclusive lock.
+
+ Decorated functions must take a :class:`TaskManager` as the first
+ parameter. Decorated class methods should take a :class:`TaskManager`
+ as the first parameter after "self".
+
+ """
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ task = args[0] if isinstance(args[0], TaskManager) else args[1]
+ if task.shared:
+ raise exception.ExclusiveLockRequired()
+ return f(*args, **kwargs)
+ return wrapper
+
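+# A minimal usage sketch (set_console is a hypothetical helper, not part
+# of this module): any callable that takes a TaskManager first can be
+# guarded so it fails fast with ExclusiveLockRequired on a shared task:
+#
+#   @require_exclusive_lock
+#   def set_console(task, enabled):
+#       ...  # mutating operation; requires a task created with shared=False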
+
+def acquire(context, node_id, shared=False, driver_name=None):
+ """Shortcut for acquiring a lock on a Node.
+
+ :param context: Request context.
+ :param node_id: ID or UUID of node to lock.
+ :param shared: Boolean indicating whether to take a shared or exclusive
+ lock. Default: False.
+ :param driver_name: Name of Driver. Default: None.
+ :returns: An instance of :class:`TaskManager`.
+
+ """
+ return TaskManager(context, node_id, shared=shared,
+ driver_name=driver_name)
+
+
+class TaskManager(object):
+ """Context manager for tasks.
+
+ This class wraps the locking, driver loading, and acquisition
+ of related resources (eg, Node and Ports) when beginning a unit of work.
+
+ """
+
+ def __init__(self, context, node_id, shared=False, driver_name=None):
+ """Create a new TaskManager.
+
+ Acquire a lock on a node. The lock can be either shared or
+ exclusive. Shared locks may be used for read-only or
+ non-disruptive actions only, and must be considerate to what
+ other threads may be doing on the same node at the same time.
+
+ :param context: request context
+ :param node_id: ID or UUID of node to lock.
+ :param shared: Boolean indicating whether to take a shared or exclusive
+ lock. Default: False.
+ :param driver_name: The name of the driver to load, if different
+ from the Node's current driver.
+ :raises: DriverNotFound
+ :raises: NodeNotFound
+ :raises: NodeLocked
+
+ """
+
+ self._spawn_method = None
+ self._on_error_method = None
+
+ self.context = context
+ self.node = None
+ self.shared = shared
+
+ self.fsm = states.machine.copy()
+
+ # NodeLocked exceptions can be annoying. Let's try to alleviate
+ # some of that pain by retrying our lock attempts. The retrying
+ # module expects a wait_fixed value in milliseconds.
+ @retrying.retry(
+ retry_on_exception=lambda e: isinstance(e, exception.NodeLocked),
+ stop_max_attempt_number=CONF.conductor.node_locked_retry_attempts,
+ wait_fixed=CONF.conductor.node_locked_retry_interval * 1000)
+ def reserve_node():
+ LOG.debug("Attempting to reserve node %(node)s",
+ {'node': node_id})
+ self.node = objects.Node.reserve(context, CONF.host, node_id)
+
+ try:
+ if not self.shared:
+ reserve_node()
+ else:
+ self.node = objects.Node.get(context, node_id)
+ #self.ports = objects.Port.list_by_node_id(context, self.node.id)
+ #self.driver = driver_factory.get_driver(driver_name or
+ # self.node.driver)
+
+ # NOTE(deva): this handles the Juno-era NOSTATE state
+ # and should be deleted after Kilo is released
+ '''
+ if self.node.provision_state is states.NOSTATE:
+ self.node.provision_state = states.AVAILABLE
+ self.node.save()
+
+ self.fsm.initialize(self.node.provision_state)
+ '''
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.release_resources()
+
+ def spawn_after(self, _spawn_method, *args, **kwargs):
+ """Call this to spawn a thread to complete the task.
+
+ The specified method will be called when the TaskManager instance
+ exits.
+
+ :param _spawn_method: a method that returns a GreenThread object
+ :param args: args passed to the method.
+ :param kwargs: additional kwargs passed to the method.
+
+ """
+ self._spawn_method = _spawn_method
+ self._spawn_args = args
+ self._spawn_kwargs = kwargs
+
+ def set_spawn_error_hook(self, _on_error_method, *args, **kwargs):
+ """Create a hook to handle exceptions when spawning a task.
+
+ Create a hook that gets called upon an exception being raised
+ from spawning a background thread to do a task.
+
+ :param _on_error_method: a callable object; its first parameter
+ should accept the Exception object that was raised.
+ :param args: additional args passed to the callable object.
+ :param kwargs: additional kwargs passed to the callable object.
+
+ """
+ self._on_error_method = _on_error_method
+ self._on_error_args = args
+ self._on_error_kwargs = kwargs
+
+ def release_resources(self):
+ """Unlock a node and release resources.
+
+ If an exclusive lock is held, unlock the node. Reset attributes
+ to make it clear that this instance of TaskManager should no
+ longer be accessed.
+ """
+
+ if not self.shared:
+ try:
+ if self.node:
+ objects.Node.release(self.context, CONF.host, self.node.id)
+ except exception.NodeNotFound:
+ # squelch the exception if the node was deleted
+ # within the task's context.
+ pass
+ self.node = None
+ self.driver = None
+ self.ports = None
+ self.fsm = None
+
+ def _thread_release_resources(self, t):
+ """Thread.link() callback to release resources."""
+ self.release_resources()
+
+ def process_event(self, event, callback=None, call_args=None,
+ call_kwargs=None, err_handler=None):
+ """Process the given event for the task's current state.
+
+ :param event: the name of the event to process
+ :param callback: optional callback to invoke upon event transition
+ :param call_args: optional \*args to pass to the callback method
+ :param call_kwargs: optional \**kwargs to pass to the callback method
+ :param err_handler: optional error handler to invoke if the
+ callback fails, eg. because there are no workers available
+ (err_handler should accept arguments node, prev_prov_state, and
+ prev_target_state)
+ :raises: InvalidState if the event is not allowed by the associated
+ state machine
+ """
+ # Advance the state model for the given event. Note that this doesn't
+ # alter the node in any way. This may raise InvalidState, if this event
+ # is not allowed in the current state.
+ self.fsm.process_event(event)
+
+ # stash current states in the error handler if callback is set,
+ # in case we fail to get a worker from the pool
+ if err_handler and callback:
+ self.set_spawn_error_hook(err_handler, self.node,
+ self.node.provision_state,
+ self.node.target_provision_state)
+
+ self.node.provision_state = self.fsm.current_state
+ self.node.target_provision_state = self.fsm.target_state
+
+ # set up the async worker
+ if callback:
+ # clear the error if we're going to start work in a callback
+ self.node.last_error = None
+ if call_args is None:
+ call_args = ()
+ if call_kwargs is None:
+ call_kwargs = {}
+ self.spawn_after(callback, *call_args, **call_kwargs)
+
+ # publish the state transition by saving the Node
+ self.node.save()
+
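+ # Illustrative caller wiring for process_event (a sketch; the real call
+ # site lives in the conductor manager, and do_node_deploy/conductor_id
+ # are assumed to be in scope there):
+ #
+ # task.process_event('deploy',
+ # callback=self._spawn_worker,
+ # call_args=(do_node_deploy, task, conductor_id),
+ # err_handler=provisioning_error_handler)
+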
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if exc_type is None and self._spawn_method is not None:
+ # Spawn a worker to complete the task
+ # The linked callback below will be called whenever:
+ # - background task finished with no errors.
+ # - background task has crashed with exception.
+ # - callback was added after the background task has
+ # finished or crashed. While eventlet currently doesn't
+ # schedule the new thread until the current thread blocks
+ # for some reason, this is true.
+ # All of the above are asserted in tests such that we'll
+ # catch if eventlet ever changes this behavior.
+ thread = None
+ try:
+ thread = self._spawn_method(*self._spawn_args,
+ **self._spawn_kwargs)
+
+ # NOTE(comstud): Trying to use a lambda here causes
+ # the callback to not occur for some reason. This
+ # also makes it easier to test.
+ thread.link(self._thread_release_resources)
+ # Don't unlock! The unlock will occur when the
+ # thread finishes.
+ return
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ try:
+ # Execute the on_error hook if set
+ if self._on_error_method:
+ self._on_error_method(e, *self._on_error_args,
+ **self._on_error_kwargs)
+ except Exception:
+ LOG.warning(_LW("Task's on_error hook failed to "
+ "call %(method)s on node %(node)s"),
+ {'method': self._on_error_method.__name__,
+ 'node': self.node.uuid})
+
+ if thread is not None:
+ # This means the link() failed for some
+ # reason. Nuke the thread.
+ thread.cancel()
+ self.release_resources()
+ self.release_resources()
diff --git a/iotronic/conductor/__old/utils.py b/iotronic/conductor/__old/utils.py
new file mode 100644
index 0000000..31a36ab
--- /dev/null
+++ b/iotronic/conductor/__old/utils.py
@@ -0,0 +1,160 @@
+# coding=utf-8
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+from oslo_utils import excutils
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common.i18n import _LI
+from iotronic.common.i18n import _LW
+from iotronic.common import states
+from iotronic.conductor import task_manager
+
+LOG = log.getLogger(__name__)
+
+
+@task_manager.require_exclusive_lock
+def node_set_boot_device(task, device, persistent=False):
+ """Set the boot device for a node.
+
+ :param task: a TaskManager instance.
+ :param device: Boot device. Values are vendor-specific.
+ :param persistent: Whether to set next-boot, or make the change
+ permanent. Default: False.
+ :raises: InvalidParameterValue if the validation of the
+ ManagementInterface fails.
+
+ """
+ if getattr(task.driver, 'management', None):
+ task.driver.management.validate(task)
+ task.driver.management.set_boot_device(task,
+ device=device,
+ persistent=persistent)
+
+
+@task_manager.require_exclusive_lock
+def node_power_action(task, new_state):
+ """Change power state or reset for a node.
+
+ Perform the requested power action if the transition is required.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param new_state: Any power state from iotronic.common.states. If the
+ state is 'REBOOT' then a reboot will be attempted, otherwise
+ the node power state is set directly to new_state.
+ :raises: InvalidParameterValue when the wrong state is specified
+ or the wrong driver info is specified.
+ :raises: other exceptions raised by the node's power driver if
+ something went wrong during the power action.
+
+ """
+ node = task.node
+ target_state = states.POWER_ON if new_state == states.REBOOT else new_state
+
+ if new_state != states.REBOOT:
+ try:
+ curr_state = task.driver.power.get_power_state(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ node['last_error'] = _(
+ "Failed to change power state to '%(target)s'. "
+ "Error: %(error)s") % {'target': new_state, 'error': e}
+ node['target_power_state'] = states.NOSTATE
+ node.save()
+
+ if curr_state == new_state:
+ # Neither the iotronic service nor the hardware has erred. The
+ # node is, for some reason, already in the requested state,
+ # though we don't know why. eg, perhaps the user previously
+ # requested the node POWER_ON, the network delayed those IPMI
+ # packets, and they are trying again -- but the node finally
+ # responds to the first request, and so the second request
+ # gets to this check and stops.
+ # This isn't an error, so we'll clear last_error field
+ # (from previous operation), log a warning, and return.
+ node['last_error'] = None
+ # NOTE(dtantsur): under rare conditions we can get out of sync here
+ node['power_state'] = new_state
+ node['target_power_state'] = states.NOSTATE
+ node.save()
+ LOG.warn(_LW("Not going to change_node_power_state because "
+ "current state = requested state = '%(state)s'."),
+ {'state': curr_state})
+ return
+
+ if curr_state == states.ERROR:
+ # be optimistic and continue action
+ LOG.warn(_LW("Driver returns ERROR power state for node %s."),
+ node.uuid)
+
+ # Set the target_power_state and clear any last_error, if we're
+ # starting a new operation. This will expose to other processes
+ # and clients that work is in progress.
+ if node['target_power_state'] != target_state:
+ node['target_power_state'] = target_state
+ node['last_error'] = None
+ node.save()
+
+ # take power action
+ try:
+ if new_state != states.REBOOT:
+ task.driver.power.set_power_state(task, new_state)
+ else:
+ task.driver.power.reboot(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ node['last_error'] = _(
+ "Failed to change power state to '%(target)s'. "
+ "Error: %(error)s") % {'target': target_state, 'error': e}
+ else:
+ # success!
+ node['power_state'] = target_state
+ LOG.info(_LI('Successfully set node %(node)s power state to '
+ '%(state)s.'),
+ {'node': node.uuid, 'state': target_state})
+ finally:
+ node['target_power_state'] = states.NOSTATE
+ node.save()
+
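+# For example, node_power_action(task, states.REBOOT) invokes
+# task.driver.power.reboot(task) and, on success, records POWER_ON as the
+# node's power_state, since REBOOT is mapped to a POWER_ON target above.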
+
+@task_manager.require_exclusive_lock
+def cleanup_after_timeout(task):
+ """Cleanup deploy task after timeout.
+
+ :param task: a TaskManager instance.
+ """
+ node = task.node
+ msg = (_('Timeout reached while waiting for callback for node %s')
+ % node.uuid)
+ node.last_error = msg
+ LOG.error(msg)
+ node.save()
+
+ error_msg = _('Cleanup failed for node %(node)s after deploy timeout: '
+ '%(error)s')
+ try:
+ task.driver.deploy.clean_up(task)
+ except exception.IotronicException as e:
+ msg = error_msg % {'node': node.uuid, 'error': e}
+ LOG.error(msg)
+ node.last_error = msg
+ node.save()
+ except Exception as e:
+ msg = error_msg % {'node': node.uuid, 'error': e}
+ LOG.error(msg)
+ node.last_error = _('Deploy timed out, but an unhandled exception was '
+ 'encountered while aborting. More info may be '
+ 'found in the log file.')
+ node.save()
diff --git a/iotronic/conductor/manager.py b/iotronic/conductor/manager.py
new file mode 100644
index 0000000..0dc4d7b
--- /dev/null
+++ b/iotronic/conductor/manager.py
@@ -0,0 +1,2269 @@
+# coding=utf-8
+
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# Copyright 2013 International Business Machines Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Conduct all activity related to bare-metal deployments.
+
+A single instance of :py:class:`iotronic.conductor.manager.ConductorManager` is
+created within the *iotronic-conductor* process, and is responsible for
+performing all actions on bare metal resources (Chassis, Nodes, and Ports).
+Commands are received via RPCs. The conductor service also performs periodic
+tasks, eg. to monitor the status of active deployments.
+
+Drivers are loaded via entrypoints by the
+:py:class:`iotronic.common.driver_factory` class. Each driver is instantiated
+only once, when the ConductorManager service starts. In this way, a single
+ConductorManager may use multiple drivers, and manage heterogeneous hardware.
+
+When multiple :py:class:`ConductorManager` instances are run on different
+hosts, they are
+all active and cooperatively manage all nodes in the deployment. Nodes are
+locked by each conductor when performing actions which change the state of that
+node; these locks are represented by the
+:py:class:`iotronic.conductor.task_manager.TaskManager` class.
+
+A :py:class:`iotronic.common.hash_ring.HashRing` is used to distribute nodes
+across the set of active conductors which support each node's driver.
+Rebalancing this ring can trigger various actions by each conductor, such as
+building or tearing down the TFTP environment for a node, notifying Neutron of
+a change, etc.
+"""
+'''
+import collections
+import datetime
+import inspect
+import tempfile
+
+
+from iotronic.common import dhcp_factory
+
+
+from iotronic.common.glance_service import service_utils as glance_utils
+
+from iotronic.common import images
+from iotronic.common import rpc
+from iotronic.common import states
+from iotronic.common import swift
+from iotronic.iotconductor import task_manager
+from iotronic.iotconductor import utils
+
+from iotronic import objects
+from iotronic.openstack.common import periodic_task
+'''
+
+import inspect
+import tempfile
+import threading
+
+import eventlet
+from eventlet import greenpool
+
+# NOTE: inspect, tempfile and swift are required by helpers below
+# (_collect_periodic_tasks, _store_configdrive).
+from iotronic.common import swift
+from iotronic.db import api as dbapi
+from oslo_concurrency import lockutils
+from oslo_config import cfg
+from oslo_db import exception as db_exception
+from oslo_log import log
+import oslo_messaging as messaging
+from oslo_utils import excutils
+from oslo_utils import uuidutils
+from iotronic.conductor import utils
+
+from iotronic.common import hash_ring as hash
+from iotronic.common.i18n import _
+from iotronic.common.i18n import _LC
+from iotronic.common.i18n import _LE
+from iotronic.common.i18n import _LI
+from iotronic.common.i18n import _LW
+#from iotronic.common import driver_factory
+
+from iotronic.conductor import task_manager
+from iotronic.common import states
+from iotronic.openstack.common import periodic_task
+from iotronic.common import exception
+
+MANAGER_TOPIC = 'iotronic.conductor_manager'
+WORKER_SPAWN_lOCK = "conductor_worker_spawn"
+
+LOG = log.getLogger(__name__)
+
+conductor_opts = [
+ cfg.StrOpt('api_url',
+ help=('URL of the Iotronic API service. If not set, iotronic can '
+ 'get the current value from the keystone service '
+ 'catalog.')),
+ cfg.IntOpt('heartbeat_interval',
+ default=10,
+ help='Seconds between conductor heart beats.'),
+ cfg.IntOpt('heartbeat_timeout',
+ default=60,
+ help='Maximum time (in seconds) since the last check-in '
+ 'of a conductor. A conductor is considered inactive '
+ 'when this time has been exceeded.'),
+ cfg.IntOpt('sync_power_state_interval',
+ default=60,
+ help='Interval between syncing the node power state to the '
+ 'database, in seconds.'),
+ cfg.IntOpt('check_provision_state_interval',
+ default=60,
+ help='Interval between checks of provision timeouts, '
+ 'in seconds.'),
+ cfg.IntOpt('deploy_callback_timeout',
+ default=1800,
+ help='Timeout (seconds) to wait for a callback from '
+ 'a deploy ramdisk. Set to 0 to disable timeout.'),
+ cfg.BoolOpt('force_power_state_during_sync',
+ default=True,
+ help='During sync_power_state, should the hardware power '
+ 'state be set to the state recorded in the database '
+ '(True) or should the database be updated based on '
+ 'the hardware state (False).'),
+ cfg.IntOpt('power_state_sync_max_retries',
+ default=3,
+ help='During sync_power_state failures, limit the '
+ 'number of times Iotronic should try syncing the '
+ 'hardware node power state with the node power state '
+ 'in DB'),
+ cfg.IntOpt('periodic_max_workers',
+ default=8,
+ help='Maximum number of worker threads that can be started '
+ 'simultaneously by a periodic task. Should be less '
+ 'than RPC thread pool size.'),
+ cfg.IntOpt('workers_pool_size',
+ default=100,
+ help='The size of the workers greenthread pool.'),
+ cfg.IntOpt('node_locked_retry_attempts',
+ default=3,
+ help='Number of attempts to grab a node lock.'),
+ cfg.IntOpt('node_locked_retry_interval',
+ default=1,
+ help='Seconds to sleep between node lock attempts.'),
+ cfg.BoolOpt('send_sensor_data',
+ default=False,
+ help='Enable sending sensor data message via the '
+ 'notification bus'),
+ cfg.IntOpt('send_sensor_data_interval',
+ default=600,
+ help='Seconds between the conductor sending sensor data '
+ 'messages to Ceilometer via the notification bus.'),
+ cfg.ListOpt('send_sensor_data_types',
+ default=['ALL'],
+ help='List of comma-separated meter types which need to be'
+ ' sent to Ceilometer. The default value, "ALL", is a '
+ 'special value meaning send all the sensor data.'),
+ cfg.IntOpt('sync_local_state_interval',
+ default=180,
+ help='When conductors join or leave the cluster, existing '
+ 'conductors may need to update any persistent '
+ 'local state as nodes are moved around the cluster. '
+ 'This option controls how often, in seconds, each '
+ 'conductor will check for nodes that it should '
+ '"take over". Set it to a negative value to disable '
+ 'the check entirely.'),
+ cfg.BoolOpt('configdrive_use_swift',
+ default=False,
+ help='Whether to upload the config drive to Swift.'),
+ cfg.StrOpt('configdrive_swift_container',
+ default='iotronic_configdrive_container',
+ help='Name of the Swift container to store config drive '
+ 'data. Used when configdrive_use_swift is True.'),
+ cfg.IntOpt('inspect_timeout',
+ default=1800,
+ help='Timeout (seconds) for waiting for node inspection. '
+ '0 - unlimited.'),
+ cfg.BoolOpt('clean_nodes',
+ default=True,
+ help='Cleaning is a configurable set of steps, such as '
+ 'erasing disk drives, that are performed on the node '
+ 'to ensure it is in a baseline state and ready to be '
+ 'deployed to. '
+ 'This is done after instance deletion, and during '
+ 'the transition from a "managed" to "available" '
+ 'state. When enabled, the particular steps '
+ 'performed to clean a node depend on which driver '
+ 'that node is managed by; see the individual '
+ 'driver\'s documentation for details. '
+ 'NOTE: The introduction of the cleaning operation '
+ 'causes instance deletion to take significantly '
+ 'longer. In an environment where all tenants are '
+ 'trusted (eg, because there is only one tenant), '
+ 'this option could be safely disabled.'),
+
+ #################### NEW
+
+ cfg.IntOpt('board_locked_retry_attempts',
+ default=3,
+ help='Number of attempts to grab a board lock.'),
+ cfg.IntOpt('board_locked_retry_interval',
+ default=1,
+ help='Seconds to sleep between board lock attempts.'),
+]
+CONF = cfg.CONF
+CONF.register_opts(conductor_opts, 'conductor')
+
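+# These options belong to the [conductor] section of iotronic.conf; a
+# matching snippet (showing the defaults registered above) would be:
+#
+#   [conductor]
+#   heartbeat_interval = 10
+#   heartbeat_timeout = 60
+#   power_state_sync_max_retries = 3
+#   force_power_state_during_sync = True
+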
+CLEANING_INTERFACE_PRIORITY = {
+ # When two clean steps have the same priority, their order is determined
+ # by which interface is implementing the clean step. The clean step of the
+ # interface with the highest value here, will be executed first in that
+ # case.
+ 'power': 3,
+ 'management': 2,
+ 'deploy': 1
+}
+
+
+def get_vendor_passthru_metadata(route_dict):
+ d = {}
+ for method, metadata in route_dict.items():
+ # 'func' is the vendor method reference, ignore it
+ d[method] = {k: metadata[k] for k in metadata if k != 'func'}
+ return d
+
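+# Illustrative transformation (hypothetical route dict): given
+#   {'send_raw': {'func': <bound method>, 'async': True,
+#                 'http_methods': ['POST']}}
+# this returns
+#   {'send_raw': {'async': True, 'http_methods': ['POST']}}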
+
+def power_state_error_handler(e, node, power_state):
+ """Set the node's power states if error occurs.
+
+ This hook gets called upon an exception being raised when spawning
+ the worker thread to change the power state of a node.
+
+ :param e: the exception object that was raised.
+ :param node: an Iotronic node object.
+ :param power_state: the power state to set on the node.
+
+ """
+ if isinstance(e, exception.NoFreeConductorWorker):
+ node.power_state = power_state
+ node.target_power_state = states.NOSTATE
+ node.last_error = (_("No free conductor workers available"))
+ node.save()
+ LOG.warning(_LW("No free conductor workers available to perform "
+ "an action on node %(node)s, setting node's "
+ "power state back to %(power_state)s."),
+ {'node': node.uuid, 'power_state': power_state})
+
+
+def provisioning_error_handler(e, node, provision_state,
+ target_provision_state):
+ """Set the node's provisioning states if error occurs.
+
+ This hook gets called upon an exception being raised when spawning
+ the worker to do the deployment or tear down of a node.
+
+ :param e: the exception object that was raised.
+ :param node: an Iotronic node object.
+ :param provision_state: the provision state to be set on
+ the node.
+ :param target_provision_state: the target provision state to be
+ set on the node.
+
+ """
+ if isinstance(e, exception.NoFreeConductorWorker):
+ # NOTE(deva): there is no need to clear conductor_affinity
+ # because it isn't updated on a failed deploy
+ node.provision_state = provision_state
+ node.target_provision_state = target_provision_state
+ node.last_error = (_("No free conductor workers available"))
+ node.save()
+ LOG.warning(_LW("No free conductor workers available to perform "
+ "an action on node %(node)s, setting node's "
+ "provision_state back to %(prov_state)s and "
+ "target_provision_state to %(tgt_prov_state)s."),
+ {'node': node.uuid, 'prov_state': provision_state,
+ 'tgt_prov_state': target_provision_state})
+
+
+def _get_configdrive_obj_name(node):
+ """Generate the object name for the config drive."""
+ return 'configdrive-%s' % node.uuid
+
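+# e.g. a node with uuid 1be26c0b-03f2-4d2e-ae87-c02d7f33c123 yields the
+# object name 'configdrive-1be26c0b-03f2-4d2e-ae87-c02d7f33c123'.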
+
+def _store_configdrive(node, configdrive):
+ """Handle the storage of the config drive.
+
+ If configured, the config drive data are uploaded to Swift. The Node's
+ instance_info is updated to include either the temporary Swift URL
+ from the upload or, if there is no upload, the actual config drive data.
+
+ :param node: an Iotronic node object.
+ :param configdrive: A gzipped and base64 encoded configdrive.
+ :raises: SwiftOperationError if an error occurs when uploading the
+ config drive to Swift.
+
+ """
+ if CONF.conductor.configdrive_use_swift:
+ # NOTE(lucasagomes): No reason to use a different timeout than
+ # the one used for deploying the node
+ timeout = CONF.conductor.deploy_callback_timeout
+ container = CONF.conductor.configdrive_swift_container
+ object_name = _get_configdrive_obj_name(node)
+
+ object_headers = {'X-Delete-After': timeout}
+
+ with tempfile.NamedTemporaryFile() as fileobj:
+ fileobj.write(configdrive)
+ fileobj.flush()
+
+ swift_api = swift.SwiftAPI()
+ swift_api.create_object(container, object_name, fileobj.name,
+ object_headers=object_headers)
+ configdrive = swift_api.get_temp_url(container, object_name,
+ timeout)
+
+ i_info = node.instance_info
+ i_info['configdrive'] = configdrive
+ node.instance_info = i_info
+
+
+def do_node_deploy(task, conductor_id, configdrive=None):
+ """Prepare the environment and deploy a node."""
+ node = task.node
+
+ def handle_failure(e, task, logmsg, errmsg):
+ # NOTE(deva): there is no need to clear conductor_affinity
+ task.process_event('fail')
+ args = {'node': task.node.uuid, 'err': e}
+ LOG.warning(logmsg, args)
+ node.last_error = errmsg % e
+
+ try:
+ try:
+ if configdrive:
+ _store_configdrive(node, configdrive)
+ except exception.SwiftOperationError as e:
+ with excutils.save_and_reraise_exception():
+ handle_failure(
+ e, task,
+ _LW('Error while uploading the configdrive for '
+ '%(node)s to Swift'),
+ _('Failed to upload the configdrive to Swift. '
+ 'Error: %s'))
+
+ try:
+ task.driver.deploy.prepare(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ handle_failure(
+ e, task,
+ _LW('Error while preparing to deploy to node %(node)s: '
+ '%(err)s'),
+ _("Failed to prepare to deploy. Error: %s"))
+
+ try:
+ new_state = task.driver.deploy.deploy(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ handle_failure(
+ e, task,
+ _LW('Error in deploy of node %(node)s: %(err)s'),
+ _("Failed to deploy. Error: %s"))
+
+ # Update conductor_affinity to reference this conductor's ID
+ # since there may be local persistent state
+ node.conductor_affinity = conductor_id
+
+ # NOTE(deva): Some drivers may return states.DEPLOYWAIT
+ # eg. if they are waiting for a callback
+ if new_state == states.DEPLOYDONE:
+ task.process_event('done')
+ LOG.info(_LI('Successfully deployed node %(node)s with '
+ 'instance %(instance)s.'),
+ {'node': node.uuid, 'instance': node.instance_uuid})
+ elif new_state == states.DEPLOYWAIT:
+ task.process_event('wait')
+ else:
+ LOG.error(_LE('Unexpected state %(state)s returned while '
+ 'deploying node %(node)s.'),
+ {'state': new_state, 'node': node.uuid})
+ finally:
+ node.save()
+
+
+def handle_sync_power_state_max_retries_exceeded(task,
+ actual_power_state):
+ """Handles power state sync exceeding the max retries.
+
+ When power state synchronization for a node has exceeded the maximum
+ number of retries, change the DB power state to the actual node power
+ state and place the node in maintenance.
+
+ :param task: a TaskManager instance with an exclusive lock
+ :param actual_power_state: the actual power state of the node; a power
+ state from iotronic.common.states
+ """
+ node = task.node
+ msg = (_("During sync_power_state, max retries exceeded "
+ "for node %(node)s, node state %(actual)s "
+ "does not match expected state '%(state)s'. "
+ "Updating DB state to '%(actual)s' "
+ "Switching node to maintenance mode.") %
+ {'node': node.uuid, 'actual': actual_power_state,
+ 'state': node.power_state})
+ node.power_state = actual_power_state
+ node.last_error = msg
+ node.maintenance = True
+ node.maintenance_reason = msg
+ node.save()
+ LOG.error(msg)
+
+
+def do_sync_power_state(task, count):
+ """Sync the power state for this node, incrementing the counter on failure.
+
+ When the limit of power_state_sync_max_retries is reached, the node is put
+ into maintenance mode and the error recorded.
+
+ :param task: a TaskManager instance with an exclusive lock
+ :param count: number of times this node has previously failed a sync
+ :returns: Count of failed attempts.
+ On success, the counter is set to 0.
+ On failure, the count is incremented by one
+ """
+ node = task.node
+ power_state = None
+ count += 1
+
+ max_retries = CONF.conductor.power_state_sync_max_retries
+ # If power driver info cannot be validated, and the node has no prior
+ # state,
+ # do not attempt to sync the node's power state.
+ if node.power_state is None:
+ try:
+ task.driver.power.validate(task)
+ except (exception.InvalidParameterValue,
+ exception.MissingParameterValue):
+ return 0
+
+ try:
+ # The driver may raise an exception, or may return ERROR.
+ # Handle both the same way.
+ power_state = task.driver.power.get_power_state(task)
+ if power_state == states.ERROR:
+ raise exception.PowerStateFailure(
+ _("Power driver returned ERROR state "
+ "while trying to sync power state."))
+ except Exception as e:
+ # Stop if any exception is raised when getting the power state
+ if count > max_retries:
+ handle_sync_power_state_max_retries_exceeded(task, power_state)
+ else:
+ LOG.warning(_LW("During sync_power_state, could not get power "
+ "state for node %(node)s, attempt %(attempt)s of "
+ "%(retries)s. Error: %(err)s."),
+ {'node': node.uuid, 'attempt': count,
+ 'retries': max_retries, 'err': e})
+ return count
+ else:
+ # If node has no prior state AND we successfully got a state,
+ # simply record that.
+ if node.power_state is None:
+ LOG.info(_LI("During sync_power_state, node %(node)s has no "
+ "previous known state. Recording current state "
+ "'%(state)s'."),
+ {'node': node.uuid, 'state': power_state})
+ node.power_state = power_state
+ node.save()
+ return 0
+
+ # If the node is now in the expected state, reset the counter
+ # otherwise, if we've exceeded the retry limit, stop here
+ if node.power_state == power_state:
+ return 0
+ else:
+ if count > max_retries:
+ handle_sync_power_state_max_retries_exceeded(task, power_state)
+ return count
+
+ if CONF.conductor.force_power_state_during_sync:
+ LOG.warning(_LW("During sync_power_state, node %(node)s state "
+ "'%(actual)s' does not match expected state. "
+ "Changing hardware state to '%(state)s'."),
+ {'node': node.uuid, 'actual': power_state,
+ 'state': node.power_state})
+ try:
+ # node_power_action will update the node record
+ # so don't do that again here.
+ utils.node_power_action(task, node.power_state)
+ except Exception as e:
+ LOG.error(_LE(
+ "Failed to change power state of node %(node)s "
+ "to '%(state)s', attempt %(attempt)s of %(retries)s."),
+ {'node': node.uuid,
+ 'state': node.power_state,
+ 'attempt': count,
+ 'retries': max_retries})
+ else:
+ LOG.warning(_LW("During sync_power_state, node %(node)s state "
+ "does not match expected state '%(state)s'. "
+ "Updating recorded state to '%(actual)s'."),
+ {'node': node.uuid, 'actual': power_state,
+ 'state': node.power_state})
+ node.power_state = power_state
+ node.save()
+
+ return count
+
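+# Worked example: with power_state_sync_max_retries = 3 (the default), a
+# node whose driver keeps raising in get_power_state() logs a warning on
+# sync attempts 1-3; the 4th consecutive failure (count > max_retries)
+# triggers handle_sync_power_state_max_retries_exceeded(), which puts the
+# node in maintenance.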
+
+def _do_inspect_hardware(task):
+ """Initiates inspection.
+
+ :param task: a TaskManager instance with an exclusive lock
+ on its node.
+ :raises: HardwareInspectionFailure if the driver doesn't
+ return the state as states.MANAGEABLE or
+ states.INSPECTING.
+
+ """
+ node = task.node
+
+ def handle_failure(e):
+ node.last_error = e
+ task.process_event('fail')
+ LOG.error(_LE("Failed to inspect node %(node)s: %(err)s"),
+ {'node': node.uuid, 'err': e})
+
+ try:
+ new_state = task.driver.inspect.inspect_hardware(task)
+
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ error = str(e)
+ handle_failure(error)
+
+ if new_state == states.MANAGEABLE:
+ task.process_event('done')
+ LOG.info(_LI('Successfully inspected node %(node)s'),
+ {'node': node.uuid})
+ elif new_state != states.INSPECTING:
+ error = (_("During inspection, driver returned unexpected "
+ "state %(state)s") % {'state': new_state})
+ handle_failure(error)
+ raise exception.HardwareInspectionFailure(error=error)
+
+
+def cleaning_error_handler(task, msg, tear_down_cleaning=True):
+ """Put a failed node in CLEANFAIL or ZAPFAIL and maintenance."""
+ # Reset clean step, msg should include current step
+ if task.node.provision_state == states.CLEANING:
+ task.node.clean_step = {}
+ task.node.last_error = msg
+ task.node.maintenance = True
+ task.node.maintenance_reason = msg
+ task.node.save()
+ if tear_down_cleaning:
+ try:
+ task.driver.deploy.tear_down_cleaning(task)
+ except Exception as e:
+ LOG.exception(_LE('Failed to tear down cleaning on node %(uuid)s, '
+ 'reason: %(err)s'), {'err': e, 'uuid': task.node.uuid})
+
+ task.process_event('fail')
+
+
+def _step_key(step):
+ """Sort by priority, then interface priority in event of tie.
+
+ :param step: cleaning step dict to get priority for.
+ """
+ return (step.get('priority'),
+ CLEANING_INTERFACE_PRIORITY[step.get('interface')])
+
+
+def _get_cleaning_steps(task, enabled=False):
+ """Get sorted cleaning steps for task.node
+
+ :param task: A TaskManager object
+ :param enabled: If True, returns only enabled (priority > 0) steps. If
+ False, returns all clean steps.
+ :returns: A list of clean step dictionaries, sorted with the largest
+ priority as the first item.
+ """
+ # Iterate interfaces and get clean steps from each
+ steps = list()
+ for interface in CLEANING_INTERFACE_PRIORITY:
+ interface = getattr(task.driver, interface)
+ if interface:
+ interface_steps = [x for x in interface.get_clean_steps(task)
+ if not enabled or x['priority'] > 0]
+ steps.extend(interface_steps)
+ # Sort the steps from higher priority to lower priority
+ return sorted(steps, key=_step_key, reverse=True)
+
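+# Illustrative sort behavior (hypothetical steps): given
+#   [{'priority': 10, 'interface': 'deploy'},
+#    {'priority': 10, 'interface': 'power'},
+#    {'priority': 20, 'interface': 'management'}]
+# the management step sorts first (priority 20); the tie at priority 10 is
+# broken by CLEANING_INTERFACE_PRIORITY (power=3 > deploy=1), so the power
+# step precedes the deploy step.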
+
+def set_node_cleaning_steps(task):
+ """Get the list of clean steps, save them to the node."""
+ # Get the prioritized steps, store them.
+ node = task.node
+ driver_internal_info = node.driver_internal_info
+ driver_internal_info['clean_steps'] = _get_cleaning_steps(task,
+ enabled=True)
+ node.driver_internal_info = driver_internal_info
+ node.clean_step = {}
+ node.save()
+
+
+
+##################### NEW
+
+
+class ConductorManager(periodic_task.PeriodicTasks):
+ """Iotronic Conductor manager main class."""
+
+ # NOTE(rloo): This must be in sync with rpcapi.ConductorAPI's.
+ RPC_API_VERSION = '1.0'
+
+ target = messaging.Target(version=RPC_API_VERSION)
+
+ def __init__(self, host, topic):
+ super(ConductorManager, self).__init__()
+ if not host:
+ host = CONF.host
+ self.host = host
+ self.topic = topic
+ #self.power_state_sync_count = collections.defaultdict(int)
+ #self.notifier = rpc.get_notifier()
+ '''
+ def _get_driver(self, driver_name):
+ """Get the driver.
+
+ :param driver_name: name of the driver.
+ :returns: the driver; an instance of a class which implements
+ :class:`iotronic.drivers.base.BaseDriver`.
+ :raises: DriverNotFound if the driver is not loaded.
+
+ """
+ try:
+ return self._driver_factory[driver_name].obj
+ except KeyError:
+ raise exception.DriverNotFound(driver_name=driver_name)
+ '''
+ def init_host(self):
+ self.dbapi = dbapi.get_instance()
+
+ self._keepalive_evt = threading.Event()
+ """Event for the keepalive thread."""
+
+ self._worker_pool = greenpool.GreenPool(
+ size=CONF.conductor.workers_pool_size)
+ """GreenPool of background workers for performing tasks async."""
+
+ self.ring_manager = hash.HashRingManager()
+ """Consistent hash ring which maps drivers to conductors."""
+
+ # NOTE(deva): instantiating DriverFactory may raise DriverLoadError
+ # or DriverNotFound
+ #self._driver_factory = driver_factory.DriverFactory()
+ #"""Driver factory loads all enabled drivers."""
+
+ #self.drivers = self._driver_factory.names
+ """List of driver names which this conductor supports."""
+ '''
+ if not self.drivers:
+ msg = _LE("Conductor %s cannot be started because no drivers "
+ "were loaded. This could be because no drivers were "
+ "specified in 'enabled_drivers' config option.")
+ LOG.error(msg, self.host)
+ raise exception.NoDriversLoaded(conductor=self.host)
+
+ # Collect driver-specific periodic tasks
+ for driver_obj in driver_factory.drivers().values():
+ self._collect_periodic_tasks(driver_obj)
+ for iface_name in (driver_obj.core_interfaces +
+ driver_obj.standard_interfaces +
+ ['vendor']):
+ iface = getattr(driver_obj, iface_name, None)
+ if iface:
+ self._collect_periodic_tasks(iface)
+ '''
+ # clear all locks held by this conductor before registering
+ self.dbapi.clear_node_reservations_for_conductor(self.host)
+ try:
+ # Register this conductor with the cluster
+ cdr = self.dbapi.register_conductor({'hostname': self.host,
+ 'drivers': ['fake']})
+ except exception.ConductorAlreadyRegistered:
+ # This conductor was already registered and did not shut down
+ # properly, so log a warning and update the record.
+ LOG.warn(_LW("A conductor with hostname %(hostname)s "
+ "was previously registered. Updating registration"),
+ {'hostname': self.host})
+ # self.drivers is never populated here (the driver factory is
+ # disabled above), so register the same driver list as on the
+ # first registration.
+ cdr = self.dbapi.register_conductor({'hostname': self.host,
+ 'drivers': ['fake']},
+ update_existing=True)
+ self.conductor = cdr
+
+ # Spawn a dedicated greenthread for the keepalive
+ try:
+ self._spawn_worker(self._conductor_service_record_keepalive)
+ LOG.info(_LI('Successfully started conductor with hostname '
+ '%(hostname)s.'),
+ {'hostname': self.host})
+ except exception.NoFreeConductorWorker:
+ with excutils.save_and_reraise_exception():
+ LOG.critical(_LC('Failed to start keepalive'))
+ self.del_host()
+
+ def _collect_periodic_tasks(self, obj):
+ for n, method in inspect.getmembers(obj, inspect.ismethod):
+ if getattr(method, '_periodic_enabled', False):
+ self.add_periodic_task(method)
+
+ def del_host(self, deregister=True):
+ self._keepalive_evt.set()
+ if deregister:
+ try:
+ # Inform the cluster that this conductor is shutting down.
+ # Note that rebalancing will not occur immediately, but when
+ # the periodic sync takes place.
+ self.dbapi.unregister_conductor(self.host)
+ LOG.info(_LI('Successfully stopped conductor with hostname '
+ '%(hostname)s.'),
+ {'hostname': self.host})
+ except exception.ConductorNotFound:
+ pass
+ else:
+ LOG.info(_LI('Not deregistering conductor with hostname '
+ '%(hostname)s.'),
+ {'hostname': self.host})
+ # Waiting here to give workers the chance to finish. This has the
+ # benefit of releasing locks workers placed on nodes, as well as
+ # having work complete normally.
+ self._worker_pool.waitall()
+
+ def periodic_tasks(self, context, raise_on_error=False):
+ """Periodic tasks are run at pre-specified interval."""
+ return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
+
+ @lockutils.synchronized(WORKER_SPAWN_LOCK, 'iotronic-')
+ def _spawn_worker(self, func, *args, **kwargs):
+ """Create a greenthread to run func(*args, **kwargs).
+
+ Spawns a greenthread if there are free slots in pool, otherwise raises
+ exception. Execution control returns immediately to the caller.
+
+ :returns: GreenThread object.
+ :raises: NoFreeConductorWorker if worker pool is currently full.
+
+ """
+ if self._worker_pool.free():
+ return self._worker_pool.spawn(func, *args, **kwargs)
+ else:
+ raise exception.NoFreeConductorWorker()
+
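+ # The usual calling pattern (see change_node_power_state below) defers
+ # the spawn until the task lock is about to be released:
+ #
+ # task.spawn_after(self._spawn_worker, utils.node_power_action,
+ # task, new_state)
+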
+ def _conductor_service_record_keepalive(self):
+ while not self._keepalive_evt.is_set():
+ try:
+ self.dbapi.touch_conductor(self.host)
+ except db_exception.DBConnectionError:
+ LOG.warning(_LW('Conductor could not connect to database '
+ 'while heartbeating.'))
+ self._keepalive_evt.wait(CONF.conductor.heartbeat_interval)
+
+ @messaging.expected_exceptions(exception.InvalidParameterValue,
+ exception.MissingParameterValue,
+ exception.NodeLocked)
+ def update_node(self, context, node_obj):
+ """Update a node with the supplied data.
+
+ This method is the main "hub" for PUT and PATCH requests in the API.
+ It ensures that the requested change is safe to perform and, if
+ necessary, validates the parameters with the node's driver.
+
+ :param context: an admin context
+ :param node_obj: a changed (but not saved) node object.
+
+ """
+ node_id = node_obj.uuid
+ LOG.debug("RPC update_node called for node %s." % node_id)
+
+ # NOTE(jroll) clear maintenance_reason if node.update sets
+ # maintenance to False for backwards compatibility, for tools
+ # not using the maintenance endpoint.
+ delta = node_obj.obj_what_changed()
+ if 'maintenance' in delta and not node_obj.maintenance:
+ node_obj.maintenance_reason = None
+
+ driver_name = node_obj.driver if 'driver' in delta else None
+ with task_manager.acquire(context, node_id, shared=False,
+ driver_name=driver_name):
+ node_obj.save()
+
+ return node_obj
+
+ @messaging.expected_exceptions(exception.InvalidParameterValue,
+ exception.MissingParameterValue,
+ exception.NoFreeConductorWorker,
+ exception.NodeLocked)
+ def change_node_power_state(self, context, node_id, new_state):
+ """RPC method to encapsulate changes to a node's state.
+
+ Perform actions such as power on, power off. The validation is
+ performed synchronously, and if successful, the power action is
+ performed in the background (asynchronously). Once the power action
+ finishes successfully, the node's power_state is updated with the
+ new power state.
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :param new_state: the desired power state of the node.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+
+ """
+ LOG.debug("RPC change_node_power_state called for node %(node)s. "
+ "The desired new state is %(state)s."
+ % {'node': node_id, 'state': new_state})
+
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ task.driver.power.validate(task)
+ # Set the target_power_state and clear any last_error, since we're
+ # starting a new operation. This will expose to other processes
+ # and clients that work is in progress.
+ if new_state == states.REBOOT:
+ task.node.target_power_state = states.POWER_ON
+ else:
+ task.node.target_power_state = new_state
+ task.node.last_error = None
+ task.node.save()
+ task.set_spawn_error_hook(power_state_error_handler,
+ task.node, task.node.power_state)
+ task.spawn_after(self._spawn_worker, utils.node_power_action,
+ task, new_state)
+
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.InvalidParameterValue,
+ exception.UnsupportedDriverExtension,
+ exception.MissingParameterValue)
+ def vendor_passthru(self, context, node_id, driver_method,
+ http_method, info):
+ """RPC method to encapsulate vendor action.
+
+ Synchronously validate driver-specific info or get driver status,
+ and if successful invoke the vendor method. If the method mode
+ is 'async', the conductor will start a background worker to perform
+ the vendor action.
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :param driver_method: the name of the vendor method.
+ :param http_method: the HTTP method used for the request.
+ :param info: vendor method args.
+ :raises: InvalidParameterValue if supplied info is not valid.
+ :raises: MissingParameterValue if required supplied info is missing.
+ :raises: UnsupportedDriverExtension if current driver does not have
+ vendor interface or method is unsupported.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+ :raises: NodeLocked if node is locked by another conductor.
+ :returns: A tuple containing the response of the invoked method
+ and a boolean value indicating whether the method was
+ invoked asynchronously (True) or synchronously (False).
+ If invoked asynchronously the response field will always
+ be None.
+ """
+ LOG.debug("RPC vendor_passthru called for node %s." % node_id)
+ # NOTE(max_lobur): Even though not all vendor_passthru calls may
+ # require an exclusive lock, we need to do so to guarantee that the
+ # state doesn't unexpectedly change between doing a vendor.validate
+ # and vendor.vendor_passthru.
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ if not getattr(task.driver, 'vendor', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver,
+ extension='vendor interface')
+
+ vendor_iface = task.driver.vendor
+
+ # NOTE(lucasagomes): Previously vendor_passthru() was a
+ # self-contained method and each driver implemented its own
+ # version of it; now we have a common mechanism that drivers
+ # should use to expose their vendor methods. If a driver still
+ # has its own vendor_passthru() method we call it for backward
+ # compatibility. This code should be removed once L opens.
+ if hasattr(vendor_iface, 'vendor_passthru'):
+ LOG.warning(_LW("Drivers implementing their own version "
+ "of vendor_passthru() has been deprecated. "
+ "Please update the code to use the "
+ "@passthru decorator."))
+ vendor_iface.validate(task, method=driver_method,
+ **info)
+ task.spawn_after(self._spawn_worker,
+ vendor_iface.vendor_passthru, task,
+ method=driver_method, **info)
+ # NodeVendorPassthru was always async
+ return (None, True)
+
+ try:
+ vendor_opts = vendor_iface.vendor_routes[driver_method]
+ vendor_func = vendor_opts['func']
+ except KeyError:
+ raise exception.InvalidParameterValue(
+ _('No handler for method %s') % driver_method)
+
+ http_method = http_method.upper()
+ if http_method not in vendor_opts['http_methods']:
+ raise exception.InvalidParameterValue(
+ _('The method %(method)s does not support HTTP %(http)s') %
+ {'method': driver_method, 'http': http_method})
+
+ vendor_iface.validate(task, method=driver_method,
+ http_method=http_method, **info)
+
+ # Inform the vendor method which HTTP method it was invoked with
+ info['http_method'] = http_method
+
+ # Invoke the vendor method according to its mode (sync or async)
+ is_async = vendor_opts['async']
+ ret = None
+ if is_async:
+ task.spawn_after(self._spawn_worker, vendor_func, task, **info)
+ else:
+ ret = vendor_func(task, **info)
+
+ return (ret, is_async)
+
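+ # NOTE: minimal sketch of the route-based mechanism dispatched above,
+ # assuming a @passthru decorator in the drivers' base module (the
+ # exact import path and signature are assumptions):
+ #
+ #     class MyVendor(base.VendorInterface):
+ #         @base.passthru(['POST'], async=False)
+ #         def send_raw(self, task, http_method=None, **kwargs):
+ #             ...   # registered under vendor_routes['send_raw']
+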
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue,
+ exception.UnsupportedDriverExtension,
+ exception.DriverNotFound)
+ def driver_vendor_passthru(self, context, driver_name, driver_method,
+ http_method, info):
+ """Handle top-level vendor actions.
+
+ RPC method which handles driver-level vendor passthru calls. These
+ calls don't require a node UUID and are executed on a random
+ conductor with the specified driver. If the method mode is
+ async the conductor will start a background worker to perform
+ the vendor action.
+
+ :param context: an admin context.
+ :param driver_name: name of the driver on which to call the method.
+ :param driver_method: name of the vendor method, for use by the driver.
+ :param http_method: the HTTP method used for the request.
+ :param info: user-supplied data to pass through to the driver.
+ :raises: MissingParameterValue if required supplied info is missing.
+ :raises: InvalidParameterValue if supplied info is not valid.
+ :raises: UnsupportedDriverExtension if current driver does not have
+ vendor interface, if the vendor interface does not implement
+ driver-level vendor passthru or if the passthru method is
+ unsupported.
+ :raises: DriverNotFound if the supplied driver is not loaded.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+ :returns: A tuple containing the response of the invoked method
+ and a boolean value indicating whether the method was
+ invoked asynchronously (True) or synchronously (False).
+ If invoked asynchronously the response field will always
+ be None.
+ """
+ # Any locking in a top-level vendor action will need to be done by the
+ # implementation, as there is little we could reasonably lock on here.
+ LOG.debug("RPC driver_vendor_passthru for driver %s." % driver_name)
+ driver = self._get_driver(driver_name)
+ if not getattr(driver, 'vendor', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=driver_name,
+ extension='vendor interface')
+
+ # NOTE(lucasagomes): Previously driver_vendor_passthru()
+ # was a self-contained method and each driver implemented
+ # its own version of it; now we have a common mechanism that
+ # drivers should use to expose their vendor methods. If a driver
+ # still has its own driver_vendor_passthru() method we call
+ # it for backward compatibility. This code should be removed
+ # once L opens.
+ if hasattr(driver.vendor, 'driver_vendor_passthru'):
+ LOG.warning(_LW("Drivers implementing their own version "
+ "of driver_vendor_passthru() has been "
+ "deprecated. Please update the code to use "
+ "the @driver_passthru decorator."))
+
+ driver.vendor.driver_validate(method=driver_method, **info)
+ ret = driver.vendor.driver_vendor_passthru(
+ context, method=driver_method, **info)
+ # DriverVendorPassthru was always sync
+ return (ret, False)
+
+ try:
+ vendor_opts = driver.vendor.driver_routes[driver_method]
+ vendor_func = vendor_opts['func']
+ except KeyError:
+ raise exception.InvalidParameterValue(
+ _('No handler for method %s') % driver_method)
+
+ http_method = http_method.upper()
+ if http_method not in vendor_opts['http_methods']:
+ raise exception.InvalidParameterValue(
+ _('The method %(method)s does not support HTTP %(http)s') %
+ {'method': driver_method, 'http': http_method})
+
+ # Inform the vendor method which HTTP method it was invoked with
+ info['http_method'] = http_method
+
+ # Invoke the vendor method according to its mode (sync or async)
+ is_async = vendor_opts['async']
+ ret = None
+ driver.vendor.driver_validate(method=driver_method, **info)
+
+ if is_async:
+ self._spawn_worker(vendor_func, context, **info)
+ else:
+ ret = vendor_func(context, **info)
+
+ return (ret, is_async)
+
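+ # NOTE: companion sketch for driver-level routes, assuming the
+ # @driver_passthru decorator named in the warning above; note the
+ # function receives a context rather than a task:
+ #
+ #     @base.driver_passthru(['GET'], async=False)
+ #     def describe(self, context, **kwargs):
+ #         ...   # registered under driver_routes['describe']
+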
+ @messaging.expected_exceptions(exception.UnsupportedDriverExtension)
+ def get_node_vendor_passthru_methods(self, context, node_id):
+ """Retrieve information about vendor methods of the given node.
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :returns: dictionary of <method name>:<method metadata> entries.
+
+ """
+ LOG.debug("RPC get_node_vendor_passthru_methods called for node %s"
+ % node_id)
+ with task_manager.acquire(context, node_id, shared=True) as task:
+ if not getattr(task.driver, 'vendor', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver,
+ extension='vendor interface')
+
+ return get_vendor_passthru_metadata(
+ task.driver.vendor.vendor_routes)
+
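+ # NOTE: assumed shape of the returned metadata, mirroring the route
+ # options referenced above (the 'func' callable is not returned):
+ #
+ #     {'send_raw': {'http_methods': ['POST'], 'async': False}}
+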
+ @messaging.expected_exceptions(exception.UnsupportedDriverExtension,
+ exception.DriverNotFound)
+ def get_driver_vendor_passthru_methods(self, context, driver_name):
+ """Retrieve information about vendor methods of the given driver.
+
+ :param context: an admin context.
+ :param driver_name: name of the driver.
+ :returns: dictionary of <method name>:<method metadata> entries.
+
+ """
+ # Any locking in a top-level vendor action will need to be done by the
+ # implementation, as there is little we could reasonably lock on here.
+ LOG.debug("RPC get_driver_vendor_passthru_methods for driver %s"
+ % driver_name)
+ driver = self._get_driver(driver_name)
+ if not getattr(driver, 'vendor', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=driver_name,
+ extension='vendor interface')
+
+ return get_vendor_passthru_metadata(driver.vendor.driver_routes)
+
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.NodeInMaintenance,
+ exception.InstanceDeployFailure,
+ exception.InvalidStateRequested)
+ def do_node_deploy(self, context, node_id, rebuild=False,
+ configdrive=None):
+ """RPC method to initiate deployment to a node.
+
+ Initiate the deployment of a node. Validations are done
+ synchronously and the actual deploy work is performed in
+ background (asynchronously).
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :param rebuild: True if this is a rebuild request. A rebuild will
+ recreate the instance on the same node, overwriting
+ all disk content. The ephemeral partition, if it exists, can
+ optionally be preserved.
+ :param configdrive: Optional. A gzipped and base64 encoded configdrive.
+ :raises: InstanceDeployFailure
+ :raises: NodeInMaintenance if the node is in maintenance mode.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+ :raises: InvalidStateRequested when the requested state is not a valid
+ target from the current state.
+
+ """
+ LOG.debug("RPC do_node_deploy called for node %s." % node_id)
+
+ # NOTE(comstud): If the _sync_power_states() periodic task happens
+ # to have locked this node, we'll fail to acquire the lock. The
+ # client should perhaps retry in this case unless we decide we
+ # want to add retries or extra synchronization here.
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ node = task.node
+ if node.maintenance:
+ raise exception.NodeInMaintenance(op=_('provisioning'),
+ node=node.uuid)
+
+ if rebuild:
+ event = 'rebuild'
+
+ # NOTE(gilliard): Clear these to force the driver to
+ # check whether they have been changed in glance
+ # NOTE(vdrok): If image_source is not from Glance we should
+ # not clear kernel and ramdisk as they're input manually
+ if glance_utils.is_glance_image(
+ node.instance_info.get('image_source')):
+ instance_info = node.instance_info
+ instance_info.pop('kernel', None)
+ instance_info.pop('ramdisk', None)
+ node.instance_info = instance_info
+ else:
+ event = 'deploy'
+
+ driver_internal_info = node.driver_internal_info
+ # Infer the image type to make sure the deploy driver
+ # validates only the necessary variables for different
+ # image types.
+ # NOTE(sirushtim): The iwdi variable can be None. It's up to
+ # the deploy driver to validate this.
+ iwdi = images.is_whole_disk_image(context, node.instance_info)
+ driver_internal_info['is_whole_disk_image'] = iwdi
+ node.driver_internal_info = driver_internal_info
+ node.save()
+
+ try:
+ task.driver.power.validate(task)
+ task.driver.deploy.validate(task)
+ except (exception.InvalidParameterValue,
+ exception.MissingParameterValue) as e:
+ raise exception.InstanceDeployFailure(_(
+ "RPC do_node_deploy failed to validate deploy or "
+ "power info. Error: %(msg)s") % {'msg': e})
+
+ LOG.debug("do_node_deploy Calling event: %(event)s for node: "
+ "%(node)s", {'event': event, 'node': node.uuid})
+ try:
+ task.process_event(event,
+ callback=self._spawn_worker,
+ call_args=(do_node_deploy, task,
+ self.conductor.id,
+ configdrive),
+ err_handler=provisioning_error_handler)
+ except exception.InvalidState:
+ raise exception.InvalidStateRequested(
+ action=event, node=task.node.uuid,
+ state=task.node.provision_state)
+
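+ # NOTE: sketch of producing the configdrive argument accepted above
+ # (gzipped and base64 encoded), assuming the raw image bytes are in
+ # cd_bytes:
+ #
+ #     import base64, gzip, io
+ #     buf = io.BytesIO()
+ #     with gzip.GzipFile(fileobj=buf, mode='wb') as f:
+ #         f.write(cd_bytes)
+ #     configdrive = base64.b64encode(buf.getvalue())
+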
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.InstanceDeployFailure,
+ exception.InvalidStateRequested)
+ def do_node_tear_down(self, context, node_id):
+ """RPC method to tear down an existing node deployment.
+
+ Validate driver specific information synchronously, and then
+ spawn a background worker to tear down the node asynchronously.
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :raises: InstanceDeployFailure
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task
+ :raises: InvalidStateRequested when the requested state is not a valid
+ target from the current state.
+
+ """
+ LOG.debug("RPC do_node_tear_down called for node %s." % node_id)
+
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ try:
+ # NOTE(ghe): Valid power driver values are needed to perform
+ # a tear-down. Deploy info is useful to purge the cache but not
+ # required for this method.
+ task.driver.power.validate(task)
+ except (exception.InvalidParameterValue,
+ exception.MissingParameterValue) as e:
+ raise exception.InstanceDeployFailure(_(
+ "Failed to validate power driver interface. "
+ "Can not delete instance. Error: %(msg)s") % {'msg': e})
+
+ try:
+ task.process_event('delete',
+ callback=self._spawn_worker,
+ call_args=(self._do_node_tear_down, task),
+ err_handler=provisioning_error_handler)
+ except exception.InvalidState:
+ raise exception.InvalidStateRequested(
+ action='delete', node=task.node.uuid,
+ state=task.node.provision_state)
+
+ def _do_node_tear_down(self, task):
+ """Internal RPC method to tear down an existing node deployment."""
+ node = task.node
+ try:
+ task.driver.deploy.clean_up(task)
+ task.driver.deploy.tear_down(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_LE('Error in tear_down of node %(node)s: '
+ '%(err)s'),
+ {'node': node.uuid, 'err': e})
+ node.last_error = _("Failed to tear down. Error: %s") % e
+ task.process_event('error')
+ else:
+ # NOTE(deva): When tear_down finishes, the deletion is done,
+ # cleaning will start next
+ LOG.info(_LI('Successfully unprovisioned node %(node)s with '
+ 'instance %(instance)s.'),
+ {'node': node.uuid, 'instance': node.instance_uuid})
+ finally:
+ # NOTE(deva): there is no need to unset conductor_affinity
+ # because it is a reference to the most recent conductor which
+ # deployed a node, and does not limit any future actions.
+ # But we do need to clear the instance_info
+ node.instance_info = {}
+ node.save()
+
+ # Begin cleaning
+ try:
+ task.process_event('clean')
+ except exception.InvalidState:
+ raise exception.InvalidStateRequested(
+ action='clean', node=node.uuid,
+ state=node.provision_state)
+ self._do_node_clean(task)
+
+ def continue_node_clean(self, context, node_id):
+ """RPC method to continue cleaning a node.
+
+ This is useful for cleaning tasks that are async. When they complete,
+ they call back via RPC, a new worker and lock are set up, and cleaning
+ continues. This can also be used to resume cleaning on take_over.
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :raises: InvalidStateRequested if the node is not in CLEANING state
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: NodeNotFound if the node no longer appears in the database
+
+ """
+ LOG.debug("RPC continue_node_clean called for node %s.", node_id)
+
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ if task.node.provision_state != states.CLEANING:
+ raise exception.InvalidStateRequested(_(
+ 'Cannot continue cleaning on %(node)s, node is in '
+ '%(state)s state, should be %(clean_state)s') %
+ {'node': task.node.uuid,
+ 'state': task.node.provision_state,
+ 'clean_state': states.CLEANING})
+ task.set_spawn_error_hook(cleaning_error_handler, task.node,
+ 'Failed to run next clean step')
+ task.spawn_after(
+ self._spawn_worker,
+ self._do_next_clean_step,
+ task,
+ task.node.driver_internal_info.get('clean_steps', []),
+ task.node.clean_step)
+
+ def _do_node_clean(self, task):
+ """Internal RPC method to perform automated cleaning of a node."""
+ node = task.node
+ LOG.debug('Starting cleaning for node %s', node.uuid)
+
+ if not CONF.conductor.clean_nodes:
+ # Skip cleaning, move to AVAILABLE.
+ node.clean_step = None
+ node.save()
+
+ task.process_event('done')
+ LOG.info(_LI('Cleaning is disabled, node %s has been successfully '
+ 'moved to AVAILABLE state.'), node.uuid)
+ return
+
+ try:
+ # NOTE(ghe): Valid power driver values are needed to perform
+ # a cleaning.
+ task.driver.power.validate(task)
+ except (exception.InvalidParameterValue,
+ exception.MissingParameterValue) as e:
+ msg = (_('Failed to validate power driver interface. '
+ 'Can not clean node %(node)s. Error: %(msg)s') %
+ {'node': node.uuid, 'msg': e})
+ return cleaning_error_handler(task, msg)
+
+ # Allow the deploy driver to set up the ramdisk again (necessary for
+ # IPA cleaning/zapping)
+ try:
+ prepare_result = task.driver.deploy.prepare_cleaning(task)
+ except Exception as e:
+ msg = (_('Failed to prepare node %(node)s for cleaning: %(e)s')
+ % {'node': node.uuid, 'e': e})
+ LOG.exception(msg)
+ return cleaning_error_handler(task, msg)
+ if prepare_result == states.CLEANING:
+ # Prepare is asynchronous, the deploy driver will need to
+ # set node.driver_internal_info['clean_steps'] and
+ # node.clean_step and then make an RPC call to
+ # continue_node_clean to start cleaning.
+ return
+
+ set_node_cleaning_steps(task)
+ self._do_next_clean_step(
+ task,
+ node.driver_internal_info.get('clean_steps', []),
+ node.clean_step)
+
+ def _do_next_clean_step(self, task, steps, last_step):
+ """Start executing cleaning/zapping steps from the last step (if any).
+
+ :param task: a TaskManager instance with an exclusive lock
+ :param steps: The complete list of steps that need to be executed
+ on the node
+ :param last_step: The last step that was executed. {} will start
+ from the beginning
+ """
+ node = task.node
+ # Trim already executed steps
+ if last_step:
+ try:
+ # Trim off last_step (now finished) and all previous steps.
+ steps = steps[steps.index(last_step) + 1:]
+ except ValueError:
+ msg = (_('Node %(node)s got an invalid last step for '
+ '%(state)s: %(step)s.') %
+ {'node': node.uuid, 'step': last_step,
+ 'state': node.provision_state})
+ LOG.exception(msg)
+ return cleaning_error_handler(task, msg)
+
+ LOG.info(_LI('Executing %(state)s on node %(node)s, remaining steps: '
+ '%(steps)s'), {'node': node.uuid, 'steps': steps,
+ 'state': node.provision_state})
+ # Execute each step until we hit an async step or run out of steps
+ for step in steps:
+ # Save which step we're about to start so we can restart
+ # if necessary
+ node.clean_step = step
+ node.save()
+ interface = getattr(task.driver, step.get('interface'))
+ LOG.info(_LI('Executing %(step)s on node %(node)s'),
+ {'step': step, 'node': node.uuid})
+ try:
+ result = interface.execute_clean_step(task, step)
+ except Exception as e:
+ msg = (_('Node %(node)s failed step %(step)s: '
+ '%(exc)s') %
+ {'node': node.uuid, 'exc': e,
+ 'step': node.clean_step})
+ LOG.exception(msg)
+ cleaning_error_handler(task, msg)
+ return
+
+ # Check if the step is done or not. The step should return
+ # states.CLEANING if the step is still being executed, or
+ # None if the step is done.
+ if result == states.CLEANING:
+ # Kill this worker, the async step will make an RPC call to
+ # continue_node_clean to continue cleaning
+ LOG.info(_LI('Clean step %(step)s on node %(node)s being '
+ 'executed asynchronously, waiting for driver.'),
+ {'node': node.uuid, 'step': step})
+ return
+ elif result is not None:
+ msg = (_('While executing step %(step)s on node '
+ '%(node)s, step returned invalid value: %(val)s')
+ % {'step': step, 'node': node.uuid, 'val': result})
+ LOG.error(msg)
+ return cleaning_error_handler(task, msg)
+ LOG.info(_LI('Node %(node)s finished clean step %(step)s'),
+ {'node': node.uuid, 'step': step})
+
+ # Clear clean_step
+ node.clean_step = None
+ driver_internal_info = node.driver_internal_info
+ driver_internal_info['clean_steps'] = None
+ node.driver_internal_info = driver_internal_info
+ try:
+ task.driver.deploy.tear_down_cleaning(task)
+ except Exception as e:
+ msg = (_('Failed to tear down from cleaning for node %s')
+ % node.uuid)
+ LOG.exception(msg)
+ return cleaning_error_handler(task, msg, tear_down_cleaning=False)
+
+ LOG.info(_LI('Node %s cleaning complete'), node.uuid)
+ task.process_event('done')
+
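+ # NOTE: assumed shape of the clean step dicts consumed above; each
+ # step names the driver interface that implements it:
+ #
+ #     {'interface': 'deploy', 'step': 'erase_devices'}
+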
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue,
+ exception.InvalidStateRequested)
+ def do_provisioning_action(self, context, node_id, action):
+ """RPC method to initiate certain provisioning state transitions.
+
+ Initiate a provisioning state change through the state machine,
+ rather than through an RPC call to do_node_deploy / do_node_tear_down
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :param action: an action. One of iotronic.common.states.VERBS
+ :raises: InvalidParameterValue
+ :raises: InvalidStateRequested
+ :raises: NoFreeConductorWorker
+
+ """
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ if (action == states.VERBS['provide'] and
+ task.node.provision_state == states.MANAGEABLE):
+ task.process_event('provide',
+ callback=self._spawn_worker,
+ call_args=(self._do_node_clean, task),
+ err_handler=provisioning_error_handler)
+ else:
+ try:
+ task.process_event(action)
+ except exception.InvalidState:
+ raise exception.InvalidStateRequested(
+ action=action, node=task.node.uuid,
+ state=task.node.provision_state)
+
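+ # NOTE: illustrative call only (client-side names assumed); 'provide'
+ # is special-cased above to run cleaning before the node becomes
+ # available:
+ #
+ #     api.do_provisioning_action(ctxt, node.uuid,
+ #                                states.VERBS['provide'])
+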
+ @periodic_task.periodic_task(
+ spacing=CONF.conductor.sync_power_state_interval)
+ def _sync_power_states(self, context):
+ """Periodic task to sync power states for the nodes.
+
+ Attempt to grab a lock and sync only if the following
+ conditions are met:
+
+ 1) Node is mapped to this conductor.
+ 2) Node is not in maintenance mode.
+ 3) Node is not in DEPLOYWAIT provision state.
+ 4) Node doesn't have a reservation
+
+ NOTE: Grabbing a lock here can cause other methods to fail to
+ grab it. We want to avoid trying to grab a lock while a
+ node is in the DEPLOYWAIT state so we don't unnecessarily
+ cause a deploy callback to fail. There's not much we can do
+ here to avoid failing a brand new deploy to a node that we've
+ locked here, though.
+ """
+ # FIXME(comstud): Since our initial state checks are outside
+ # of the lock (to try to avoid the lock), some checks are
+ # repeated after grabbing the lock so we can unlock quickly.
+ # The node mapping is not re-checked because it doesn't much
+ # matter if things happened to re-balance.
+ #
+ # This is inefficient and racy. We end up calling the DB API's
+ # get_node() twice (once here, and once in acquire()). Ideally we
+ # would add a way to pass constraints to task_manager.acquire()
+ # (through to its DB API call) so that we can eliminate our call
+ # and first set of checks below.
+
+ filters = {'reserved': False, 'maintenance': False}
+ node_iter = self.iter_nodes(fields=['id'], filters=filters)
+ for (node_uuid, driver, node_id) in node_iter:
+ try:
+ # NOTE(deva): we should not acquire a lock on a node in
+ # DEPLOYWAIT, as this could cause an error within
+ # a deploy ramdisk POSTing back at the same time.
+ # TODO(deva): refactor this check, because it needs to be done
+ # in every periodic task, not just this one.
+ node = objects.Node.get_by_id(context, node_id)
+ if (node.provision_state == states.DEPLOYWAIT or
+ node.maintenance or node.reservation is not None):
+ continue
+
+ with task_manager.acquire(context, node_uuid) as task:
+ if (task.node.provision_state == states.DEPLOYWAIT or
+ task.node.maintenance):
+ continue
+ count = do_sync_power_state(
+ task, self.power_state_sync_count[node_uuid])
+ if count:
+ self.power_state_sync_count[node_uuid] = count
+ else:
+ # don't bloat the dict with non-failing nodes
+ del self.power_state_sync_count[node_uuid]
+ except exception.NodeNotFound:
+ LOG.info(_LI("During sync_power_state, node %(node)s was not "
+ "found and presumed deleted by another process."),
+ {'node': node_uuid})
+ except exception.NodeLocked:
+ LOG.info(_LI("During sync_power_state, node %(node)s was "
+ "already locked by another process. Skip."),
+ {'node': node_uuid})
+ finally:
+ # Yield on every iteration
+ eventlet.sleep(0)
+
+ @periodic_task.periodic_task(
+ spacing=CONF.conductor.check_provision_state_interval)
+ def _check_deploy_timeouts(self, context):
+ """Periodically checks whether a deploy RPC call has timed out.
+
+ If a deploy call has timed out, the deploy failed and we clean up.
+
+ :param context: request context.
+ """
+ callback_timeout = CONF.conductor.deploy_callback_timeout
+ if not callback_timeout:
+ return
+
+ filters = {'reserved': False,
+ 'provision_state': states.DEPLOYWAIT,
+ 'maintenance': False,
+ 'provisioned_before': callback_timeout}
+ sort_key = 'provision_updated_at'
+ callback_method = utils.cleanup_after_timeout
+ err_handler = provisioning_error_handler
+ self._fail_if_in_state(context, filters, states.DEPLOYWAIT,
+ sort_key, callback_method, err_handler)
+
+ def _do_takeover(self, task):
+ """Take over this node.
+
+ Prepares a node for takeover by this conductor, performs the takeover,
+ and changes the conductor associated with the node. The node with the
+ new conductor affiliation is saved to the DB.
+
+ :param task: a TaskManager instance
+ """
+ LOG.debug('Conductor %(cdr)s taking over node %(node)s',
+ {'cdr': self.host, 'node': task.node.uuid})
+ task.driver.deploy.prepare(task)
+ task.driver.deploy.take_over(task)
+ # NOTE(lucasagomes): Set the ID of the new conductor managing
+ # this node
+ task.node.conductor_affinity = self.conductor.id
+ task.node.save()
+
+ @periodic_task.periodic_task(
+ spacing=CONF.conductor.sync_local_state_interval)
+ def _sync_local_state(self, context):
+ """Perform any actions necessary to sync local state.
+
+ This is called periodically to refresh the conductor's copy of the
+ consistent hash ring. If any mappings have changed, this method then
+ determines which, if any, nodes need to be "taken over".
+ The ensuing actions could include preparing a PXE environment,
+ updating the DHCP server, and so on.
+ """
+ self.ring_manager.reset()
+ filters = {'reserved': False,
+ 'maintenance': False,
+ 'provision_state': states.ACTIVE}
+ node_iter = self.iter_nodes(fields=['id', 'conductor_affinity'],
+ filters=filters)
+
+ workers_count = 0
+ for node_uuid, driver, node_id, conductor_affinity in node_iter:
+ if conductor_affinity == self.conductor.id:
+ continue
+
+ # Node is mapped here, but not updated by this conductor last
+ try:
+ with task_manager.acquire(context, node_uuid) as task:
+ # NOTE(deva): now that we have the lock, check again to
+ # avoid racing with deletes and other state changes
+ node = task.node
+ if (node.maintenance or
+ node.conductor_affinity == self.conductor.id or
+ node.provision_state != states.ACTIVE):
+ continue
+
+ task.spawn_after(self._spawn_worker,
+ self._do_takeover, task)
+
+ except exception.NoFreeConductorWorker:
+ break
+ except (exception.NodeLocked, exception.NodeNotFound):
+ continue
+ workers_count += 1
+ if workers_count == CONF.conductor.periodic_max_workers:
+ break
+
+ def _mapped_to_this_conductor(self, node_uuid, driver):
+ """Check that node is mapped to this conductor.
+
+ Note that because mappings are eventually consistent, it is possible
+ for two conductors to simultaneously believe that a node is mapped to
+ them. Any operation that depends on exclusive control of a node should
+ take out a lock.
+ """
+ try:
+ ring = self.ring_manager[driver]
+ except exception.DriverNotFound:
+ return False
+
+ return self.host in ring.get_hosts(node_uuid)
+
+ def iter_nodes(self, fields=None, **kwargs):
+ """Iterate over nodes mapped to this conductor.
+
+ Requests the node set from the database and filters out nodes
+ that are not mapped to this conductor.
+
+ Yields tuples (node_uuid, driver, ...) where ... is derived from
+ fields argument, e.g.: fields=None means yielding ('uuid', 'driver'),
+ fields=['foo'] means yielding ('uuid', 'driver', 'foo').
+
+ :param fields: list of fields to fetch in addition to uuid and driver
+ :param kwargs: additional arguments to pass to dbapi when looking for
+ nodes
+ :return: generator yielding tuples of requested fields
+ """
+ columns = ['uuid', 'driver'] + list(fields or ())
+ node_list = self.dbapi.get_nodeinfo_list(columns=columns, **kwargs)
+ for result in node_list:
+ if self._mapped_to_this_conductor(*result[:2]):
+ yield result
+
+ @messaging.expected_exceptions(exception.NodeLocked)
+ def validate_driver_interfaces(self, context, node_id):
+ """Validate the `core` and `standardized` interfaces for drivers.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :returns: a dictionary containing the results of each
+ interface validation.
+
+ """
+ LOG.debug('RPC validate_driver_interfaces called for node %s.',
+ node_id)
+ ret_dict = {}
+ with task_manager.acquire(context, node_id, shared=True) as task:
+ # NOTE(sirushtim): the is_whole_disk_image variable is needed by
+ # deploy drivers for doing their validate(). Since the deploy
+ # isn't being done yet and the driver information could change in
+ # the meantime, we don't know if the is_whole_disk_image value will
+ # change or not. It isn't saved to the DB, but only used with this
+ # node instance for the current validations.
+ iwdi = images.is_whole_disk_image(context,
+ task.node.instance_info)
+ task.node.driver_internal_info['is_whole_disk_image'] = iwdi
+ for iface_name in (task.driver.core_interfaces +
+ task.driver.standard_interfaces):
+ iface = getattr(task.driver, iface_name, None)
+ result = reason = None
+ if iface:
+ try:
+ iface.validate(task)
+ result = True
+ except (exception.InvalidParameterValue,
+ exception.UnsupportedDriverExtension,
+ exception.MissingParameterValue) as e:
+ result = False
+ reason = str(e)
+ else:
+ reason = _('not supported')
+
+ ret_dict[iface_name] = {}
+ ret_dict[iface_name]['result'] = result
+ if reason is not None:
+ ret_dict[iface_name]['reason'] = reason
+ return ret_dict
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.NodeAssociated,
+ exception.InvalidState)
+ def destroy_node(self, context, node_id):
+ """Delete a node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: NodeAssociated if the node contains an instance
+ associated with it.
+ :raises: InvalidState if the node is in the wrong provision
+ state to perform deletion.
+
+ """
+
+ with task_manager.acquire(context, node_id) as task:
+ node = task.node
+ node.destroy()
+ LOG.info(_LI('Successfully deleted node %(node)s.'),
+ {'node': node.uuid})
+ #if node.instance_uuid is not None:
+ # raise exception.NodeAssociated(node=node.uuid,
+ # instance=node.instance_uuid)
+
+ # TODO(lucasagomes): We should add ENROLLED once it's part of our
+ # state machine
+ # NOTE(lucasagomes): For the *FAIL states, users should
+ # move the node to a safe state prior to deletion. This is because
+ # we should try to avoid deleting a node in a dirty/whacky state,
+ # e.g. a node in DEPLOYFAIL, if deleted without passing through
+ # tear down/cleaning may leave data from the previous tenant
+ # in the disk. So nodes in *FAIL states should first be moved to:
+ # CLEANFAIL -> MANAGEABLE
+ # INSPECTIONFAIL -> MANAGEABLE
+ # DEPLOYFAIL -> DELETING
+ # ZAPFAIL -> MANAGEABLE (in the future)
+ '''
+ valid_states = (states.AVAILABLE, states.NOSTATE,
+ states.MANAGEABLE)
+ if node.provision_state not in valid_states:
+ msg = (_('Can not delete node "%(node)s" while it is in '
+ 'provision state "%(state)s". Valid provision states '
+ 'to perform deletion are: "%(valid_states)s"') %
+ {'node': node.uuid, 'state': node.provision_state,
+ 'valid_states': valid_states})
+ raise exception.InvalidState(msg)
+ if node.console_enabled:
+ try:
+ task.driver.console.stop_console(task)
+ except Exception as err:
+ LOG.error(_LE('Failed to stop console while deleting '
+ 'the node %(node)s: %(err)s.'),
+ {'node': node.uuid, 'err': err})
+ node.destroy()
+ LOG.info(_LI('Successfully deleted node %(node)s.'),
+ {'node': node.uuid})
+ '''
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.NodeNotFound)
+ def destroy_port(self, context, port):
+ """Delete a port.
+
+ :param context: request context.
+ :param port: port object
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: NodeNotFound if the node associated with the port does not
+ exist.
+
+ """
+ LOG.debug('RPC destroy_port called for port %(port)s',
+ {'port': port.uuid})
+ with task_manager.acquire(context, port.node_id) as task:
+ port.destroy()
+ LOG.info(_LI('Successfully deleted port %(port)s. '
+ 'The node associated with the port was '
+ '%(node)s'),
+ {'port': port.uuid, 'node': task.node.uuid})
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.UnsupportedDriverExtension,
+ exception.NodeConsoleNotEnabled,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue)
+ def get_console_information(self, context, node_id):
+ """Get connection information about the console.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support console.
+ :raises: NodeConsoleNotEnabled if the console is not enabled.
+ :raises: InvalidParameterValue when the wrong driver info is specified.
+ :raises: MissingParameterValue if required supplied info is missing.
+ """
+ LOG.debug('RPC get_console_information called for node %s' % node_id)
+
+ with task_manager.acquire(context, node_id, shared=True) as task:
+ node = task.node
+
+ if not getattr(task.driver, 'console', None):
+ raise exception.UnsupportedDriverExtension(driver=node.driver,
+ extension='console')
+ if not node.console_enabled:
+ raise exception.NodeConsoleNotEnabled(node=node_id)
+
+ task.driver.console.validate(task)
+ return task.driver.console.get_console(task)
+
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.UnsupportedDriverExtension,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue)
+ def set_console_mode(self, context, node_id, enabled):
+ """Enable/Disable the console.
+
+ Validate driver specific information synchronously, and then
+ spawn a background worker to set console mode asynchronously.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param enabled: Boolean value; whether the console is enabled or
+ disabled.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support console.
+ :raises: InvalidParameterValue when the wrong driver info is specified.
+ :raises: MissingParameterValue if required supplied info is missing.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task
+ """
+ LOG.debug('RPC set_console_mode called for node %(node)s with '
+ 'enabled %(enabled)s' % {'node': node_id,
+ 'enabled': enabled})
+
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ node = task.node
+ if not getattr(task.driver, 'console', None):
+ raise exception.UnsupportedDriverExtension(driver=node.driver,
+ extension='console')
+
+ task.driver.console.validate(task)
+
+ if enabled == node.console_enabled:
+ op = _('enabled') if enabled else _('disabled')
+ LOG.info(_LI("No console action was triggered because the "
+ "console is already %s"), op)
+ task.release_resources()
+ else:
+ node.last_error = None
+ node.save()
+ task.spawn_after(self._spawn_worker,
+ self._set_console_mode, task, enabled)
+
+ def _set_console_mode(self, task, enabled):
+ """Internal method to set console mode on a node."""
+ node = task.node
+ try:
+ if enabled:
+ task.driver.console.start_console(task)
+ # TODO(deva): We should be updating conductor_affinity here
+ # but there is no support for console sessions in
+ # take_over() right now.
+ else:
+ task.driver.console.stop_console(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ op = _('enabling') if enabled else _('disabling')
+ msg = (_('Error %(op)s the console on node %(node)s. '
+ 'Reason: %(error)s') % {'op': op,
+ 'node': node.uuid,
+ 'error': e})
+ node.last_error = msg
+ else:
+ node.console_enabled = enabled
+ node.last_error = None
+ finally:
+ node.save()
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.FailedToUpdateMacOnPort,
+ exception.MACAlreadyExists)
+ def update_port(self, context, port_obj):
+ """Update a port.
+
+ :param context: request context.
+ :param port_obj: a changed (but not saved) port object.
+ :raises: DHCPLoadError if the dhcp_provider cannot be loaded.
+ :raises: FailedToUpdateMacOnPort if MAC address changed and update
+ failed.
+ :raises: MACAlreadyExists if the update is setting a MAC which is
+ registered on another port already.
+ """
+ port_uuid = port_obj.uuid
+ LOG.debug("RPC update_port called for port %s.", port_uuid)
+
+ with task_manager.acquire(context, port_obj.node_id) as task:
+ node = task.node
+ if 'address' in port_obj.obj_what_changed():
+ vif = port_obj.extra.get('vif_port_id')
+ if vif:
+ api = dhcp_factory.DHCPFactory()
+ api.provider.update_port_address(vif, port_obj.address,
+ token=context.auth_token)
+ # Log warning if there is no vif_port_id and an instance
+ # is associated with the node.
+ elif node.instance_uuid:
+ LOG.warning(_LW(
+ "No VIF found for instance %(instance)s "
+ "port %(port)s when attempting to update port MAC "
+ "address."),
+ {'port': port_uuid, 'instance': node.instance_uuid})
+
+ port_obj.save()
+
+ return port_obj
+
+ @messaging.expected_exceptions(exception.DriverNotFound)
+ def get_driver_properties(self, context, driver_name):
+ """Get the properties of the driver.
+
+ :param context: request context.
+ :param driver_name: name of the driver.
+ :returns: a dictionary with <property name>:<property description>
+ entries.
+ :raises: DriverNotFound if the driver is not loaded.
+
+ """
+ LOG.debug("RPC get_driver_properties called for driver %s.",
+ driver_name)
+ driver = self._get_driver(driver_name)
+ return driver.get_properties()
+
+ @periodic_task.periodic_task(
+ spacing=CONF.conductor.send_sensor_data_interval)
+ def _send_sensor_data(self, context):
+ """Periodically sends sensor data to Ceilometer."""
+ # do nothing if send_sensor_data option is False
+ if not CONF.conductor.send_sensor_data:
+ return
+
+ filters = {'associated': True}
+ node_iter = self.iter_nodes(fields=['instance_uuid'],
+ filters=filters)
+
+ for (node_uuid, driver, instance_uuid) in node_iter:
+ # populate the message which will be sent to ceilometer
+ message = {'message_id': uuidutils.generate_uuid(),
+ 'instance_uuid': instance_uuid,
+ 'node_uuid': node_uuid,
+ 'timestamp': datetime.datetime.utcnow(),
+ 'event_type': 'hardware.ipmi.metrics.update'}
+
+ try:
+ with task_manager.acquire(context,
+ node_uuid,
+ shared=True) as task:
+ task.driver.management.validate(task)
+ sensors_data = task.driver.management.get_sensors_data(
+ task)
+ except NotImplementedError:
+ LOG.warn(_LW(
+ 'get_sensors_data is not implemented for driver'
+ ' %(driver)s, node_uuid is %(node)s'),
+ {'node': node_uuid, 'driver': driver})
+ except exception.FailedToParseSensorData as fps:
+ LOG.warn(_LW(
+ "During get_sensors_data, could not parse "
+ "sensor data for node %(node)s. Error: %(err)s."),
+ {'node': node_uuid, 'err': str(fps)})
+ except exception.FailedToGetSensorData as fgs:
+ LOG.warn(_LW(
+ "During get_sensors_data, could not get "
+ "sensor data for node %(node)s. Error: %(err)s."),
+ {'node': node_uuid, 'err': str(fgs)})
+ except exception.NodeNotFound:
+ LOG.warn(_LW(
+ "During send_sensor_data, node %(node)s was not "
+ "found and presumed deleted by another process."),
+ {'node': node_uuid})
+ except Exception as e:
+ LOG.warn(_LW(
+ "Failed to get sensor data for node %(node)s. "
+ "Error: %(error)s"), {'node': node_uuid, 'error': str(e)})
+ else:
+ message['payload'] = (
+ self._filter_out_unsupported_types(sensors_data))
+ if message['payload']:
+ self.notifier.info(context, "hardware.ipmi.metrics",
+ message)
+ finally:
+ # Yield on every iteration
+ eventlet.sleep(0)
+
+ def _filter_out_unsupported_types(self, sensors_data):
+ """Filters out sensor data types that aren't specified in the config.
+
+ Removes sensor data types that aren't specified in
+ CONF.conductor.send_sensor_data_types.
+
+ :param sensors_data: dict containing sensor types and the associated
+ data
+ :returns: dict with unsupported sensor types removed
+ """
+ allowed = set(x.lower() for x in CONF.conductor.send_sensor_data_types)
+
+ if 'all' in allowed:
+ return sensors_data
+
+ return dict((sensor_type, sensor_value) for (sensor_type, sensor_value)
+ in sensors_data.items() if sensor_type.lower() in allowed)
+
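+ # NOTE: worked example. With send_sensor_data_types = ['Temperature'],
+ # {'Temperature': {...}, 'Fan': {...}} is filtered down to
+ # {'Temperature': {...}}; the matching is case-insensitive and a
+ # value of ['All'] passes everything through.
+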
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.UnsupportedDriverExtension,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue)
+ def set_boot_device(self, context, node_id, device, persistent=False):
+ """Set the boot device for a node.
+
+ Set the boot device to use on next reboot of the node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param device: the boot device, one of
+ :mod:`iotronic.common.boot_devices`.
+ :param persistent: Whether to set next-boot, or make the change
+ permanent. Default: False.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support management.
+ :raises: InvalidParameterValue when the wrong driver info is
+ specified or an invalid boot device is specified.
+ :raises: MissingParameterValue if required supplied info is missing.
+ """
+ LOG.debug('RPC set_boot_device called for node %(node)s with '
+ 'device %(device)s', {'node': node_id, 'device': device})
+ with task_manager.acquire(context, node_id) as task:
+ node = task.node
+ if not getattr(task.driver, 'management', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=node.driver, extension='management')
+ task.driver.management.validate(task)
+ task.driver.management.set_boot_device(task, device,
+ persistent=persistent)
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.UnsupportedDriverExtension,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue)
+ def get_boot_device(self, context, node_id):
+ """Get the current boot device.
+
+ Returns the current boot device of a node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support management.
+ :raises: InvalidParameterValue when the wrong driver info is
+ specified.
+ :raises: MissingParameterValue if required supplied info is missing.
+ :returns: a dictionary containing:
+
+ :boot_device: the boot device, one of
+ :mod:`iotronic.common.boot_devices` or None if it is unknown.
+ :persistent: Whether the boot device will persist to all
+ future boots or not, None if it is unknown.
+
+ """
+ LOG.debug('RPC get_boot_device called for node %s', node_id)
+ with task_manager.acquire(context, node_id) as task:
+ if not getattr(task.driver, 'management', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='management')
+ task.driver.management.validate(task)
+ return task.driver.management.get_boot_device(task)
+
+ @messaging.expected_exceptions(exception.NodeLocked,
+ exception.UnsupportedDriverExtension,
+ exception.InvalidParameterValue,
+ exception.MissingParameterValue)
+ def get_supported_boot_devices(self, context, node_id):
+ """Get the list of supported devices.
+
+ Returns the list of supported boot devices of a node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support management.
+ :raises: InvalidParameterValue when the wrong driver info is
+ specified.
+ :raises: MissingParameterValue if required supplied info is missing.
+ :returns: A list with the supported boot devices defined
+ in :mod:`iotronic.common.boot_devices`.
+
+ """
+ LOG.debug('RPC get_supported_boot_devices called for node %s', node_id)
+ with task_manager.acquire(context, node_id, shared=True) as task:
+ if not getattr(task.driver, 'management', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='management')
+ return task.driver.management.get_supported_boot_devices()
+
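+ # NOTE: example return value, assuming the usual constants defined in
+ # iotronic.common.boot_devices:
+ #
+ #     ['pxe', 'disk', 'cdrom']
+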
+ @messaging.expected_exceptions(exception.NoFreeConductorWorker,
+ exception.NodeLocked,
+ exception.HardwareInspectionFailure,
+ exception.InvalidStateRequested,
+ exception.UnsupportedDriverExtension)
+ def inspect_hardware(self, context, node_id):
+ """Inspect hardware to obtain hardware properties.
+
+ Initiate the inspection of a node. Validations are done
+ synchronously and the actual inspection work is performed in
+ background (asynchronously).
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support inspect.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task
+ :raises: HardwareInspectionFailure when unable to get
+ essential scheduling properties from hardware.
+ :raises: InvalidStateRequested if 'inspect' is not a
+ valid action to do in the current state.
+
+ """
+ LOG.debug('RPC inspect_hardware called for node %s', node_id)
+ with task_manager.acquire(context, node_id, shared=False) as task:
+ if not getattr(task.driver, 'inspect', None):
+ raise exception.UnsupportedDriverExtension(
+ driver=task.node.driver, extension='inspect')
+
+ try:
+ task.driver.power.validate(task)
+ task.driver.inspect.validate(task)
+ except (exception.InvalidParameterValue,
+ exception.MissingParameterValue) as e:
+ error = (_("RPC inspect_hardware failed to validate "
+ "inspection or power info. Error: %(msg)s")
+ % {'msg': e})
+ raise exception.HardwareInspectionFailure(error=error)
+
+ try:
+ task.process_event('inspect',
+ callback=self._spawn_worker,
+ call_args=(_do_inspect_hardware, task),
+ err_handler=provisioning_error_handler)
+
+ except exception.InvalidState:
+ raise exception.InvalidStateRequested(
+ action='inspect', node=task.node.uuid,
+ state=task.node.provision_state)
+
+ @periodic_task.periodic_task(
+ spacing=CONF.conductor.check_provision_state_interval)
+ def _check_inspect_timeouts(self, context):
+ """Periodically checks inspect_timeout and fails upon reaching it.
+
+ :param context: request context
+
+ """
+ callback_timeout = CONF.conductor.inspect_timeout
+ if not callback_timeout:
+ return
+
+ filters = {'reserved': False,
+ 'provision_state': states.INSPECTING,
+ 'inspection_started_before': callback_timeout}
+ sort_key = 'inspection_started_at'
+ last_error = _("timeout reached while inspecting the node")
+ self._fail_if_in_state(context, filters, states.INSPECTING,
+ sort_key, last_error=last_error)
+
+ def _fail_if_in_state(self, context, filters, provision_state,
+ sort_key, callback_method=None,
+ err_handler=None, last_error=None):
+ """Fail nodes that are in specified state.
+
+ Retrieves nodes that satisfy the criteria in 'filters'.
+ If any of these nodes is in 'provision_state', it has failed
+ in whatever provisioning activity it was currently doing.
+ That failure is processed here.
+
+ :param context: request context
+ :param filters: criteria (as a dictionary) to get the desired
+ list of nodes that satisfy the filter constraints.
+ For example, if filters['provisioned_before'] = 60,
+ this would process nodes whose provision_updated_at
+ field value was 60 or more seconds before 'now'.
+ :param provision_state: provision_state that the node is in,
+ for the provisioning activity to have failed.
+ :param sort_key: the nodes are sorted based on this key.
+ :param callback_method: the callback method to be invoked in a
+ spawned thread, for a failed node. This
+ method must take a :class:`TaskManager` as
+ the first (and only required) parameter.
+ :param err_handler: for a failed node, the error handler to invoke
+ if an error occurs trying to spawn a thread
+ to do the callback_method.
+ :param last_error: the error message to be updated in node.last_error
+
+ """
+ node_iter = self.iter_nodes(filters=filters,
+ sort_key=sort_key,
+ sort_dir='asc')
+
+ workers_count = 0
+ for node_uuid, driver in node_iter:
+ try:
+ with task_manager.acquire(context, node_uuid) as task:
+ if (task.node.maintenance or
+ task.node.provision_state != provision_state):
+ continue
+
+ # timeout has been reached - process the event 'fail'
+ if callback_method:
+ task.process_event('fail',
+ callback=self._spawn_worker,
+ call_args=(callback_method, task),
+ err_handler=err_handler)
+ else:
+ task.node.last_error = last_error
+ task.process_event('fail')
+ except exception.NoFreeConductorWorker:
+ break
+ except (exception.NodeLocked, exception.NodeNotFound):
+ continue
+ workers_count += 1
+ if workers_count >= CONF.conductor.periodic_max_workers:
+ break
+
+ @messaging.expected_exceptions(exception.BoardLocked,
+ exception.BoardAssociated,
+ exception.InvalidState)
+ def destroy_board(self, context, board_id):
+ """Delete a board.
+
+ :param context: request context.
+ :param board_id: board id or uuid.
+ :raises: BoardLocked if board is locked by another conductor.
+ :raises: BoardAssociated if the board contains an instance
+ associated with it.
+ :raises: InvalidState if the board is in the wrong provision
+ state to perform deletion.
+
+ """
+ with task_manager.acquire(context, board_id) as task:
+ board = task.board
+ board.destroy()
+ LOG.info(_LI('Successfully deleted board %(board)s.'),
+ {'board': board.uuid})
+ #if board.instance_uuid is not None:
+ # raise exception.BoardAssociated(board=board.uuid,
+ # instance=board.instance_uuid)
+
+ # TODO(lucasagomes): We should add ENROLLED once it's part of our
+ # state machine
+ # NOTE(lucasagomes): For the *FAIL states, users should
+ # move the board to a safe state prior to deletion. This is because
+ # we should try to avoid deleting a board in a dirty/whacky state,
+ # e.g. a board in DEPLOYFAIL, if deleted without passing through
+ # tear down/cleaning may leave data from the previous tenant
+ # in the disk. So boards in *FAIL states should first be moved to:
+ # CLEANFAIL -> MANAGEABLE
+ # INSPECTIONFAIL -> MANAGEABLE
+ # DEPLOYFAIL -> DELETING
+ # ZAPFAIL -> MANAGEABLE (in the future)
+ '''
+ valid_states = (states.AVAILABLE, states.NOSTATE,
+ states.MANAGEABLE)
+ if board.provision_state not in valid_states:
+ msg = (_('Can not delete board "%(board)s" while it is in '
+ 'provision state "%(state)s". Valid provision states '
+ 'to perform deletion are: "%(valid_states)s"') %
+ {'board': board.uuid, 'state': board.provision_state,
+ 'valid_states': valid_states})
+ raise exception.InvalidState(msg)
+ if board.console_enabled:
+ try:
+ task.driver.console.stop_console(task)
+ except Exception as err:
+ LOG.error(_LE('Failed to stop console while deleting '
+ 'the board %(board)s: %(err)s.'),
+ {'board': board.uuid, 'err': err})
+ board.destroy()
+ LOG.info(_LI('Successfully deleted board %(board)s.'),
+ {'board': board.uuid})
+ '''
+
diff --git a/iotronic/conductor/rpcapi.py b/iotronic/conductor/rpcapi.py
new file mode 100644
index 0000000..42a7f72
--- /dev/null
+++ b/iotronic/conductor/rpcapi.py
@@ -0,0 +1,519 @@
+# coding=utf-8
+
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Client side of the conductor RPC API.
+"""
+
+import random
+
+import oslo_messaging as messaging
+
+from iotronic.common import exception
+from iotronic.common import hash_ring
+from iotronic.common.i18n import _
+from iotronic.common import rpc
+from iotronic.conductor import manager
+from iotronic.objects import base as objects_base
+
+
+class ConductorAPI(object):
+ """Client side of the conductor RPC API.
+
+ API version history:
+ | 1.0 - Initial version.
+ """
+
+ # NOTE(rloo): This must be in sync with manager.ConductorManager's.
+ RPC_API_VERSION = '1.0'
+
+ def __init__(self, topic=None):
+ super(ConductorAPI, self).__init__()
+ self.topic = topic
+ if self.topic is None:
+ self.topic = manager.MANAGER_TOPIC
+
+ target = messaging.Target(topic=self.topic,
+ version='1.0')
+ serializer = objects_base.IotronicObjectSerializer()
+ self.client = rpc.get_client(target,
+ version_cap=self.RPC_API_VERSION,
+ serializer=serializer)
+ # NOTE(deva): this is going to be buggy
+ self.ring_manager = hash_ring.HashRingManager()
+
+ def get_topic_for(self, node):
+ """Get the RPC topic for the conductor service the node is mapped to.
+
+ :param node: a node object.
+ :returns: an RPC topic string.
+ :raises: NoValidHost
+
+ """
+ '''
+ self.ring_manager.reset()
+
+ try:
+ ring = self.ring_manager[node.driver]
+ dest = ring.get_hosts(node.uuid)
+ return self.topic + "." + dest[0]
+ except exception.DriverNotFound:
+ reason = (_('No conductor service registered which supports '
+ 'driver %s.') % node.driver)
+ raise exception.NoValidHost(reason=reason)
+ '''
+
+ pass
+
+ def get_topic_for_driver(self, driver_name):
+ """Get RPC topic name for a conductor supporting the given driver.
+
+ The topic is used to route messages to the conductor supporting
+ the specified driver. A conductor is selected at random from the
+ set of qualified conductors.
+
+ :param driver_name: the name of the driver to route to.
+ :returns: an RPC topic string.
+ :raises: DriverNotFound
+
+ """
+ self.ring_manager.reset()
+
+ ring = self.ring_manager[driver_name]
+ host = random.choice(list(ring.hosts))
+ return self.topic + "." + host
+
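+ # NOTE: illustrative routing example (driver name assumed):
+ #
+ #     topic = api.get_topic_for_driver('fake')
+ #     api.driver_vendor_passthru(ctxt, 'fake', 'describe', 'GET',
+ #                                {}, topic=topic)
+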
+ def update_node(self, context, node_obj, topic=None):
+ """Synchronously, have a conductor update the node's information.
+
+ Update the node's information in the database and return a node object.
+ The conductor will lock the node while it validates the supplied
+ information. If driver_info is passed, it will be validated by
+ the core drivers. If instance_uuid is passed, it will be set or unset
+ only if the node is properly configured.
+
+ Note that power_state should not be passed via this method.
+ Use change_node_power_state for initiating driver actions.
+
+ :param context: request context.
+ :param node_obj: a changed (but not saved) node object.
+ :param topic: RPC topic. Defaults to self.topic.
+ :returns: updated node object, including all fields.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.0')
+ return cctxt.call(context, 'update_node', node_obj=node_obj)
+
+ def change_node_power_state(self, context, node_id, new_state, topic=None):
+ """Change a node's power state.
+
+ Synchronously, acquire lock and start the conductor background task
+ to change power state of a node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param new_state: one of iotronic.common.states power state values
+ :param topic: RPC topic. Defaults to self.topic.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.0')
+ return cctxt.call(context, 'change_node_power_state', node_id=node_id,
+ new_state=new_state)
+
+ def vendor_passthru(self, context, node_id, driver_method, http_method,
+ info, topic=None):
+ """Receive requests for vendor-specific actions.
+
+ Synchronously validate driver-specific info or get driver status,
+ and if successful invoke the vendor method. If the method mode
+ is async the conductor will start a background worker to perform
+ the vendor action.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param driver_method: name of method for driver.
+ :param http_method: the HTTP method used for the request.
+ :param info: info for node driver.
+ :param topic: RPC topic. Defaults to self.topic.
+ :raises: InvalidParameterValue if supplied info is not valid.
+ :raises: MissingParameterValue if a required parameter is missing
+ :raises: UnsupportedDriverExtension if current driver does not have
+ vendor interface.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+ :raises: NodeLocked if node is locked by another conductor.
+ :returns: A tuple containing the response of the invoked method
+ and a boolean value indicating whether the method was
+ invoked asynchronously (True) or synchronously (False).
+                  If invoked asynchronously, the response field will
+                  always be None.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.20')
+ return cctxt.call(context, 'vendor_passthru', node_id=node_id,
+ driver_method=driver_method,
+ http_method=http_method,
+ info=info)
+
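+    # Illustrative usage (method name and payload are hypothetical):
+    # callers unpack the (response, is_async) tuple, e.g.
+    #
+    #     response, is_async = rpcapi.vendor_passthru(
+    #         context, node_id, 'my_method', 'POST', {'key': 'value'})
+    #     if is_async:
+    #         assert response is None  # result arrives asynchronously
+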
+ def driver_vendor_passthru(self, context, driver_name, driver_method,
+ http_method, info, topic=None):
+ """Pass vendor-specific calls which don't specify a node to a driver.
+
+ Handles driver-level vendor passthru calls. These calls don't
+ require a node UUID and are executed on a random conductor with
+        the specified driver. If the method mode is async, the conductor
+        will start a background worker to perform the vendor action.
+
+ :param context: request context.
+ :param driver_name: name of the driver on which to call the method.
+ :param driver_method: name of the vendor method, for use by the driver.
+ :param http_method: the HTTP method used for the request.
+ :param info: data to pass through to the driver.
+ :param topic: RPC topic. Defaults to self.topic.
+ :raises: InvalidParameterValue for parameter errors.
+ :raises: MissingParameterValue if a required parameter is missing
+ :raises: UnsupportedDriverExtension if the driver doesn't have a vendor
+ interface, or if the vendor interface does not support the
+ specified driver_method.
+ :raises: DriverNotFound if the supplied driver is not loaded.
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+ :returns: A tuple containing the response of the invoked method
+ and a boolean value indicating whether the method was
+ invoked asynchronously (True) or synchronously (False).
+                  If invoked asynchronously, the response field will
+                  always be None.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.20')
+ return cctxt.call(context, 'driver_vendor_passthru',
+ driver_name=driver_name,
+ driver_method=driver_method,
+ http_method=http_method,
+ info=info)
+
+ def get_node_vendor_passthru_methods(self, context, node_id, topic=None):
+ """Retrieve information about vendor methods of the given node.
+
+ :param context: an admin context.
+ :param node_id: the id or uuid of a node.
+ :param topic: RPC topic. Defaults to self.topic.
+        :returns: dictionary of <method name>:<method metadata> entries.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.21')
+ return cctxt.call(context, 'get_node_vendor_passthru_methods',
+ node_id=node_id)
+
+ def get_driver_vendor_passthru_methods(self, context, driver_name,
+ topic=None):
+ """Retrieve information about vendor methods of the given driver.
+
+ :param context: an admin context.
+ :param driver_name: name of the driver.
+ :param topic: RPC topic. Defaults to self.topic.
+        :returns: dictionary of <method name>:<method metadata> entries.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.21')
+ return cctxt.call(context, 'get_driver_vendor_passthru_methods',
+ driver_name=driver_name)
+
+ def do_node_deploy(self, context, node_id, rebuild, configdrive,
+ topic=None):
+ """Signal to conductor service to perform a deployment.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param rebuild: True if this is a rebuild request.
+ :param configdrive: A gzipped and base64 encoded configdrive.
+ :param topic: RPC topic. Defaults to self.topic.
+ :raises: InstanceDeployFailure
+ :raises: InvalidParameterValue if validation fails
+ :raises: MissingParameterValue if a required parameter is missing
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+
+ The node must already be configured and in the appropriate
+ undeployed state before this method is called.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.22')
+ return cctxt.call(context, 'do_node_deploy', node_id=node_id,
+ rebuild=rebuild, configdrive=configdrive)
+
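+    # Illustrative sketch of producing the configdrive argument with the
+    # standard library (variable names are hypothetical):
+    #
+    #     import base64, gzip, io
+    #     buf = io.BytesIO()
+    #     with gzip.GzipFile(fileobj=buf, mode='wb') as f:
+    #         f.write(raw_configdrive_bytes)
+    #     configdrive = base64.b64encode(buf.getvalue())
+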
+ def do_node_tear_down(self, context, node_id, topic=None):
+ """Signal to conductor service to tear down a deployment.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param topic: RPC topic. Defaults to self.topic.
+ :raises: InstanceDeployFailure
+ :raises: InvalidParameterValue if validation fails
+ :raises: MissingParameterValue if a required parameter is missing
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+
+ The node must already be configured and in the appropriate
+ deployed state before this method is called.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.6')
+ return cctxt.call(context, 'do_node_tear_down', node_id=node_id)
+
+ def do_provisioning_action(self, context, node_id, action, topic=None):
+ """Signal to conductor service to perform the given action on a node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param action: an action. One of iotronic.common.states.VERBS
+ :param topic: RPC topic. Defaults to self.topic.
+ :raises: InvalidParameterValue
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+ :raises: InvalidStateRequested if the requested action can not
+ be performed.
+
+ This encapsulates some provisioning actions in a single call.
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.23')
+ return cctxt.call(context, 'do_provisioning_action',
+ node_id=node_id, action=action)
+
+ def continue_node_clean(self, context, node_id, topic=None):
+ """Signal to conductor service to start the next cleaning action.
+
+        NOTE(JoshNang): this is an RPC cast; there will be no response or
+ exception raised by the conductor for this RPC.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param topic: RPC topic. Defaults to self.topic.
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.27')
+ return cctxt.cast(context, 'continue_node_clean',
+ node_id=node_id)
+
+ def validate_driver_interfaces(self, context, node_id, topic=None):
+ """Validate the `core` and `standardized` interfaces for drivers.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param topic: RPC topic. Defaults to self.topic.
+ :returns: a dictionary containing the results of each
+ interface validation.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.5')
+ return cctxt.call(context, 'validate_driver_interfaces',
+ node_id=node_id)
+
+ def destroy_node(self, context, node_id, topic=None):
+ """Delete a node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: NodeAssociated if the node contains an instance
+ associated with it.
+ :raises: InvalidState if the node is in the wrong provision
+ state to perform deletion.
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.9')
+ return cctxt.call(context, 'destroy_node', node_id=node_id)
+
+ def get_console_information(self, context, node_id, topic=None):
+ """Get connection information about the console.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param topic: RPC topic. Defaults to self.topic.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support console.
+ :raises: InvalidParameterValue when the wrong driver info is specified.
+ :raises: MissingParameterValue if a required parameter is missing
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.11')
+ return cctxt.call(context, 'get_console_information', node_id=node_id)
+
+ def set_console_mode(self, context, node_id, enabled, topic=None):
+ """Enable/Disable the console.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param topic: RPC topic. Defaults to self.topic.
+ :param enabled: Boolean value; whether the console is enabled or
+ disabled.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support console.
+ :raises: InvalidParameterValue when the wrong driver info is specified.
+ :raises: MissingParameterValue if a required parameter is missing
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.11')
+ return cctxt.call(context, 'set_console_mode', node_id=node_id,
+ enabled=enabled)
+
+ def update_port(self, context, port_obj, topic=None):
+ """Synchronously, have a conductor update the port's information.
+
+ Update the port's information in the database and return a port object.
+ The conductor will lock related node and trigger specific driver
+ actions if they are needed.
+
+ :param context: request context.
+ :param port_obj: a changed (but not saved) port object.
+ :param topic: RPC topic. Defaults to self.topic.
+ :returns: updated port object, including all fields.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.13')
+ return cctxt.call(context, 'update_port', port_obj=port_obj)
+
+ def get_driver_properties(self, context, driver_name, topic=None):
+ """Get the properties of the driver.
+
+ :param context: request context.
+ :param driver_name: name of the driver.
+ :param topic: RPC topic. Defaults to self.topic.
+        :returns: a dictionary with <property name>:<property description>
+                  entries.
+ :raises: DriverNotFound.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.16')
+ return cctxt.call(context, 'get_driver_properties',
+ driver_name=driver_name)
+
+ def set_boot_device(self, context, node_id, device, persistent=False,
+ topic=None):
+ """Set the boot device for a node.
+
+ Set the boot device to use on next reboot of the node. Be aware
+ that not all drivers support this.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param device: the boot device, one of
+ :mod:`iotronic.common.boot_devices`.
+ :param persistent: Whether to set next-boot, or make the change
+ permanent. Default: False.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support management.
+ :raises: InvalidParameterValue when the wrong driver info is
+ specified or an invalid boot device is specified.
+        :raises: MissingParameterValue if a required parameter is missing.
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
+ return cctxt.call(context, 'set_boot_device', node_id=node_id,
+ device=device, persistent=persistent)
+
+ def get_boot_device(self, context, node_id, topic=None):
+ """Get the current boot device.
+
+ Returns the current boot device of a node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support management.
+ :raises: InvalidParameterValue when the wrong driver info is
+ specified.
+        :raises: MissingParameterValue if a required parameter is missing.
+ :returns: a dictionary containing:
+
+ :boot_device: the boot device, one of
+ :mod:`iotronic.common.boot_devices` or None if it is unknown.
+ :persistent: Whether the boot device will persist to all
+ future boots or not, None if it is unknown.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
+ return cctxt.call(context, 'get_boot_device', node_id=node_id)
+
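+    # Illustrative return value (assuming the boot_devices module defines
+    # a PXE constant, as in Ironic):
+    #
+    #     {'boot_device': boot_devices.PXE, 'persistent': False}
+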
+ def get_supported_boot_devices(self, context, node_id, topic=None):
+ """Get the list of supported devices.
+
+ Returns the list of supported boot devices of a node.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support management.
+ :raises: InvalidParameterValue when the wrong driver info is
+ specified.
+        :raises: MissingParameterValue if a required parameter is missing.
+ :returns: A list with the supported boot devices defined
+ in :mod:`iotronic.common.boot_devices`.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.17')
+ return cctxt.call(context, 'get_supported_boot_devices',
+ node_id=node_id)
+
+ def inspect_hardware(self, context, node_id, topic=None):
+ """Signals the conductor service to perform hardware introspection.
+
+ :param context: request context.
+ :param node_id: node id or uuid.
+ :param topic: RPC topic. Defaults to self.topic.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: HardwareInspectionFailure
+ :raises: NoFreeConductorWorker when there is no free worker to start
+ async task.
+ :raises: UnsupportedDriverExtension if the node's driver doesn't
+ support inspection.
+ :raises: InvalidStateRequested if 'inspect' is not a valid
+ action to do in the current state.
+
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.24')
+ return cctxt.call(context, 'inspect_hardware', node_id=node_id)
+
+ def destroy_port(self, context, port, topic=None):
+ """Delete a port.
+
+ :param context: request context.
+ :param port: port object
+ :param topic: RPC topic. Defaults to self.topic.
+ :raises: NodeLocked if node is locked by another conductor.
+ :raises: NodeNotFound if the node associated with the port does not
+ exist.
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.25')
+ return cctxt.call(context, 'destroy_port', port=port)
+
+######################### NEW
+
+ def destroy_board(self, context, board_id, topic=None):
+ """Delete a board.
+
+ :param context: request context.
+ :param board_id: board id or uuid.
+ :raises: BoardLocked if board is locked by another conductor.
+ :raises: BoardAssociated if the board contains an instance
+ associated with it.
+ :raises: InvalidState if the board is in the wrong provision
+ state to perform deletion.
+ """
+ cctxt = self.client.prepare(topic=topic or self.topic, version='1.0')
+ return cctxt.call(context, 'destroy_board', board_id=board_id)
\ No newline at end of file
diff --git a/iotronic/conductor/task_manager.py b/iotronic/conductor/task_manager.py
new file mode 100644
index 0000000..bc33875
--- /dev/null
+++ b/iotronic/conductor/task_manager.py
@@ -0,0 +1,363 @@
+# coding=utf-8
+
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A context manager to perform a series of tasks on a set of resources.
+
+:class:`TaskManager` is a context manager, created on-demand to allow
+synchronized access to a board and its resources.
+
+The :class:`TaskManager` will, by default, acquire an exclusive lock on
+a board for the duration that the TaskManager instance exists. You may
+create a TaskManager instance without locking by passing "shared=True"
+when creating it, but certain operations on the resources held by such
+an instance of TaskManager will not be possible. Requiring this exclusive
+lock guards against parallel operations interfering with each other.
+
+A shared lock is useful when performing non-interfering operations,
+such as validating the driver interfaces.
+
+An exclusive lock is stored in the database to coordinate between
+:class:`iotronic.conductor.manager` instances, which are typically deployed on
+different hosts.
+
+:class:`TaskManager` methods, as well as driver methods, may be decorated to
+determine whether their invocation requires an exclusive lock.
+
+The TaskManager instance exposes certain board resources and properties as
+attributes that you may access:
+
+ task.context
+ The context passed to TaskManager()
+ task.shared
+ False if Board is locked, True if it is not locked. (The
+        'shared' kwarg of TaskManager())
+ task.board
+ The Board object
+ task.ports
+ Ports belonging to the Board
+ task.driver
+ The Driver for the Board, or the Driver based on the
+ 'driver_name' kwarg of TaskManager().
+
+Example usage:
+
+::
+
+ with task_manager.acquire(context, board_id) as task:
+ task.driver.power.power_on(task.board)
+
+If you need to execute task-requiring code in a background thread, the
+TaskManager instance provides an interface to handle this for you, making
+sure to release resources when the thread finishes (successfully or if
+an exception occurs). Common use of this is within the Manager like so:
+
+::
+
+ with task_manager.acquire(context, board_id) as task:
+
+ task.spawn_after(self._spawn_worker,
+ utils.board_power_action, task, new_state)
+
+All exceptions that occur in the current GreenThread as part of the
+spawn handling are re-raised. You can specify a hook to execute custom
+code when such exceptions occur. For example, the hook is a more elegant
+solution than wrapping the "with task_manager.acquire()" with a
+try..except block. (Note that this hook does not handle exceptions
+raised in the background thread.):
+
+::
+
+ def on_error(e):
+ if isinstance(e, Exception):
+ ...
+
+ with task_manager.acquire(context, board_id) as task:
+
+ task.set_spawn_error_hook(on_error)
+ task.spawn_after(self._spawn_worker,
+ utils.board_power_action, task, new_state)
+
+"""
+
+import functools
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+import retrying
+
+from iotronic.common import driver_factory
+from iotronic.common import exception
+from iotronic.common.i18n import _LW
+from iotronic.common import states
+from iotronic import objects
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
+def require_exclusive_lock(f):
+ """Decorator to require an exclusive lock.
+
+ Decorated functions must take a :class:`TaskManager` as the first
+ parameter. Decorated class methods should take a :class:`TaskManager`
+ as the first parameter after "self".
+
+ """
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ task = args[0] if isinstance(args[0], TaskManager) else args[1]
+ if task.shared:
+ raise exception.ExclusiveLockRequired()
+ return f(*args, **kwargs)
+ return wrapper
+
+
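+# Illustrative use of the decorator (function name is hypothetical):
+#
+#     @require_exclusive_lock
+#     def reboot_board(task):
+#         task.driver.power.reboot(task)
+#
+# Calling reboot_board() with a task created via acquire(..., shared=True)
+# raises ExclusiveLockRequired before the body runs.
+
+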
+def acquire(context, board_id, shared=False, driver_name=None):
+ """Shortcut for acquiring a lock on a Board.
+
+ :param context: Request context.
+ :param board_id: ID or UUID of board to lock.
+ :param shared: Boolean indicating whether to take a shared or exclusive
+ lock. Default: False.
+ :param driver_name: Name of Driver. Default: None.
+ :returns: An instance of :class:`TaskManager`.
+
+ """
+ return TaskManager(context, board_id, shared=shared,
+ driver_name=driver_name)
+
+
+class TaskManager(object):
+ """Context manager for tasks.
+
+ This class wraps the locking, driver loading, and acquisition
+    of related resources (e.g., Board and Ports) when beginning a unit of work.
+
+ """
+
+ def __init__(self, context, board_id, shared=False, driver_name=None):
+ """Create a new TaskManager.
+
+ Acquire a lock on a board. The lock can be either shared or
+ exclusive. Shared locks may be used for read-only or
+ non-disruptive actions only, and must be considerate to what
+ other threads may be doing on the same board at the same time.
+
+ :param context: request context
+ :param board_id: ID or UUID of board to lock.
+ :param shared: Boolean indicating whether to take a shared or exclusive
+ lock. Default: False.
+ :param driver_name: The name of the driver to load, if different
+ from the Board's current driver.
+ :raises: DriverNotFound
+ :raises: BoardNotFound
+ :raises: BoardLocked
+
+ """
+
+ self._spawn_method = None
+ self._on_error_method = None
+
+ self.context = context
+        self.board = None
+        self.shared = shared
+        # Port and driver loading is currently disabled (see the commented
+        # lines below); the attributes are still defined so that
+        # release_resources() can reset them safely.
+        self.ports = None
+        self.driver = None
+
+ self.fsm = states.machine.copy()
+
+ # BoardLocked exceptions can be annoying. Let's try to alleviate
+ # some of that pain by retrying our lock attempts. The retrying
+ # module expects a wait_fixed value in milliseconds.
+ @retrying.retry(
+ retry_on_exception=lambda e: isinstance(e, exception.BoardLocked),
+ stop_max_attempt_number=CONF.conductor.board_locked_retry_attempts,
+ wait_fixed=CONF.conductor.board_locked_retry_interval * 1000)
+ def reserve_board():
+ LOG.debug("Attempting to reserve board %(board)s",
+ {'board': board_id})
+ self.board = objects.Board.reserve(context, CONF.host, board_id)
+
+ try:
+ if not self.shared:
+ reserve_board()
+ else:
+ self.board = objects.Board.get(context, board_id)
+ #self.ports = objects.Port.list_by_board_id(context, self.board.id)
+ #self.driver = driver_factory.get_driver(driver_name or
+ # self.board.driver)
+
+ # NOTE(deva): this handles the Juno-era NOSTATE state
+ # and should be deleted after Kilo is released
+            # if self.board.provision_state is states.NOSTATE:
+            #     self.board.provision_state = states.AVAILABLE
+            #     self.board.save()
+            #
+            # self.fsm.initialize(self.board.provision_state)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.release_resources()
+
+ def spawn_after(self, _spawn_method, *args, **kwargs):
+ """Call this to spawn a thread to complete the task.
+
+ The specified method will be called when the TaskManager instance
+ exits.
+
+ :param _spawn_method: a method that returns a GreenThread object
+ :param args: args passed to the method.
+ :param kwargs: additional kwargs passed to the method.
+
+ """
+ self._spawn_method = _spawn_method
+ self._spawn_args = args
+ self._spawn_kwargs = kwargs
+
+ def set_spawn_error_hook(self, _on_error_method, *args, **kwargs):
+ """Create a hook to handle exceptions when spawning a task.
+
+ Create a hook that gets called upon an exception being raised
+ from spawning a background thread to do a task.
+
+        :param _on_error_method: a callable object; its first parameter
+ should accept the Exception object that was raised.
+ :param args: additional args passed to the callable object.
+ :param kwargs: additional kwargs passed to the callable object.
+
+ """
+ self._on_error_method = _on_error_method
+ self._on_error_args = args
+ self._on_error_kwargs = kwargs
+
+ def release_resources(self):
+ """Unlock a board and release resources.
+
+ If an exclusive lock is held, unlock the board. Reset attributes
+ to make it clear that this instance of TaskManager should no
+ longer be accessed.
+ """
+
+ if not self.shared:
+ try:
+ if self.board:
+ objects.Board.release(self.context, CONF.host, self.board.id)
+ except exception.BoardNotFound:
+ # squelch the exception if the board was deleted
+ # within the task's context.
+ pass
+ self.board = None
+ self.driver = None
+ self.ports = None
+ self.fsm = None
+
+ def _thread_release_resources(self, t):
+ """Thread.link() callback to release resources."""
+ self.release_resources()
+
+ def process_event(self, event, callback=None, call_args=None,
+ call_kwargs=None, err_handler=None):
+ """Process the given event for the task's current state.
+
+ :param event: the name of the event to process
+ :param callback: optional callback to invoke upon event transition
+ :param call_args: optional \*args to pass to the callback method
+ :param call_kwargs: optional \**kwargs to pass to the callback method
+ :param err_handler: optional error handler to invoke if the
+ callback fails, eg. because there are no workers available
+ (err_handler should accept arguments board, prev_prov_state, and
+ prev_target_state)
+ :raises: InvalidState if the event is not allowed by the associated
+ state machine
+ """
+        # Advance the state model for the given event. Note that this
+        # doesn't alter the board in any way; it may raise InvalidState
+        # if the event is not allowed in the current state.
+ self.fsm.process_event(event)
+
+ # stash current states in the error handler if callback is set,
+ # in case we fail to get a worker from the pool
+ if err_handler and callback:
+ self.set_spawn_error_hook(err_handler, self.board,
+ self.board.provision_state,
+ self.board.target_provision_state)
+
+ self.board.provision_state = self.fsm.current_state
+ self.board.target_provision_state = self.fsm.target_state
+
+ # set up the async worker
+ if callback:
+ # clear the error if we're going to start work in a callback
+ self.board.last_error = None
+ if call_args is None:
+ call_args = ()
+ if call_kwargs is None:
+ call_kwargs = {}
+ self.spawn_after(callback, *call_args, **call_kwargs)
+
+ # publish the state transition by saving the Board
+ self.board.save()
+
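+    # Illustrative call, modeled on Ironic's conductor manager (callback
+    # and handler names are hypothetical):
+    #
+    #     task.process_event(
+    #         'deploy',
+    #         callback=self._spawn_worker,
+    #         call_args=(do_board_deploy, task),
+    #         err_handler=provisioning_error_handler)
+    #
+    # This advances the FSM, arms the error hook with the previous states,
+    # schedules do_board_deploy on a worker, and saves the Board.
+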
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if exc_type is None and self._spawn_method is not None:
+ # Spawn a worker to complete the task
+ # The linked callback below will be called whenever:
+ # - background task finished with no errors.
+ # - background task has crashed with exception.
+ # - callback was added after the background task has
+ # finished or crashed. While eventlet currently doesn't
+ # schedule the new thread until the current thread blocks
+ # for some reason, this is true.
+ # All of the above are asserted in tests such that we'll
+ # catch if eventlet ever changes this behavior.
+ thread = None
+ try:
+ thread = self._spawn_method(*self._spawn_args,
+ **self._spawn_kwargs)
+
+ # NOTE(comstud): Trying to use a lambda here causes
+ # the callback to not occur for some reason. This
+ # also makes it easier to test.
+ thread.link(self._thread_release_resources)
+ # Don't unlock! The unlock will occur when the
+            # thread finishes.
+ return
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ try:
+ # Execute the on_error hook if set
+ if self._on_error_method:
+ self._on_error_method(e, *self._on_error_args,
+ **self._on_error_kwargs)
+ except Exception:
+ LOG.warning(_LW("Task's on_error hook failed to "
+ "call %(method)s on board %(board)s"),
+ {'method': self._on_error_method.__name__,
+ 'board': self.board.uuid})
+
+ if thread is not None:
+ # This means the link() failed for some
+ # reason. Nuke the thread.
+ thread.cancel()
+ self.release_resources()
+ self.release_resources()
diff --git a/iotronic/conductor/utils.py b/iotronic/conductor/utils.py
new file mode 100644
index 0000000..31a36ab
--- /dev/null
+++ b/iotronic/conductor/utils.py
@@ -0,0 +1,160 @@
+# coding=utf-8
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+from oslo_utils import excutils
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common.i18n import _LI
+from iotronic.common.i18n import _LW
+from iotronic.common import states
+from iotronic.conductor import task_manager
+
+LOG = log.getLogger(__name__)
+
+
+@task_manager.require_exclusive_lock
+def node_set_boot_device(task, device, persistent=False):
+ """Set the boot device for a node.
+
+ :param task: a TaskManager instance.
+ :param device: Boot device. Values are vendor-specific.
+ :param persistent: Whether to set next-boot, or make the change
+ permanent. Default: False.
+ :raises: InvalidParameterValue if the validation of the
+ ManagementInterface fails.
+
+ """
+ if getattr(task.driver, 'management', None):
+ task.driver.management.validate(task)
+ task.driver.management.set_boot_device(task,
+ device=device,
+ persistent=persistent)
+
+
+@task_manager.require_exclusive_lock
+def node_power_action(task, new_state):
+ """Change power state or reset for a node.
+
+ Perform the requested power action if the transition is required.
+
+ :param task: a TaskManager instance containing the node to act on.
+ :param new_state: Any power state from iotronic.common.states. If the
+ state is 'REBOOT' then a reboot will be attempted, otherwise
+ the node power state is directly set to 'state'.
+ :raises: InvalidParameterValue when the wrong state is specified
+ or the wrong driver info is specified.
+    :raises: other exceptions by the node's power driver if something
+             went wrong during the power action.
+
+ """
+ node = task.node
+ target_state = states.POWER_ON if new_state == states.REBOOT else new_state
+
+ if new_state != states.REBOOT:
+ try:
+ curr_state = task.driver.power.get_power_state(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ node['last_error'] = _(
+ "Failed to change power state to '%(target)s'. "
+ "Error: %(error)s") % {'target': new_state, 'error': e}
+ node['target_power_state'] = states.NOSTATE
+ node.save()
+
+ if curr_state == new_state:
+ # Neither the iotronic service nor the hardware has erred. The
+ # node is, for some reason, already in the requested state,
+            # though we don't know why. e.g., perhaps the user previously
+ # requested the node POWER_ON, the network delayed those IPMI
+ # packets, and they are trying again -- but the node finally
+ # responds to the first request, and so the second request
+ # gets to this check and stops.
+ # This isn't an error, so we'll clear last_error field
+ # (from previous operation), log a warning, and return.
+ node['last_error'] = None
+ # NOTE(dtantsur): under rare conditions we can get out of sync here
+ node['power_state'] = new_state
+ node['target_power_state'] = states.NOSTATE
+ node.save()
+        LOG.warning(_LW("Not going to change_node_power_state because "
+                        "current state = requested state = '%(state)s'."),
+                    {'state': curr_state})
+ return
+
+ if curr_state == states.ERROR:
+ # be optimistic and continue action
+        LOG.warning(_LW("Driver returns ERROR power state for node %s."),
+                    node.uuid)
+
+ # Set the target_power_state and clear any last_error, if we're
+ # starting a new operation. This will expose to other processes
+ # and clients that work is in progress.
+ if node['target_power_state'] != target_state:
+ node['target_power_state'] = target_state
+ node['last_error'] = None
+ node.save()
+
+ # take power action
+ try:
+ if new_state != states.REBOOT:
+ task.driver.power.set_power_state(task, new_state)
+ else:
+ task.driver.power.reboot(task)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ node['last_error'] = _(
+ "Failed to change power state to '%(target)s'. "
+ "Error: %(error)s") % {'target': target_state, 'error': e}
+ else:
+ # success!
+ node['power_state'] = target_state
+ LOG.info(_LI('Successfully set node %(node)s power state to '
+ '%(state)s.'),
+ {'node': node.uuid, 'state': target_state})
+ finally:
+ node['target_power_state'] = states.NOSTATE
+ node.save()
+
+
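+# Illustrative usage (a sketch, assuming a driver that implements the
+# 'power' interface; note this module still accesses task.node, apparently
+# pending the Ironic node -> Iotronic board rename):
+#
+#     with task_manager.acquire(context, board_id) as task:
+#         node_power_action(task, states.REBOOT)
+
+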
+@task_manager.require_exclusive_lock
+def cleanup_after_timeout(task):
+ """Cleanup deploy task after timeout.
+
+ :param task: a TaskManager instance.
+ """
+ node = task.node
+ msg = (_('Timeout reached while waiting for callback for node %s')
+ % node.uuid)
+ node.last_error = msg
+ LOG.error(msg)
+ node.save()
+
+ error_msg = _('Cleanup failed for node %(node)s after deploy timeout: '
+                  '%(error)s')
+ try:
+ task.driver.deploy.clean_up(task)
+ except exception.IotronicException as e:
+ msg = error_msg % {'node': node.uuid, 'error': e}
+ LOG.error(msg)
+ node.last_error = msg
+ node.save()
+ except Exception as e:
+ msg = error_msg % {'node': node.uuid, 'error': e}
+ LOG.error(msg)
+ node.last_error = _('Deploy timed out, but an unhandled exception was '
+ 'encountered while aborting. More info may be '
+ 'found in the log file.')
+ node.save()
diff --git a/iotronic/db/__init__.py b/iotronic/db/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iotronic/db/api.py b/iotronic/db/api.py
new file mode 100644
index 0000000..9af30a3
--- /dev/null
+++ b/iotronic/db/api.py
@@ -0,0 +1,488 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Base classes for storage engines
+"""
+
+import abc
+
+from oslo_config import cfg
+from oslo_db import api as db_api
+import six
+
+
+_BACKEND_MAPPING = {'sqlalchemy': 'iotronic.db.sqlalchemy.api'}
+IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
+ lazy=True)
+
+
+def get_instance():
+ """Return a DB API instance."""
+ return IMPL
+
+
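+# Illustrative usage (a sketch): get_instance() returns the lazily loaded
+# backend selected by the [database]/backend option ('sqlalchemy' by
+# default, per _BACKEND_MAPPING above).
+#
+#     from iotronic.db import api as dbapi
+#     conn = dbapi.get_instance()
+#     board = conn.get_board_by_uuid(board_uuid)
+
+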
+@six.add_metaclass(abc.ABCMeta)
+class Connection(object):
+ """Base class for storage system connections."""
+
+ @abc.abstractmethod
+ def __init__(self):
+ """Constructor."""
+
+ @abc.abstractmethod
+ def get_nodeinfo_list(self, columns=None, filters=None, limit=None,
+ marker=None, sort_key=None, sort_dir=None):
+ """Get specific columns for matching nodes.
+
+ Return a list of the specified columns for all nodes that match the
+ specified filters.
+
+ :param columns: List of column names to return.
+                        Defaults to 'id' column when columns is None.
+ :param filters: Filters to apply. Defaults to None.
+
+ :associated: True | False
+ :reserved: True | False
+ :maintenance: True | False
+ :chassis_uuid: uuid of chassis
+ :driver: driver's name
+ :provision_state: provision state of node
+ :provisioned_before:
+ nodes with provision_updated_at field before this
+ interval in seconds
+ :param limit: Maximum number of nodes to return.
+ :param marker: the last item of the previous page; we return the next
+ result set.
+ :param sort_key: Attribute by which results should be sorted.
+ :param sort_dir: direction in which results should be sorted.
+ (asc, desc)
+ :returns: A list of tuples of the specified columns.
+ """
+
+ @abc.abstractmethod
+ def get_node_list(self, filters=None, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ """Return a list of nodes.
+
+ :param filters: Filters to apply. Defaults to None.
+
+ :associated: True | False
+ :reserved: True | False
+ :maintenance: True | False
+ :chassis_uuid: uuid of chassis
+ :driver: driver's name
+ :provision_state: provision state of node
+ :provisioned_before:
+ nodes with provision_updated_at field before this
+ interval in seconds
+ :param limit: Maximum number of nodes to return.
+ :param marker: the last item of the previous page; we return the next
+ result set.
+ :param sort_key: Attribute by which results should be sorted.
+ :param sort_dir: direction in which results should be sorted.
+ (asc, desc)
+ """
+
+ @abc.abstractmethod
+ def reserve_node(self, tag, node_id):
+ """Reserve a node.
+
+ To prevent other ManagerServices from manipulating the given
+ Node while a Task is performed, mark it reserved by this host.
+
+ :param tag: A string uniquely identifying the reservation holder.
+ :param node_id: A node id or uuid.
+ :returns: A Node object.
+ :raises: NodeNotFound if the node is not found.
+ :raises: NodeLocked if the node is already reserved.
+ """
+
+ @abc.abstractmethod
+ def release_node(self, tag, node_id):
+ """Release the reservation on a node.
+
+ :param tag: A string uniquely identifying the reservation holder.
+ :param node_id: A node id or uuid.
+ :raises: NodeNotFound if the node is not found.
+ :raises: NodeLocked if the node is reserved by another host.
+ :raises: NodeNotLocked if the node was found to not have a
+ reservation at all.
+ """
+
+ @abc.abstractmethod
+ def create_node(self, values):
+ """Create a new node.
+
+ :param values: A dict containing several items used to identify
+ and track the node, and several dicts which are passed
+ into the Drivers when managing this node. For example:
+
+ ::
+
+ {
+ 'uuid': uuidutils.generate_uuid(),
+ 'instance_uuid': None,
+ 'power_state': states.POWER_OFF,
+ 'provision_state': states.AVAILABLE,
+ 'driver': 'pxe_ipmitool',
+ 'driver_info': { ... },
+ 'properties': { ... },
+ 'extra': { ... },
+ }
+ :returns: A node.
+ """
+
+ @abc.abstractmethod
+ def get_node_by_id(self, node_id):
+ """Return a node.
+
+ :param node_id: The id of a node.
+ :returns: A node.
+ """
+
+ @abc.abstractmethod
+ def get_node_by_uuid(self, node_uuid):
+ """Return a node.
+
+ :param node_uuid: The uuid of a node.
+ :returns: A node.
+ """
+
+ @abc.abstractmethod
+ def get_node_by_name(self, node_name):
+ """Return a node.
+
+ :param node_name: The logical name of a node.
+ :returns: A node.
+ """
+
+ @abc.abstractmethod
+ def get_node_by_instance(self, instance):
+ """Return a node.
+
+ :param instance: The instance name or uuid to search for.
+ :returns: A node.
+ """
+
+ @abc.abstractmethod
+ def destroy_node(self, node_id):
+ """Destroy a node and all associated interfaces.
+
+ :param node_id: The id or uuid of a node.
+ """
+
+ @abc.abstractmethod
+ def update_node(self, node_id, values):
+ """Update properties of a node.
+
+ :param node_id: The id or uuid of a node.
+ :param values: Dict of values to update.
+ May be a partial list, eg. when setting the
+ properties for a driver. For example:
+
+ ::
+
+ {
+ 'driver_info':
+ {
+ 'my-field-1': val1,
+ 'my-field-2': val2,
+ }
+ }
+ :returns: A node.
+ :raises: NodeAssociated
+ :raises: NodeNotFound
+ """
+
+ @abc.abstractmethod
+ def get_port_by_id(self, port_id):
+ """Return a network port representation.
+
+ :param port_id: The id of a port.
+ :returns: A port.
+ """
+
+ @abc.abstractmethod
+ def get_port_by_uuid(self, port_uuid):
+ """Return a network port representation.
+
+ :param port_uuid: The uuid of a port.
+ :returns: A port.
+ """
+
+ @abc.abstractmethod
+ def get_port_by_address(self, address):
+ """Return a network port representation.
+
+ :param address: The MAC address of a port.
+ :returns: A port.
+ """
+
+ @abc.abstractmethod
+ def get_port_list(self, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ """Return a list of ports.
+
+ :param limit: Maximum number of ports to return.
+ :param marker: the last item of the previous page; we return the next
+ result set.
+ :param sort_key: Attribute by which results should be sorted.
+ :param sort_dir: direction in which results should be sorted.
+ (asc, desc)
+ """
+
+ @abc.abstractmethod
+ def get_ports_by_node_id(self, node_id, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ """List all the ports for a given node.
+
+ :param node_id: The integer node ID.
+ :param limit: Maximum number of ports to return.
+ :param marker: the last item of the previous page; we return the next
+ result set.
+ :param sort_key: Attribute by which results should be sorted
+ :param sort_dir: direction in which results should be sorted
+ (asc, desc)
+ :returns: A list of ports.
+ """
+
+ @abc.abstractmethod
+ def create_port(self, values):
+ """Create a new port.
+
+ :param values: Dict of values.
+ """
+
+ @abc.abstractmethod
+ def update_port(self, port_id, values):
+        """Update properties of a port.
+
+ :param port_id: The id or MAC of a port.
+ :param values: Dict of values to update.
+ :returns: A port.
+ """
+
+ @abc.abstractmethod
+ def destroy_port(self, port_id):
+        """Destroy a port.
+
+ :param port_id: The id or MAC of a port.
+ """
+
+ @abc.abstractmethod
+ def create_chassis(self, values):
+ """Create a new chassis.
+
+ :param values: Dict of values.
+ """
+
+ @abc.abstractmethod
+ def get_chassis_by_id(self, chassis_id):
+ """Return a chassis representation.
+
+ :param chassis_id: The id of a chassis.
+ :returns: A chassis.
+ """
+
+ @abc.abstractmethod
+ def get_chassis_by_uuid(self, chassis_uuid):
+ """Return a chassis representation.
+
+ :param chassis_uuid: The uuid of a chassis.
+ :returns: A chassis.
+ """
+
+ @abc.abstractmethod
+ def get_chassis_list(self, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ """Return a list of chassis.
+
+ :param limit: Maximum number of chassis to return.
+ :param marker: the last item of the previous page; we return the next
+ result set.
+ :param sort_key: Attribute by which results should be sorted.
+ :param sort_dir: direction in which results should be sorted.
+ (asc, desc)
+ """
+
+ @abc.abstractmethod
+ def update_chassis(self, chassis_id, values):
+        """Update properties of a chassis.
+
+ :param chassis_id: The id or the uuid of a chassis.
+ :param values: Dict of values to update.
+ :returns: A chassis.
+ """
+
+ @abc.abstractmethod
+ def destroy_chassis(self, chassis_id):
+ """Destroy a chassis.
+
+ :param chassis_id: The id or the uuid of a chassis.
+ """
+
+ @abc.abstractmethod
+ def register_conductor(self, values, update_existing=False):
+ """Register an active conductor with the cluster.
+
+ :param values: A dict of values which must contain the following:
+
+ ::
+
+ {
+ 'hostname': the unique hostname which identifies
+ this Conductor service.
+ 'drivers': a list of supported drivers.
+ }
+ :param update_existing: When false, registration will raise an
+ exception when a conflicting online record
+ is found. When true, will overwrite the
+ existing record. Default: False.
+ :returns: A conductor.
+ :raises: ConductorAlreadyRegistered
+ """
+
+ @abc.abstractmethod
+ def get_conductor(self, hostname):
+ """Retrieve a conductor's service record from the database.
+
+ :param hostname: The hostname of the conductor service.
+ :returns: A conductor.
+ :raises: ConductorNotFound
+ """
+
+ @abc.abstractmethod
+ def unregister_conductor(self, hostname):
+ """Remove this conductor from the service registry immediately.
+
+ :param hostname: The hostname of this conductor service.
+ :raises: ConductorNotFound
+ """
+
+ @abc.abstractmethod
+ def touch_conductor(self, hostname):
+ """Mark a conductor as active by updating its 'updated_at' property.
+
+ :param hostname: The hostname of this conductor service.
+ :raises: ConductorNotFound
+ """
+
+ @abc.abstractmethod
+ def get_active_driver_dict(self, interval):
+ """Retrieve drivers for the registered and active conductors.
+
+ :param interval: Seconds since last check-in of a conductor.
+ :returns: A dict which maps driver names to the set of hosts
+ which support them. For example:
+
+ ::
+
+ {driverA: set([host1, host2]),
+ driverB: set([host2, host3])}
+ """
+
+
+
+
+###################### NEW #############################
+
+ @abc.abstractmethod
+    def get_board_by_uuid(self, board_uuid):
+        """Return a board.
+
+        :param board_uuid: The uuid of a board.
+        :returns: A board.
+ """
+
+ @abc.abstractmethod
+ def get_board_list(self, filters=None, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+        """Return a list of boards.
+
+        :param filters: Filters to apply. Defaults to None.
+
+                        :associated: True | False
+                        :reserved: True | False
+                        :maintenance: True | False
+                        :chassis_uuid: uuid of chassis
+                        :driver: driver's name
+                        :provision_state: provision state of board
+                        :provisioned_before:
+                            boards with provision_updated_at field before this
+                            interval in seconds
+        :param limit: Maximum number of boards to return.
+ :param marker: the last item of the previous page; we return the next
+ result set.
+ :param sort_key: Attribute by which results should be sorted.
+ :param sort_dir: direction in which results should be sorted.
+ (asc, desc)
+ """
+
+ @abc.abstractmethod
+ def reserve_board(self, tag, board_id):
+ """Reserve a board.
+
+ To prevent other ManagerServices from manipulating the given
+ Board while a Task is performed, mark it reserved by this host.
+
+ :param tag: A string uniquely identifying the reservation holder.
+ :param board_id: A board id or uuid.
+ :returns: A Board object.
+ :raises: BoardNotFound if the board is not found.
+ :raises: BoardLocked if the board is already reserved.
+ """
+
+ @abc.abstractmethod
+ def release_board(self, tag, board_id):
+ """Release the reservation on a board.
+
+ :param tag: A string uniquely identifying the reservation holder.
+ :param board_id: A board id or uuid.
+ :raises: BoardNotFound if the board is not found.
+ :raises: BoardLocked if the board is reserved by another host.
+ :raises: BoardNotLocked if the board was found to not have a
+ reservation at all.
+ """
+
+ @abc.abstractmethod
+ def destroy_board(self, board_id):
+ """Destroy a board and all associated interfaces.
+
+ :param board_id: The id or uuid of a board.
+ """
+
+ @abc.abstractmethod
+ def create_board(self, values):
+ """Create a new board.
+
+ :param values: A dict containing several items used to identify
+ and track the board, and several dicts which are passed
+ into the Drivers when managing this board. For example:
+
+ ::
+
+ {
+ 'uuid': uuidutils.generate_uuid(),
+ 'instance_uuid': None,
+ 'power_state': states.POWER_OFF,
+ 'provision_state': states.AVAILABLE,
+ 'driver': 'pxe_ipmitool',
+ 'driver_info': { ... },
+ 'properties': { ... },
+ 'extra': { ... },
+ }
+ :returns: A board.
+ """
\ No newline at end of file
diff --git a/iotronic/db/migration.py b/iotronic/db/migration.py
new file mode 100644
index 0000000..6dd66e9
--- /dev/null
+++ b/iotronic/db/migration.py
@@ -0,0 +1,56 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Database setup and migration commands."""
+
+from oslo_config import cfg
+from stevedore import driver
+
+_IMPL = None
+
+
+def get_backend():
+ global _IMPL
+ if not _IMPL:
+ cfg.CONF.import_opt('backend', 'oslo_db.options', group='database')
+ _IMPL = driver.DriverManager("iotronic.database.migration_backend",
+ cfg.CONF.database.backend).driver
+ return _IMPL
+
+
+def upgrade(version=None):
+ """Migrate the database to `version` or the most recent version."""
+ return get_backend().upgrade(version)
+
+
+def downgrade(version=None):
+    """Downgrade the database to `version` (or the base revision)."""
+    return get_backend().downgrade(version)
+
+
+def version():
+    """Return the current database schema revision."""
+    return get_backend().version()
+
+
+def stamp(version):
+    """Stamp the database with `version`, without running migrations."""
+    return get_backend().stamp(version)
+
+
+def revision(message, autogenerate):
+    """Generate a new database migration script."""
+    return get_backend().revision(message, autogenerate)
+
+
+def create_schema():
+    """Create the database schema from the current models."""
+    return get_backend().create_schema()
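+
+
+# Illustrative programmatic use, mirroring the iotronic-dbsync commands:
+#
+#     from iotronic.db import migration
+#     migration.create_schema()    # build tables for a fresh database
+#     migration.upgrade()          # or migrate to the latest revision
+#     print(migration.version())   # report the current revision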
diff --git a/iotronic/db/sqlalchemy/__init__.py b/iotronic/db/sqlalchemy/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iotronic/db/sqlalchemy/alembic.ini b/iotronic/db/sqlalchemy/alembic.ini
new file mode 100644
index 0000000..a768980
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic.ini
@@ -0,0 +1,54 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts
+script_location = %(here)s/alembic
+
+# template used to generate migration files
+# file_template = %%(rev)s_%%(slug)s
+
+# max length of characters to apply to the
+# "slug" field
+#truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+#sqlalchemy.url = driver://user:pass@localhost/dbname
+
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/iotronic/db/sqlalchemy/alembic/README b/iotronic/db/sqlalchemy/alembic/README
new file mode 100644
index 0000000..d7e941b
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/README
@@ -0,0 +1,16 @@
+Please see https://alembic.readthedocs.org/en/latest/index.html for general documentation
+
+To create alembic migrations use:
+$ iotronic-dbsync revision --message "description of change" --autogenerate
+
+Stamp db with most recent migration version, without actually running migrations
+$ iotronic-dbsync stamp --revision head
+
+Upgrade can be performed by:
+$ iotronic-dbsync - for backward compatibility
+$ iotronic-dbsync upgrade
+$ iotronic-dbsync upgrade --revision head
+
+Downgrading db:
+$ iotronic-dbsync downgrade
+$ iotronic-dbsync downgrade --revision base
diff --git a/iotronic/db/sqlalchemy/alembic/env.py b/iotronic/db/sqlalchemy/alembic/env.py
new file mode 100644
index 0000000..464ff98
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/env.py
@@ -0,0 +1,61 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from logging import config as log_config
+
+from alembic import context
+
+try:
+ # NOTE(whaom): This is to register the DB2 alembic code which
+ # is an optional runtime dependency.
+ from ibm_db_alembic.ibm_db import IbmDbImpl # noqa
+except ImportError:
+ pass
+
+from iotronic.db.sqlalchemy import api as sqla_api
+from iotronic.db.sqlalchemy import models
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line sets up loggers basically.
+log_config.fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+target_metadata = models.Base.metadata
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_online():
+ """Run migrations in 'online' mode.
+
+ In this scenario we need to create an Engine
+ and associate a connection with the context.
+
+ """
+ engine = sqla_api.get_engine()
+ with engine.connect() as connection:
+ context.configure(connection=connection,
+ target_metadata=target_metadata)
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+run_migrations_online()
diff --git a/iotronic/db/sqlalchemy/alembic/script.py.mako b/iotronic/db/sqlalchemy/alembic/script.py.mako
new file mode 100644
index 0000000..9570201
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/script.py.mako
@@ -0,0 +1,22 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision}
+Create Date: ${create_date}
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+def upgrade():
+ ${upgrades if upgrades else "pass"}
+
+
+def downgrade():
+ ${downgrades if downgrades else "pass"}
diff --git a/iotronic/db/sqlalchemy/alembic/versions/1e1d5ace7dc6_add_inspection_started_at_and_.py b/iotronic/db/sqlalchemy/alembic/versions/1e1d5ace7dc6_add_inspection_started_at_and_.py
new file mode 100644
index 0000000..58d8954
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/1e1d5ace7dc6_add_inspection_started_at_and_.py
@@ -0,0 +1,40 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""add inspection_started_at and inspection_finished_at
+
+Revision ID: 1e1d5ace7dc6
+Revises: 3ae36a5f5131
+Create Date: 2015-02-26 10:46:46.861927
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '1e1d5ace7dc6'
+down_revision = '3ae36a5f5131'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.add_column('nodes', sa.Column('inspection_started_at',
+ sa.DateTime(),
+ nullable=True))
+ op.add_column('nodes', sa.Column('inspection_finished_at',
+ sa.DateTime(),
+ nullable=True))
+
+
+def downgrade():
+ op.drop_column('nodes', 'inspection_started_at')
+ op.drop_column('nodes', 'inspection_finished_at')
diff --git a/iotronic/db/sqlalchemy/alembic/versions/21b331f883ef_add_provision_updated_at.py b/iotronic/db/sqlalchemy/alembic/versions/21b331f883ef_add_provision_updated_at.py
new file mode 100644
index 0000000..12eb08f
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/21b331f883ef_add_provision_updated_at.py
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add provision_updated_at
+
+Revision ID: 21b331f883ef
+Revises: 2581ebaf0cb2
+Create Date: 2014-02-19 13:45:30.150632
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '21b331f883ef'
+down_revision = '2581ebaf0cb2'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.add_column('nodes', sa.Column('provision_updated_at', sa.DateTime(),
+ nullable=True))
+
+
+def downgrade():
+ op.drop_column('nodes', 'provision_updated_at')
diff --git a/iotronic/db/sqlalchemy/alembic/versions/242cc6a923b3_add_node_maintenance_reason.py b/iotronic/db/sqlalchemy/alembic/versions/242cc6a923b3_add_node_maintenance_reason.py
new file mode 100644
index 0000000..57d1987
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/242cc6a923b3_add_node_maintenance_reason.py
@@ -0,0 +1,36 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add Node.maintenance_reason
+
+Revision ID: 242cc6a923b3
+Revises: 487deb87cc9d
+Create Date: 2014-10-15 23:00:43.164061
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '242cc6a923b3'
+down_revision = '487deb87cc9d'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.add_column('nodes', sa.Column('maintenance_reason',
+ sa.Text(),
+ nullable=True))
+
+
+def downgrade():
+ op.drop_column('nodes', 'maintenance_reason')
diff --git a/iotronic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py b/iotronic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py
new file mode 100644
index 0000000..868b8db
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/2581ebaf0cb2_initial_migration.py
@@ -0,0 +1,106 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""initial migration
+
+Revision ID: 2581ebaf0cb2
+Revises: None
+Create Date: 2014-01-17 12:14:07.754448
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2581ebaf0cb2'
+down_revision = None
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ # commands auto generated by Alembic - please adjust!
+ op.create_table(
+ 'conductors',
+ sa.Column('created_at', sa.DateTime(), nullable=True),
+ sa.Column('updated_at', sa.DateTime(), nullable=True),
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('hostname', sa.String(length=255), nullable=False),
+ sa.Column('drivers', sa.Text(), nullable=True),
+ sa.PrimaryKeyConstraint('id'),
+ sa.UniqueConstraint('hostname', name='uniq_conductors0hostname'),
+ mysql_ENGINE='InnoDB',
+ mysql_DEFAULT_CHARSET='UTF8'
+ )
+ op.create_table(
+ 'chassis',
+ sa.Column('created_at', sa.DateTime(), nullable=True),
+ sa.Column('updated_at', sa.DateTime(), nullable=True),
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=36), nullable=True),
+ sa.Column('extra', sa.Text(), nullable=True),
+ sa.Column('description', sa.String(length=255), nullable=True),
+ sa.PrimaryKeyConstraint('id'),
+ sa.UniqueConstraint('uuid', name='uniq_chassis0uuid'),
+ mysql_ENGINE='InnoDB',
+ mysql_DEFAULT_CHARSET='UTF8'
+ )
+ op.create_table(
+ 'nodes',
+ sa.Column('created_at', sa.DateTime(), nullable=True),
+ sa.Column('updated_at', sa.DateTime(), nullable=True),
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=36), nullable=True),
+ sa.Column('instance_uuid', sa.String(length=36), nullable=True),
+ sa.Column('chassis_id', sa.Integer(), nullable=True),
+ sa.Column('power_state', sa.String(length=15), nullable=True),
+ sa.Column('target_power_state', sa.String(length=15), nullable=True),
+ sa.Column('provision_state', sa.String(length=15), nullable=True),
+ sa.Column('target_provision_state', sa.String(length=15),
+ nullable=True),
+ sa.Column('last_error', sa.Text(), nullable=True),
+ sa.Column('properties', sa.Text(), nullable=True),
+ sa.Column('driver', sa.String(length=15), nullable=True),
+ sa.Column('driver_info', sa.Text(), nullable=True),
+ sa.Column('reservation', sa.String(length=255), nullable=True),
+ sa.Column('maintenance', sa.Boolean(), nullable=True),
+ sa.Column('extra', sa.Text(), nullable=True),
+ sa.ForeignKeyConstraint(['chassis_id'], ['chassis.id'], ),
+ sa.PrimaryKeyConstraint('id'),
+ sa.UniqueConstraint('uuid', name='uniq_nodes0uuid'),
+ mysql_ENGINE='InnoDB',
+ mysql_DEFAULT_CHARSET='UTF8'
+ )
+ op.create_index('node_instance_uuid', 'nodes', ['instance_uuid'],
+ unique=False)
+ op.create_table(
+ 'ports',
+ sa.Column('created_at', sa.DateTime(), nullable=True),
+ sa.Column('updated_at', sa.DateTime(), nullable=True),
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('uuid', sa.String(length=36), nullable=True),
+ sa.Column('address', sa.String(length=18), nullable=True),
+ sa.Column('node_id', sa.Integer(), nullable=True),
+ sa.Column('extra', sa.Text(), nullable=True),
+ sa.ForeignKeyConstraint(['node_id'], ['nodes.id'], ),
+ sa.PrimaryKeyConstraint('id'),
+ sa.UniqueConstraint('address', name='uniq_ports0address'),
+ sa.UniqueConstraint('uuid', name='uniq_ports0uuid'),
+ mysql_ENGINE='InnoDB',
+ mysql_DEFAULT_CHARSET='UTF8'
+ )
+ # end Alembic commands
+
+
+def downgrade():
+ raise NotImplementedError('Downgrade from the initial migration is'
+ ' unsupported.')
diff --git a/iotronic/db/sqlalchemy/alembic/versions/2fb93ffd2af1_increase_node_name_length.py b/iotronic/db/sqlalchemy/alembic/versions/2fb93ffd2af1_increase_node_name_length.py
new file mode 100644
index 0000000..d73e229
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/2fb93ffd2af1_increase_node_name_length.py
@@ -0,0 +1,42 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""increase-node-name-length
+
+Revision ID: 2fb93ffd2af1
+Revises: 4f399b21ae71
+Create Date: 2015-03-18 17:08:11.470791
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2fb93ffd2af1'
+down_revision = '4f399b21ae71'
+
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+
+def upgrade():
+ op.alter_column('nodes', 'name',
+ existing_type=mysql.VARCHAR(length=63),
+ type_=sa.String(length=255),
+ existing_nullable=True)
+
+
+def downgrade():
+ op.alter_column('nodes', 'name',
+ existing_type=sa.String(length=255),
+ type_=mysql.VARCHAR(length=63),
+ existing_nullable=True)
diff --git a/iotronic/db/sqlalchemy/alembic/versions/31baaf680d2b_add_node_instance_info.py b/iotronic/db/sqlalchemy/alembic/versions/31baaf680d2b_add_node_instance_info.py
new file mode 100644
index 0000000..1e7b5d4
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/31baaf680d2b_add_node_instance_info.py
@@ -0,0 +1,40 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add Node instance info
+
+Revision ID: 31baaf680d2b
+Revises: 3cb628139ea4
+Create Date: 2014-03-05 21:09:32.372463
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '31baaf680d2b'
+down_revision = '3cb628139ea4'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ # commands auto generated by Alembic - please adjust
+ op.add_column('nodes', sa.Column('instance_info',
+ sa.Text(),
+ nullable=True))
+ # end Alembic commands
+
+
+def downgrade():
+ # commands auto generated by Alembic - please adjust
+ op.drop_column('nodes', 'instance_info')
+ # end Alembic commands
diff --git a/iotronic/db/sqlalchemy/alembic/versions/3ae36a5f5131_add_logical_name.py b/iotronic/db/sqlalchemy/alembic/versions/3ae36a5f5131_add_logical_name.py
new file mode 100644
index 0000000..58d58bd
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/3ae36a5f5131_add_logical_name.py
@@ -0,0 +1,37 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""add_logical_name
+
+Revision ID: 3ae36a5f5131
+Revises: bb59b63f55a
+Create Date: 2014-12-10 14:27:26.323540
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3ae36a5f5131'
+down_revision = 'bb59b63f55a'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.add_column('nodes', sa.Column('name', sa.String(length=63),
+ nullable=True))
+ op.create_unique_constraint('uniq_nodes0name', 'nodes', ['name'])
+
+
+def downgrade():
+ op.drop_constraint('uniq_nodes0name', 'nodes', type_='unique')
+ op.drop_column('nodes', 'name')
diff --git a/iotronic/db/sqlalchemy/alembic/versions/3bea56f25597_add_unique_constraint_to_instance_uuid.py b/iotronic/db/sqlalchemy/alembic/versions/3bea56f25597_add_unique_constraint_to_instance_uuid.py
new file mode 100644
index 0000000..6f92ded
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/3bea56f25597_add_unique_constraint_to_instance_uuid.py
@@ -0,0 +1,39 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""add unique constraint to instance_uuid
+
+Revision ID: 3bea56f25597
+Revises: 31baaf680d2b
+Create Date: 2014-06-05 11:45:07.046670
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3bea56f25597'
+down_revision = '31baaf680d2b'
+
+from alembic import op
+
+
+def upgrade():
+ op.create_unique_constraint("uniq_nodes0instance_uuid", "nodes",
+ ["instance_uuid"])
+ op.drop_index('node_instance_uuid', 'nodes')
+
+
+def downgrade():
+ op.drop_constraint("uniq_nodes0instance_uuid", "nodes", type_='unique')
+ op.create_index('node_instance_uuid', 'nodes', ['instance_uuid'])
diff --git a/iotronic/db/sqlalchemy/alembic/versions/3cb628139ea4_nodes_add_console_enabled.py b/iotronic/db/sqlalchemy/alembic/versions/3cb628139ea4_nodes_add_console_enabled.py
new file mode 100644
index 0000000..fb698f1
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/3cb628139ea4_nodes_add_console_enabled.py
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Nodes add console enabled
+
+Revision ID: 3cb628139ea4
+Revises: 21b331f883ef
+Create Date: 2014-02-26 11:24:11.318023
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '3cb628139ea4'
+down_revision = '21b331f883ef'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.add_column('nodes', sa.Column('console_enabled', sa.Boolean()))
+
+
+def downgrade():
+ op.drop_column('nodes', 'console_enabled')
diff --git a/iotronic/db/sqlalchemy/alembic/versions/487deb87cc9d_add_conductor_affinity_and_online.py b/iotronic/db/sqlalchemy/alembic/versions/487deb87cc9d_add_conductor_affinity_and_online.py
new file mode 100644
index 0000000..e02b450
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/487deb87cc9d_add_conductor_affinity_and_online.py
@@ -0,0 +1,45 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""add conductor_affinity and online
+
+Revision ID: 487deb87cc9d
+Revises: 3bea56f25597
+Create Date: 2014-09-26 16:16:30.988900
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '487deb87cc9d'
+down_revision = '3bea56f25597'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.add_column(
+ 'conductors',
+ sa.Column('online', sa.Boolean(), default=True))
+ op.add_column(
+ 'nodes',
+ sa.Column('conductor_affinity', sa.Integer(),
+ sa.ForeignKey('conductors.id',
+ name='nodes_conductor_affinity_fk'),
+ nullable=True))
+
+
+def downgrade():
+ op.drop_constraint('nodes_conductor_affinity_fk', 'nodes',
+ type_='foreignkey')
+ op.drop_column('nodes', 'conductor_affinity')
+ op.drop_column('conductors', 'online')
diff --git a/iotronic/db/sqlalchemy/alembic/versions/4f399b21ae71_add_node_clean_step.py b/iotronic/db/sqlalchemy/alembic/versions/4f399b21ae71_add_node_clean_step.py
new file mode 100644
index 0000000..a8c5798
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/4f399b21ae71_add_node_clean_step.py
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add node.clean_step
+
+Revision ID: 4f399b21ae71
+Revises: 1e1d5ace7dc6
+Create Date: 2015-02-18 01:21:46.062311
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '4f399b21ae71'
+down_revision = '1e1d5ace7dc6'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.add_column('nodes', sa.Column('clean_step', sa.Text(),
+ nullable=True))
+
+
+def downgrade():
+ op.drop_column('nodes', 'clean_step')
diff --git a/iotronic/db/sqlalchemy/alembic/versions/5674c57409b9_replace_nostate_with_available.py b/iotronic/db/sqlalchemy/alembic/versions/5674c57409b9_replace_nostate_with_available.py
new file mode 100644
index 0000000..28f5987
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/5674c57409b9_replace_nostate_with_available.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""replace NOSTATE with AVAILABLE
+
+Revision ID: 5674c57409b9
+Revises: 242cc6a923b3
+Create Date: 2015-01-14 16:55:44.718196
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '5674c57409b9'
+down_revision = '242cc6a923b3'
+
+from alembic import op
+from sqlalchemy import String
+from sqlalchemy.sql import table, column
+
+node = table('nodes',
+ column('uuid', String(36)),
+ column('provision_state', String(15)))
+
+
+# NOTE(deva): We must represent the states as static strings in this migration
+# file, rather than import iotronic.common.states, because that file may change
+# in the future. This migration script must still be able to be run with
+# future versions of the code and still produce the same results.
+AVAILABLE = 'available'
+
+
+def upgrade():
+ op.execute(
+ node.update().where(
+ node.c.provision_state == None).values(
+ {'provision_state': op.inline_literal(AVAILABLE)}))
+
+
+def downgrade():
+ op.execute(
+ node.update().where(
+ node.c.provision_state == op.inline_literal(AVAILABLE)).values(
+ {'provision_state': None}))
diff --git a/iotronic/db/sqlalchemy/alembic/versions/bb59b63f55a_add_node_driver_internal_info.py b/iotronic/db/sqlalchemy/alembic/versions/bb59b63f55a_add_node_driver_internal_info.py
new file mode 100644
index 0000000..46b1aa7
--- /dev/null
+++ b/iotronic/db/sqlalchemy/alembic/versions/bb59b63f55a_add_node_driver_internal_info.py
@@ -0,0 +1,36 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""add_node_driver_internal_info
+
+Revision ID: bb59b63f55a
+Revises: 5674c57409b9
+Create Date: 2015-01-28 14:28:22.212790
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'bb59b63f55a'
+down_revision = '5674c57409b9'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.add_column('nodes', sa.Column('driver_internal_info',
+ sa.Text(),
+ nullable=True))
+
+
+def downgrade():
+ op.drop_column('nodes', 'driver_internal_info')
diff --git a/iotronic/db/sqlalchemy/api.py b/iotronic/db/sqlalchemy/api.py
new file mode 100644
index 0000000..f557aad
--- /dev/null
+++ b/iotronic/db/sqlalchemy/api.py
@@ -0,0 +1,741 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""SQLAlchemy storage backend."""
+
+import collections
+import datetime
+
+from oslo_config import cfg
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import session as db_session
+from oslo_db.sqlalchemy import utils as db_utils
+from oslo_log import log
+from oslo_utils import strutils
+from oslo_utils import timeutils
+from oslo_utils import uuidutils
+from sqlalchemy.orm.exc import NoResultFound
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common.i18n import _LW
+from iotronic.common import states
+from iotronic.common import utils
+from iotronic.db import api
+from iotronic.db.sqlalchemy import models
+
+CONF = cfg.CONF
+CONF.import_opt('heartbeat_timeout',
+ 'iotronic.conductor.manager',
+ group='conductor')
+
+LOG = log.getLogger(__name__)
+
+
+_FACADE = None
+
+
+def _create_facade_lazily():
+ global _FACADE
+ if _FACADE is None:
+ _FACADE = db_session.EngineFacade.from_config(CONF)
+ return _FACADE
+
+
+def get_engine():
+ facade = _create_facade_lazily()
+ return facade.get_engine()
+
+
+def get_session(**kwargs):
+ facade = _create_facade_lazily()
+ return facade.get_session(**kwargs)
+
+
+def get_backend():
+ """The backend is this module itself."""
+ return Connection()
+
+
+def model_query(model, *args, **kwargs):
+ """Query helper for simpler session usage.
+
+ :param session: if present, the session to use
+ """
+
+ session = kwargs.get('session') or get_session()
+ query = session.query(model, *args)
+ return query
+
+
+def add_identity_filter(query, value):
+ """Adds an identity filter to a query.
+
+ Filters results by ID, if supplied value is a valid integer.
+ Otherwise attempts to filter results by UUID.
+
+ :param query: Initial query to add filter to.
+ :param value: Value for filtering results by.
+ :return: Modified query.
+ """
+ if strutils.is_int_like(value):
+ return query.filter_by(id=value)
+ elif uuidutils.is_uuid_like(value):
+ return query.filter_by(uuid=value)
+ else:
+ raise exception.InvalidIdentity(identity=value)
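+
+# Usage sketch (values hypothetical): the identity helpers dispatch on the
+# shape of the identifier, e.g.
+#
+#   query = model_query(models.Board)
+#   add_identity_filter(query, 42)                          # filters by id
+#   add_identity_filter(query, uuidutils.generate_uuid())   # filters by uuid
+#   add_identity_filter(query, 'neither')   # raises InvalidIdentity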
+
+
+def add_port_filter(query, value):
+ """Adds a port-specific filter to a query.
+
+ Filters results by address, if supplied value is a valid MAC
+ address. Otherwise attempts to filter results by identity.
+
+ :param query: Initial query to add filter to.
+ :param value: Value for filtering results by.
+ :return: Modified query.
+ """
+ if utils.is_valid_mac(value):
+ return query.filter_by(address=value)
+ else:
+ return add_identity_filter(query, value)
+
+
+def add_port_filter_by_node(query, value):
+ if strutils.is_int_like(value):
+ return query.filter_by(node_id=value)
+ else:
+ query = query.join(models.Board,
+ models.Port.node_id == models.Board.id)
+ return query.filter(models.Board.uuid == value)
+
+
+def add_node_filter_by_chassis(query, value):
+ if strutils.is_int_like(value):
+ return query.filter_by(chassis_id=value)
+ else:
+ query = query.join(models.Chassis,
+ models.Board.chassis_id == models.Chassis.id)
+ return query.filter(models.Chassis.uuid == value)
+
+
+def _paginate_query(model, limit=None, marker=None, sort_key=None,
+ sort_dir=None, query=None):
+ if not query:
+ query = model_query(model)
+ sort_keys = ['id']
+ if sort_key and sort_key not in sort_keys:
+ sort_keys.insert(0, sort_key)
+ try:
+ query = db_utils.paginate_query(query, model, limit, sort_keys,
+ marker=marker, sort_dir=sort_dir)
+ except db_exc.InvalidSortKey:
+ raise exception.InvalidParameterValue(
+ _('The sort_key value "%(key)s" is an invalid field for sorting')
+ % {'key': sort_key})
+ return query.all()
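+
+# Usage sketch (arguments hypothetical): callers page through results with
+# a marker row and a sort key, e.g.
+#
+#   _paginate_query(models.Board, limit=50, marker=last_board,
+#                   sort_key='created_at', sort_dir='asc')
+#
+# 'id' is always kept as a trailing sort key so the ordering is stable.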
+
+
+class Connection(api.Connection):
+ """SqlAlchemy connection."""
+
+ def __init__(self):
+ pass
+
+ def _add_nodes_filters(self, query, filters):
+ if filters is None:
+ filters = []
+
+ if 'chassis_uuid' in filters:
+ # get_chassis_by_uuid() raises an exception if the chassis
+ # is not found
+ chassis_obj = self.get_chassis_by_uuid(filters['chassis_uuid'])
+ query = query.filter_by(chassis_id=chassis_obj.id)
+ if 'associated' in filters:
+ if filters['associated']:
+ query = query.filter(models.Board.instance_uuid != None)
+ else:
+ query = query.filter(models.Board.instance_uuid == None)
+ if 'reserved' in filters:
+ if filters['reserved']:
+ query = query.filter(models.Board.reservation != None)
+ else:
+ query = query.filter(models.Board.reservation == None)
+ if 'maintenance' in filters:
+ query = query.filter_by(maintenance=filters['maintenance'])
+ if 'driver' in filters:
+ query = query.filter_by(driver=filters['driver'])
+ if 'provision_state' in filters:
+ query = query.filter_by(provision_state=filters['provision_state'])
+ if 'provisioned_before' in filters:
+ limit = (timeutils.utcnow() -
+ datetime.timedelta(seconds=filters['provisioned_before']))
+ query = query.filter(models.Board.provision_updated_at < limit)
+ if 'inspection_started_before' in filters:
+ limit = (timeutils.utcnow() -
+ datetime.timedelta(
+ seconds=filters['inspection_started_before']))
+ query = query.filter(models.Board.inspection_started_at < limit)
+
+ return query
+
+ def get_nodeinfo_list(self, columns=None, filters=None, limit=None,
+ marker=None, sort_key=None, sort_dir=None):
+ # list-ify columns default values because it is bad form
+ # to include a mutable list in function definitions.
+ if columns is None:
+ columns = [models.Board.id]
+ else:
+ columns = [getattr(models.Board, c) for c in columns]
+
+ query = model_query(*columns, base_model=models.Board)
+ query = self._add_nodes_filters(query, filters)
+ return _paginate_query(models.Board, limit, marker,
+ sort_key, sort_dir, query)
+
+ def get_node_list(self, filters=None, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ query = model_query(models.Board)
+ query = self._add_nodes_filters(query, filters)
+ return _paginate_query(models.Board, limit, marker,
+ sort_key, sort_dir, query)
+
+ def reserve_node(self, tag, node_id):
+ session = get_session()
+ with session.begin():
+ query = model_query(models.Board, session=session)
+ query = add_identity_filter(query, node_id)
+ # be optimistic and assume we usually create a reservation
+ count = query.filter_by(reservation=None).update(
+ {'reservation': tag}, synchronize_session=False)
+ try:
+ node = query.one()
+ if count != 1:
+ # Nothing updated and node exists. Must already be
+ # locked.
+ raise exception.NodeLocked(node=node_id,
+ host=node['reservation'])
+ return node
+ except NoResultFound:
+ raise exception.NodeNotFound(node_id)
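+
+ # reserve_node() above relies on an optimistic compare-and-swap: the
+ # UPDATE succeeds only while reservation is still NULL, so concurrent
+ # conductors cannot both take the lock; the loser re-reads the row and
+ # raises NodeLocked naming the current holder.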
+
+ def release_node(self, tag, node_id):
+ session = get_session()
+ with session.begin():
+ query = model_query(models.Board, session=session)
+ query = add_identity_filter(query, node_id)
+ # be optimistic and assume we usually release a reservation
+ count = query.filter_by(reservation=tag).update(
+ {'reservation': None}, synchronize_session=False)
+ try:
+ if count != 1:
+ node = query.one()
+ if node['reservation'] is None:
+ raise exception.NodeNotLocked(node=node_id)
+ else:
+ raise exception.NodeLocked(node=node_id,
+ host=node['reservation'])
+ except NoResultFound:
+ raise exception.NodeNotFound(node_id)
+
+ def create_node(self, values):
+ # ensure defaults are present for new nodes
+ if 'uuid' not in values:
+ values['uuid'] = uuidutils.generate_uuid()
+ if 'power_state' not in values:
+ values['power_state'] = states.NOSTATE
+ if 'provision_state' not in values:
+ # TODO(deva): change this to ENROLL
+ values['provision_state'] = states.AVAILABLE
+
+ node = models.Board()
+ node.update(values)
+ try:
+ node.save()
+ except db_exc.DBDuplicateEntry as exc:
+ if 'name' in exc.columns:
+ raise exception.DuplicateName(name=values['name'])
+ elif 'instance_uuid' in exc.columns:
+ raise exception.InstanceAssociated(
+ instance_uuid=values['instance_uuid'],
+ node=values['uuid'])
+ raise exception.NodeAlreadyExists(uuid=values['uuid'])
+ return node
+
+ def get_node_by_id(self, node_id):
+ query = model_query(models.Board).filter_by(id=node_id)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.NodeNotFound(node=node_id)
+
+ def get_node_by_uuid(self, node_uuid):
+ query = model_query(models.Board).filter_by(uuid=node_uuid)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.NodeNotFound(node=node_uuid)
+
+ def get_node_by_name(self, node_name):
+ query = model_query(models.Board).filter_by(name=node_name)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.NodeNotFound(node=node_name)
+
+ def get_node_by_instance(self, instance):
+ if not uuidutils.is_uuid_like(instance):
+ raise exception.InvalidUUID(uuid=instance)
+
+ query = (model_query(models.Board)
+ .filter_by(instance_uuid=instance))
+
+ try:
+ result = query.one()
+ except NoResultFound:
+ raise exception.InstanceNotFound(instance=instance)
+
+ return result
+
+ def destroy_node(self, node_id):
+ session = get_session()
+ with session.begin():
+ query = model_query(models.Board, session=session)
+ query = add_identity_filter(query, node_id)
+
+ try:
+ node_ref = query.one()
+ except NoResultFound:
+ raise exception.NodeNotFound(node=node_id)
+
+ # Get the node ID if a UUID was supplied. The ID is required
+ # for deleting all ports attached to the node.
+ if uuidutils.is_uuid_like(node_id):
+ node_id = node_ref['id']
+
+ #port_query = model_query(models.Port, session=session)
+ #port_query = add_port_filter_by_node(port_query, node_id)
+ #port_query.delete()
+
+ query.delete()
+
+ def update_node(self, node_id, values):
+ # NOTE(dtantsur): this can lead to very strange errors
+ if 'uuid' in values:
+ msg = _("Cannot overwrite UUID for an existing Board.")
+ raise exception.InvalidParameterValue(err=msg)
+
+ try:
+ return self._do_update_node(node_id, values)
+ except db_exc.DBDuplicateEntry as e:
+ if 'name' in e.columns:
+ raise exception.DuplicateName(name=values['name'])
+ elif 'uuid' in e.columns:
+ raise exception.NodeAlreadyExists(uuid=values['uuid'])
+ elif 'instance_uuid' in e.columns:
+ raise exception.InstanceAssociated(
+ instance_uuid=values['instance_uuid'],
+ node=node_id)
+ else:
+ raise e
+
+ def _do_update_node(self, node_id, values):
+ session = get_session()
+ with session.begin():
+ query = model_query(models.Board, session=session)
+ query = add_identity_filter(query, node_id)
+ try:
+ ref = query.with_lockmode('update').one()
+ except NoResultFound:
+ raise exception.NodeNotFound(node=node_id)
+
+ # Prevent instance_uuid overwriting
+ if values.get("instance_uuid") and ref.instance_uuid:
+ raise exception.NodeAssociated(
+ node=node_id, instance=ref.instance_uuid)
+
+ if 'provision_state' in values:
+ values['provision_updated_at'] = timeutils.utcnow()
+ if values['provision_state'] == states.INSPECTING:
+ values['inspection_started_at'] = timeutils.utcnow()
+ values['inspection_finished_at'] = None
+ elif (ref.provision_state == states.INSPECTING and
+ values['provision_state'] == states.MANAGEABLE):
+ values['inspection_finished_at'] = timeutils.utcnow()
+ values['inspection_started_at'] = None
+ elif (ref.provision_state == states.INSPECTING and
+ values['provision_state'] == states.INSPECTFAIL):
+ values['inspection_started_at'] = None
+
+ ref.update(values)
+ return ref
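+
+ # The bookkeeping in _do_update_node() means that a transition into
+ # INSPECTING stamps inspection_started_at, INSPECTING -> MANAGEABLE
+ # moves the stamp to inspection_finished_at, and
+ # INSPECTING -> INSPECTFAIL simply clears inspection_started_at.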
+
+ def get_port_by_id(self, port_id):
+ query = model_query(models.Port).filter_by(id=port_id)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.PortNotFound(port=port_id)
+
+ def get_port_by_uuid(self, port_uuid):
+ query = model_query(models.Port).filter_by(uuid=port_uuid)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.PortNotFound(port=port_uuid)
+
+ def get_port_by_address(self, address):
+ query = model_query(models.Port).filter_by(address=address)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.PortNotFound(port=address)
+
+ def get_port_list(self, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ return _paginate_query(models.Port, limit, marker,
+ sort_key, sort_dir)
+
+ def get_ports_by_node_id(self, node_id, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ query = model_query(models.Port)
+ query = query.filter_by(node_id=node_id)
+ return _paginate_query(models.Port, limit, marker,
+ sort_key, sort_dir, query)
+
+ def create_port(self, values):
+ if not values.get('uuid'):
+ values['uuid'] = uuidutils.generate_uuid()
+ port = models.Port()
+ port.update(values)
+ try:
+ port.save()
+ except db_exc.DBDuplicateEntry as exc:
+ if 'address' in exc.columns:
+ raise exception.MACAlreadyExists(mac=values['address'])
+ raise exception.PortAlreadyExists(uuid=values['uuid'])
+ return port
+
+ def update_port(self, port_id, values):
+ # NOTE(dtantsur): this can lead to very strange errors
+ if 'uuid' in values:
+ msg = _("Cannot overwrite UUID for an existing Port.")
+ raise exception.InvalidParameterValue(err=msg)
+
+ session = get_session()
+ try:
+ with session.begin():
+ query = model_query(models.Port, session=session)
+ query = add_port_filter(query, port_id)
+ ref = query.one()
+ ref.update(values)
+ except NoResultFound:
+ raise exception.PortNotFound(port=port_id)
+ except db_exc.DBDuplicateEntry:
+ raise exception.MACAlreadyExists(mac=values['address'])
+ return ref
+
+ def destroy_port(self, port_id):
+ session = get_session()
+ with session.begin():
+ query = model_query(models.Port, session=session)
+ query = add_port_filter(query, port_id)
+ count = query.delete()
+ if count == 0:
+ raise exception.PortNotFound(port=port_id)
+
+ def get_chassis_by_id(self, chassis_id):
+ query = model_query(models.Chassis).filter_by(id=chassis_id)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.ChassisNotFound(chassis=chassis_id)
+
+ def get_chassis_by_uuid(self, chassis_uuid):
+ query = model_query(models.Chassis).filter_by(uuid=chassis_uuid)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.ChassisNotFound(chassis=chassis_uuid)
+
+ def get_chassis_list(self, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ return _paginate_query(models.Chassis, limit, marker,
+ sort_key, sort_dir)
+
+ def create_chassis(self, values):
+ if not values.get('uuid'):
+ values['uuid'] = uuidutils.generate_uuid()
+ chassis = models.Chassis()
+ chassis.update(values)
+ try:
+ chassis.save()
+ except db_exc.DBDuplicateEntry:
+ raise exception.ChassisAlreadyExists(uuid=values['uuid'])
+ return chassis
+
+ def update_chassis(self, chassis_id, values):
+ # NOTE(dtantsur): this can lead to very strange errors
+ if 'uuid' in values:
+ msg = _("Cannot overwrite UUID for an existing Chassis.")
+ raise exception.InvalidParameterValue(err=msg)
+
+ session = get_session()
+ with session.begin():
+ query = model_query(models.Chassis, session=session)
+ query = add_identity_filter(query, chassis_id)
+
+ count = query.update(values)
+ if count != 1:
+ raise exception.ChassisNotFound(chassis=chassis_id)
+ ref = query.one()
+ return ref
+
+ def destroy_chassis(self, chassis_id):
+ def chassis_not_empty(session):
+ """Checks whether the chassis does not have nodes."""
+
+ query = model_query(models.Board, session=session)
+ query = add_node_filter_by_chassis(query, chassis_id)
+
+ return query.count() != 0
+
+ session = get_session()
+ with session.begin():
+ if chassis_not_empty(session):
+ raise exception.ChassisNotEmpty(chassis=chassis_id)
+
+ query = model_query(models.Chassis, session=session)
+ query = add_identity_filter(query, chassis_id)
+
+ count = query.delete()
+ if count != 1:
+ raise exception.ChassisNotFound(chassis=chassis_id)
+
+ def register_conductor(self, values, update_existing=False):
+ session = get_session()
+ with session.begin():
+ query = (model_query(models.Conductor, session=session)
+ .filter_by(hostname=values['hostname']))
+ try:
+ ref = query.one()
+ if ref.online is True and not update_existing:
+ raise exception.ConductorAlreadyRegistered(
+ conductor=values['hostname'])
+ except NoResultFound:
+ ref = models.Conductor()
+ ref.update(values)
+ # always set online and updated_at fields when registering
+ # a conductor, especially when updating an existing one
+ ref.update({'updated_at': timeutils.utcnow(),
+ 'online': True})
+ ref.save(session)
+ return ref
+
+ def get_conductor(self, hostname):
+ try:
+ return (model_query(models.Conductor)
+ .filter_by(hostname=hostname, online=True)
+ .one())
+ except NoResultFound:
+ raise exception.ConductorNotFound(conductor=hostname)
+
+ def unregister_conductor(self, hostname):
+ session = get_session()
+ with session.begin():
+ query = (model_query(models.Conductor, session=session)
+ .filter_by(hostname=hostname, online=True))
+ count = query.update({'online': False})
+ if count == 0:
+ raise exception.ConductorNotFound(conductor=hostname)
+
+ def touch_conductor(self, hostname):
+ session = get_session()
+ with session.begin():
+ query = (model_query(models.Conductor, session=session)
+ .filter_by(hostname=hostname))
+ # since we're not changing any other field, manually set updated_at
+ # and since we're heartbeating, make sure that online=True
+ count = query.update({'updated_at': timeutils.utcnow(),
+ 'online': True})
+ if count == 0:
+ raise exception.ConductorNotFound(conductor=hostname)
+
+ def clear_node_reservations_for_conductor(self, hostname):
+ session = get_session()
+ nodes = []
+ with session.begin():
+ query = (model_query(models.Board, session=session)
+ .filter_by(reservation=hostname))
+ nodes = [node['uuid'] for node in query]
+ query.update({'reservation': None})
+
+ if nodes:
+ nodes = ', '.join(nodes)
+ LOG.warn(_LW('Cleared reservations held by %(hostname)s: '
+ '%(nodes)s'), {'hostname': hostname, 'nodes': nodes})
+
+ def get_active_driver_dict(self, interval=None):
+ if interval is None:
+ interval = CONF.conductor.heartbeat_timeout
+
+ limit = timeutils.utcnow() - datetime.timedelta(seconds=interval)
+ result = (model_query(models.Conductor)
+ .filter_by(online=True)
+ .filter(models.Conductor.updated_at >= limit)
+ .all())
+
+ # build mapping of drivers to the set of hosts which support them
+ d2c = collections.defaultdict(set)
+ for row in result:
+ for driver in row['drivers']:
+ d2c[driver].add(row['hostname'])
+ return d2c
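+
+ # The result maps each driver to the set of live conductors advertising
+ # it, e.g. (hostnames hypothetical):
+ # {'fake': set(['cond-1', 'cond-2']), 'ssh': set(['cond-2'])}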
+
+
+    ###################### NEW #############################
+ def _add_boards_filters(self, query, filters):
+ if filters is None:
+ filters = []
+
+ if 'chassis_uuid' in filters:
+ # get_chassis_by_uuid() raises an exception if the chassis
+ # is not found
+ chassis_obj = self.get_chassis_by_uuid(filters['chassis_uuid'])
+ query = query.filter_by(chassis_id=chassis_obj.id)
+ if 'associated' in filters:
+ if filters['associated']:
+ query = query.filter(models.Board.instance_uuid != None)
+ else:
+ query = query.filter(models.Board.instance_uuid == None)
+ if 'reserved' in filters:
+ if filters['reserved']:
+ query = query.filter(models.Board.reservation != None)
+ else:
+ query = query.filter(models.Board.reservation == None)
+ if 'maintenance' in filters:
+ query = query.filter_by(maintenance=filters['maintenance'])
+ if 'driver' in filters:
+ query = query.filter_by(driver=filters['driver'])
+ if 'provision_state' in filters:
+ query = query.filter_by(provision_state=filters['provision_state'])
+ if 'provisioned_before' in filters:
+ limit = (timeutils.utcnow() -
+ datetime.timedelta(seconds=filters['provisioned_before']))
+ query = query.filter(models.Board.provision_updated_at < limit)
+ if 'inspection_started_before' in filters:
+ limit = (timeutils.utcnow() -
+ datetime.timedelta(
+ seconds=filters['inspection_started_before']))
+ query = query.filter(models.Board.inspection_started_at < limit)
+
+ return query
+
+ def get_board_by_uuid(self, board_uuid):
+ query = model_query(models.Board).filter_by(uuid=board_uuid)
+ try:
+ return query.one()
+ except NoResultFound:
+ raise exception.BoardNotFound(board=board_uuid)
+
+ def get_board_list(self, filters=None, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ query = model_query(models.Board)
+ query = self._add_boards_filters(query, filters)
+ return _paginate_query(models.Board, limit, marker,
+ sort_key, sort_dir, query)
+
+ def reserve_board(self, tag, board_id):
+ session = get_session()
+ with session.begin():
+ query = model_query(models.Board, session=session)
+ query = add_identity_filter(query, board_id)
+ # be optimistic and assume we usually create a reservation
+ count = query.filter_by(reservation=None).update(
+ {'reservation': tag}, synchronize_session=False)
+ try:
+ board = query.one()
+ if count != 1:
+ # Nothing updated and board exists. Must already be
+ # locked.
+ raise exception.BoardLocked(board=board_id,
+ host=board['reservation'])
+ return board
+ except NoResultFound:
+ raise exception.BoardNotFound(board_id)
+
+ def release_board(self, tag, board_id):
+ session = get_session()
+ with session.begin():
+ query = model_query(models.Board, session=session)
+ query = add_identity_filter(query, board_id)
+ # be optimistic and assume we usually release a reservation
+ count = query.filter_by(reservation=tag).update(
+ {'reservation': None}, synchronize_session=False)
+ try:
+ if count != 1:
+ board = query.one()
+ if board['reservation'] is None:
+ raise exception.BoardNotLocked(board=board_id)
+ else:
+ raise exception.BoardLocked(board=board_id,
+ host=board['reservation'])
+ except NoResultFound:
+ raise exception.BoardNotFound(board_id)
+
+ def destroy_board(self, board_id):
+ session = get_session()
+ with session.begin():
+ query = model_query(models.Board, session=session)
+ query = add_identity_filter(query, board_id)
+
+ try:
+ board_ref = query.one()
+ except NoResultFound:
+ raise exception.BoardNotFound(board=board_id)
+
+ # Get the board ID if a UUID was supplied. The ID is required
+ # for deleting all ports attached to the board.
+ if uuidutils.is_uuid_like(board_id):
+ board_id = board_ref['id']
+
+ query.delete()
+
+ def create_board(self, values):
+ # ensure defaults are present for new boards
+ if 'uuid' not in values:
+ values['uuid'] = uuidutils.generate_uuid()
+ if 'power_state' not in values:
+ values['power_state'] = states.NOSTATE
+ if 'provision_state' not in values:
+ # TODO(deva): change this to ENROLL
+ values['provision_state'] = states.AVAILABLE
+
+ board = models.Board()
+ board.update(values)
+ try:
+ board.save()
+ except db_exc.DBDuplicateEntry as exc:
+ if 'name' in exc.columns:
+ raise exception.DuplicateName(name=values['name'])
+ elif 'instance_uuid' in exc.columns:
+ raise exception.InstanceAssociated(
+ instance_uuid=values['instance_uuid'],
+ board=values['uuid'])
+ raise exception.BoardAlreadyExists(uuid=values['uuid'])
+ return board
\ No newline at end of file
diff --git a/iotronic/db/sqlalchemy/migration.py b/iotronic/db/sqlalchemy/migration.py
new file mode 100644
index 0000000..a2a39f3
--- /dev/null
+++ b/iotronic/db/sqlalchemy/migration.py
@@ -0,0 +1,113 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import alembic
+from alembic import config as alembic_config
+import alembic.migration as alembic_migration
+from oslo_db import exception as db_exc
+
+from iotronic.db.sqlalchemy import api as sqla_api
+from iotronic.db.sqlalchemy import models
+
+
+def _alembic_config():
+ path = os.path.join(os.path.dirname(__file__), 'alembic.ini')
+ config = alembic_config.Config(path)
+ return config
+
+
+def version(config=None, engine=None):
+ """Current database version.
+
+ :returns: Database version
+ :rtype: string
+ """
+ if engine is None:
+ engine = sqla_api.get_engine()
+ with engine.connect() as conn:
+ context = alembic_migration.MigrationContext.configure(conn)
+ return context.get_current_revision()
+
+
+def upgrade(revision, config=None):
+ """Used for upgrading database.
+
+ :param version: Desired database version
+ :type version: string
+ """
+ revision = revision or 'head'
+ config = config or _alembic_config()
+
+ alembic.command.upgrade(config, revision)
+
+
+def create_schema(config=None, engine=None):
+ """Create database schema from models description.
+
+ Can be used for initial installation instead of upgrade('head').
+ """
+ if engine is None:
+ engine = sqla_api.get_engine()
+
+ # NOTE(viktors): If we used metadata.create_all() on a non-empty db
+ # schema, it would only add the new tables and leave the
+ # existing ones as they are, so we should avoid that situation.
+ if version(engine=engine) is not None:
+ raise db_exc.DbMigrationError("DB schema is already under version"
+ " control. Use upgrade() instead")
+
+ models.Base.metadata.create_all(engine)
+ stamp('head', config=config)
+
+
+def downgrade(revision, config=None):
+ """Used for downgrading database.
+
+ :param version: Desired database version
+ :type version: string
+ """
+ revision = revision or 'base'
+ config = config or _alembic_config()
+ return alembic.command.downgrade(config, revision)
+
+
+def stamp(revision, config=None):
+ """Stamps database with provided revision.
+
+ Don't run any migrations.
+
+ :param revision: Should match a revision from the repository, or
+ 'head' to stamp the database with the most recent revision
+ :type revision: string
+ """
+ config = config or _alembic_config()
+ return alembic.command.stamp(config, revision=revision)
+
+
+def revision(message=None, autogenerate=False, config=None):
+ """Creates template for migration.
+
+ :param message: Text that will be used for migration title
+ :type message: string
+ :param autogenerate: If True, generates the migration diff based on the
+ current database state
+ :type autogenerate: bool
+ """
+ config = config or _alembic_config()
+ return alembic.command.revision(config, message=message,
+ autogenerate=autogenerate)
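+
+# Minimal usage sketch (assumes a configured [database] connection in the
+# service configuration):
+#
+#   from iotronic.db.sqlalchemy import migration
+#   migration.upgrade('head')      # apply all pending migrations
+#   print(migration.version())    # e.g. '2fb93ffd2af1'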
diff --git a/iotronic/db/sqlalchemy/models.py b/iotronic/db/sqlalchemy/models.py
new file mode 100644
index 0000000..b270168
--- /dev/null
+++ b/iotronic/db/sqlalchemy/models.py
@@ -0,0 +1,234 @@
+# -*- encoding: utf-8 -*-
+#
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+SQLAlchemy models for baremetal data.
+"""
+
+import json
+
+from oslo_config import cfg
+from oslo_db import options as db_options
+from oslo_db.sqlalchemy import models
+import six.moves.urllib.parse as urlparse
+from sqlalchemy import Boolean, Column, DateTime
+from sqlalchemy import ForeignKey, Integer
+from sqlalchemy import schema, String, Text
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.types import TypeDecorator, TEXT
+
+from iotronic.common import paths
+
+
+sql_opts = [
+ cfg.StrOpt('mysql_engine',
+ default='InnoDB',
+ help='MySQL engine to use.')
+]
+
+_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('iotronic.sqlite')
+
+
+cfg.CONF.register_opts(sql_opts, 'database')
+db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'iotronic.sqlite')
+
+
+def table_args():
+ engine_name = urlparse.urlparse(cfg.CONF.database.connection).scheme
+ if engine_name == 'mysql':
+ return {'mysql_engine': cfg.CONF.database.mysql_engine,
+ 'mysql_charset': "utf8"}
+ return None
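+
+# For a mysql:// connection string this returns, with the defaults above,
+# {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}; for other engines
+# (such as the sqlite default) it returns None.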
+
+
+class JsonEncodedType(TypeDecorator):
+ """Abstract base type serialized as json-encoded string in db."""
+ type = None
+ impl = TEXT
+
+ def process_bind_param(self, value, dialect):
+ if value is None:
+ # Save a default value according to the current type to keep
+ # the interface consistent.
+ value = self.type()
+ elif not isinstance(value, self.type):
+ raise TypeError("%s supposes to store %s objects, but %s given"
+ % (self.__class__.__name__,
+ self.type.__name__,
+ type(value).__name__))
+ serialized_value = json.dumps(value)
+ return serialized_value
+
+ def process_result_value(self, value, dialect):
+ if value is not None:
+ value = json.loads(value)
+ return value
+
+
+class JSONEncodedDict(JsonEncodedType):
+ """Represents dict serialized as json-encoded string in db."""
+ type = dict
+
+
+class JSONEncodedList(JsonEncodedType):
+ """Represents list serialized as json-encoded string in db."""
+ type = list
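+
+# Round-trip illustration (values hypothetical): a JSONEncodedDict column
+# stores {'ipmi_address': '1.2.3.4'} as the string
+# '{"ipmi_address": "1.2.3.4"}' and decodes it back to a dict on load;
+# None is replaced by the type's empty default ({} or []) before
+# serialization.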
+
+
+class IotronicBase(models.TimestampMixin,
+ models.ModelBase):
+
+ metadata = None
+
+ def as_dict(self):
+ d = {}
+ for c in self.__table__.columns:
+ d[c.name] = self[c.name]
+ return d
+
+ def save(self, session=None):
+ import iotronic.db.sqlalchemy.api as db_api
+
+ if session is None:
+ session = db_api.get_session()
+
+ super(IotronicBase, self).save(session)
+
+Base = declarative_base(cls=IotronicBase)
+
+
+class Chassis(Base):
+ """Represents a hardware chassis."""
+
+ __tablename__ = 'chassis'
+ __table_args__ = (
+ schema.UniqueConstraint('uuid', name='uniq_chassis0uuid'),
+ table_args()
+ )
+ id = Column(Integer, primary_key=True)
+ uuid = Column(String(36))
+ extra = Column(JSONEncodedDict)
+ description = Column(String(255), nullable=True)
+
+
+class Conductor(Base):
+ """Represents a conductor service entry."""
+
+ __tablename__ = 'conductors'
+ __table_args__ = (
+ schema.UniqueConstraint('hostname', name='uniq_conductors0hostname'),
+ table_args()
+ )
+ id = Column(Integer, primary_key=True)
+ hostname = Column(String(255), nullable=False)
+ drivers = Column(JSONEncodedList)
+ online = Column(Boolean, default=True)
+
+
+class Node(Base):
+ """Represents a bare metal node."""
+
+ __tablename__ = 'nodes'
+ '''
+ __table_args__ = (
+ schema.UniqueConstraint('uuid', name='uniq_nodes0uuid'),
+ schema.UniqueConstraint('instance_uuid',
+ name='uniq_nodes0instance_uuid'),
+ schema.UniqueConstraint('name', name='uniq_nodes0name'),
+ table_args())
+ '''
+ id = Column(Integer, primary_key=True)
+ uuid = Column(String(36))
+ # NOTE(deva): we store instance_uuid directly on the node so that we can
+ # filter on it more efficiently, even though it is
+ # user-settable, and would otherwise be in node.properties.
+ instance_uuid = Column(String(36), nullable=True)
+ name = Column(String(255), nullable=True)
+ status = Column(String(10), nullable=True)
+ #chassis_id = Column(Integer, ForeignKey('chassis.id'), nullable=True)
+ #power_state = Column(String(15), nullable=True)
+ #target_power_state = Column(String(15), nullable=True)
+ #provision_state = Column(String(15), nullable=True)
+ #target_provision_state = Column(String(15), nullable=True)
+ #provision_updated_at = Column(DateTime, nullable=True)
+ #last_error = Column(Text, nullable=True)
+ #instance_info = Column(JSONEncodedDict)
+ #properties = Column(JSONEncodedDict)
+ #driver = Column(String(15))
+ #driver_info = Column(JSONEncodedDict)
+ #driver_internal_info = Column(JSONEncodedDict)
+ #clean_step = Column(JSONEncodedDict)
+
+ # NOTE(deva): this is the host name of the conductor which has
+ # acquired a TaskManager lock on the node.
+ # We should use an INT FK (conductors.id) in the future.
+ reservation = Column(String(255), nullable=True)
+
+ # NOTE(deva): this is the id of the last conductor which prepared local
+ # state for the node (eg, a PXE config file).
+ # When affinity and the hash ring's mapping do not match,
+ # this indicates that a conductor should rebuild local state.
+ '''
+ conductor_affinity = Column(Integer,
+ ForeignKey('conductors.id',
+ name='nodes_conductor_affinity_fk'),
+ nullable=True)
+ '''
+ #maintenance = Column(Boolean, default=False)
+ #maintenance_reason = Column(Text, nullable=True)
+ #console_enabled = Column(Boolean, default=False)
+ #inspection_finished_at = Column(DateTime, nullable=True)
+ #inspection_started_at = Column(DateTime, nullable=True)
+ #extra = Column(JSONEncodedDict)
+
+
+class Port(Base):
+ """Represents a network port of a bare metal node."""
+
+ __tablename__ = 'ports'
+ __table_args__ = (
+ schema.UniqueConstraint('address', name='uniq_ports0address'),
+ schema.UniqueConstraint('uuid', name='uniq_ports0uuid'),
+ table_args())
+ id = Column(Integer, primary_key=True)
+ uuid = Column(String(36))
+ address = Column(String(18))
+ node_id = Column(Integer, ForeignKey('nodes.id'), nullable=True)
+ extra = Column(JSONEncodedDict)
+
+
+
+##################### NEW
+class Board(Base):
+ """Represents a board."""
+
+ __tablename__ = 'boards'
+ '''
+ __table_args__ = (
+ schema.UniqueConstraint('uuid', name='uniq_nodes0uuid'),
+ schema.UniqueConstraint('instance_uuid',
+ name='uniq_nodes0instance_uuid'),
+ schema.UniqueConstraint('name', name='uniq_nodes0name'),
+ table_args())
+ '''
+ id = Column(Integer, primary_key=True)
+ uuid = Column(String(36))
+ # NOTE(deva): we store instance_uuid directly on the node so that we can
+ # filter on it more efficiently, even though it is
+ # user-settable, and would otherwise be in node.properties.
+ instance_uuid = Column(String(36), nullable=True)
+ name = Column(String(255), nullable=True)
+ status = Column(String(10), nullable=True)
+ reservation = Column(String(255), nullable=True)
+
diff --git a/iotronic/objects/__init__.py b/iotronic/objects/__init__.py
new file mode 100644
index 0000000..927ad82
--- /dev/null
+++ b/iotronic/objects/__init__.py
@@ -0,0 +1,34 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+#from iotronic.objects import chassis
+from iotronic.objects import conductor
+#from iotronic.objects import node
+from iotronic.objects import board
+#from iotronic.objects import port
+
+
+#Chassis = chassis.Chassis
+Conductor = conductor.Conductor
+Board = board.Board
+#Node = node.Node
+#Port = port.Port
+
+__all__ = (
+ #'Chassis',
+ 'Conductor',
+ #'Node',
+ 'Board',
+ #'Port'
+ )
diff --git a/iotronic/objects/__old/chassis.py b/iotronic/objects/__old/chassis.py
new file mode 100644
index 0000000..a131e49
--- /dev/null
+++ b/iotronic/objects/__old/chassis.py
@@ -0,0 +1,186 @@
+# coding=utf-8
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import strutils
+from oslo_utils import uuidutils
+
+from iotronic.common import exception
+from iotronic.db import api as dbapi
+from iotronic.objects import base
+from iotronic.objects import utils as obj_utils
+
+
+class Chassis(base.IotronicObject):
+ # Version 1.0: Initial version
+ # Version 1.1: Add get() and get_by_id() and make get_by_uuid()
+ # only work with a uuid
+ # Version 1.2: Add create() and destroy()
+ # Version 1.3: Add list()
+ VERSION = '1.3'
+
+ dbapi = dbapi.get_instance()
+
+ fields = {
+ 'id': int,
+ 'uuid': obj_utils.str_or_none,
+ 'extra': obj_utils.dict_or_none,
+ 'description': obj_utils.str_or_none,
+ }
+
+ @staticmethod
+ def _from_db_object(chassis, db_chassis):
+ """Converts a database entity to a formal :class:`Chassis` object.
+
+ :param chassis: An object of :class:`Chassis`.
+ :param db_chassis: A DB model of a chassis.
+ :return: a :class:`Chassis` object.
+ """
+ for field in chassis.fields:
+ chassis[field] = db_chassis[field]
+
+ chassis.obj_reset_changes()
+ return chassis
+
+ @base.remotable_classmethod
+ def get(cls, context, chassis_id):
+ """Find a chassis based on its id or uuid and return a Chassis object.
+
+ :param chassis_id: the id *or* uuid of a chassis.
+ :returns: a :class:`Chassis` object.
+ """
+ if strutils.is_int_like(chassis_id):
+ return cls.get_by_id(context, chassis_id)
+ elif uuidutils.is_uuid_like(chassis_id):
+ return cls.get_by_uuid(context, chassis_id)
+ else:
+ raise exception.InvalidIdentity(identity=chassis_id)
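+
+ # For example (identifiers hypothetical): Chassis.get(ctxt, 42) delegates
+ # to get_by_id(), Chassis.get(ctxt, 'f6b2...') to get_by_uuid(), and
+ # anything else raises InvalidIdentity.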
+
+ @base.remotable_classmethod
+ def get_by_id(cls, context, chassis_id):
+ """Find a chassis based on its integer id and return a Chassis object.
+
+ :param chassis_id: the id of a chassis.
+ :returns: a :class:`Chassis` object.
+ """
+ db_chassis = cls.dbapi.get_chassis_by_id(chassis_id)
+ chassis = Chassis._from_db_object(cls(context), db_chassis)
+ return chassis
+
+ @base.remotable_classmethod
+ def get_by_uuid(cls, context, uuid):
+ """Find a chassis based on uuid and return a :class:`Chassis` object.
+
+ :param uuid: the uuid of a chassis.
+ :param context: Security context
+ :returns: a :class:`Chassis` object.
+ """
+ db_chassis = cls.dbapi.get_chassis_by_uuid(uuid)
+ chassis = Chassis._from_db_object(cls(context), db_chassis)
+ return chassis
+
+ @base.remotable_classmethod
+ def list(cls, context, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ """Return a list of Chassis objects.
+
+ :param context: Security context.
+ :param limit: maximum number of resources to return in a single result.
+ :param marker: pagination marker for large data sets.
+ :param sort_key: column to sort results by.
+ :param sort_dir: direction to sort. "asc" or "desc".
+ :returns: a list of :class:`Chassis` objects.
+
+ """
+ db_chassis = cls.dbapi.get_chassis_list(limit=limit,
+ marker=marker,
+ sort_key=sort_key,
+ sort_dir=sort_dir)
+ return [Chassis._from_db_object(cls(context), obj)
+ for obj in db_chassis]
+
+ @base.remotable
+ def create(self, context=None):
+ """Create a Chassis record in the DB.
+
+ Column-wise updates will be made based on the result of
+ self.what_changed().
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Chassis(context)
+
+ """
+ values = self.obj_get_changes()
+ db_chassis = self.dbapi.create_chassis(values)
+ self._from_db_object(self, db_chassis)
+
+ @base.remotable
+ def destroy(self, context=None):
+ """Delete the Chassis from the DB.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Chassis(context)
+ """
+ self.dbapi.destroy_chassis(self.uuid)
+ self.obj_reset_changes()
+
+ @base.remotable
+ def save(self, context=None):
+ """Save updates to this Chassis.
+
+ Updates will be made column by column based on the result
+ of self.what_changed().
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Chassis(context)
+ """
+ updates = self.obj_get_changes()
+ self.dbapi.update_chassis(self.uuid, updates)
+
+ self.obj_reset_changes()
+
+ @base.remotable
+ def refresh(self, context=None):
+ """Loads and applies updates for this Chassis.
+
+ Loads a :class:`Chassis` with the same uuid from the database and
+ checks for updated attributes. Updates are applied from
+ the loaded chassis column by column, if there are any updates.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Chassis(context)
+ """
+ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
+ for field in self.fields:
+ if (hasattr(self, base.get_attrname(field)) and
+ self[field] != current[field]):
+ self[field] = current[field]
diff --git a/iotronic/objects/__old/node.py b/iotronic/objects/__old/node.py
new file mode 100644
index 0000000..bb857a8
--- /dev/null
+++ b/iotronic/objects/__old/node.py
@@ -0,0 +1,272 @@
+# coding=utf-8
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import strutils
+from oslo_utils import uuidutils
+
+from iotronic.common import exception
+from iotronic.db import api as db_api
+from iotronic.objects import base
+from iotronic.objects import utils as obj_utils
+
+
+class Node(base.IotronicObject):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ dbapi = db_api.get_instance()
+
+ fields = {
+ 'id': int,
+
+ 'uuid': obj_utils.str_or_none,
+ 'name': obj_utils.str_or_none,
+ 'status': obj_utils.str_or_none,
+ #'chassis_id': obj_utils.int_or_none,
+ #'instance_uuid': obj_utils.str_or_none,
+
+ #'driver': obj_utils.str_or_none,
+ #'driver_info': obj_utils.dict_or_none,
+ #'driver_internal_info': obj_utils.dict_or_none,
+
+ # A clean step dictionary, indicating the current clean step
+ # being executed, or None, indicating cleaning is not in progress
+ # or has not yet started.
+ #'clean_step': obj_utils.dict_or_none,
+
+ #'instance_info': obj_utils.dict_or_none,
+ #'properties': obj_utils.dict_or_none,
+ 'reservation': obj_utils.str_or_none,
+ # a reference to the id of the conductor service, not its hostname,
+ # that has most recently performed some action which could require
+ # local state to be maintained (eg, built a PXE config)
+ #'conductor_affinity': obj_utils.int_or_none,
+
+ # One of states.POWER_ON|POWER_OFF|NOSTATE|ERROR
+ #'power_state': obj_utils.str_or_none,
+
+ # Set to one of states.POWER_ON|POWER_OFF when a power operation
+ # starts, and set to NOSTATE when the operation finishes
+ # (successfully or unsuccessfully).
+ #'target_power_state': obj_utils.str_or_none,
+
+ #'provision_state': obj_utils.str_or_none,
+ #'provision_updated_at': obj_utils.datetime_or_str_or_none,
+ #'target_provision_state': obj_utils.str_or_none,
+
+ #'maintenance': bool,
+ #'maintenance_reason': obj_utils.str_or_none,
+ #'console_enabled': bool,
+
+ # Any error from the most recent (last) asynchronous transaction
+ # that started but failed to finish.
+ #'last_error': obj_utils.str_or_none,
+
+ #'inspection_finished_at': obj_utils.datetime_or_str_or_none,
+ #'inspection_started_at': obj_utils.datetime_or_str_or_none,
+
+ #'extra': obj_utils.dict_or_none,
+ }
+
+ @staticmethod
+ def _from_db_object(node, db_node):
+ """Converts a database entity to a formal object."""
+ for field in node.fields:
+ node[field] = db_node[field]
+ node.obj_reset_changes()
+ return node
+
+ @base.remotable_classmethod
+ def get(cls, context, node_id):
+ """Find a node based on its id or uuid and return a Node object.
+
+ :param node_id: the id *or* uuid of a node.
+ :returns: a :class:`Node` object.
+ """
+ if strutils.is_int_like(node_id):
+ return cls.get_by_id(context, node_id)
+ elif uuidutils.is_uuid_like(node_id):
+ return cls.get_by_uuid(context, node_id)
+ else:
+ raise exception.InvalidIdentity(identity=node_id)
+
+ @base.remotable_classmethod
+ def get_by_id(cls, context, node_id):
+ """Find a node based on its integer id and return a Node object.
+
+ :param node_id: the id of a node.
+ :returns: a :class:`Node` object.
+ """
+ db_node = cls.dbapi.get_node_by_id(node_id)
+ node = Node._from_db_object(cls(context), db_node)
+ return node
+
+ @base.remotable_classmethod
+ def get_by_uuid(cls, context, uuid):
+ """Find a node based on uuid and return a Node object.
+
+ :param uuid: the uuid of a node.
+ :returns: a :class:`Node` object.
+ """
+ db_node = cls.dbapi.get_node_by_uuid(uuid)
+ node = Node._from_db_object(cls(context), db_node)
+ return node
+
+ @base.remotable_classmethod
+ def get_by_name(cls, context, name):
+ """Find a node based on name and return a Node object.
+
+ :param name: the logical name of a node.
+ :returns: a :class:`Node` object.
+ """
+ db_node = cls.dbapi.get_node_by_name(name)
+ node = Node._from_db_object(cls(context), db_node)
+ return node
+
+ @base.remotable_classmethod
+ def get_by_instance_uuid(cls, context, instance_uuid):
+ """Find a node based on the instance uuid and return a Node object.
+
+ :param instance_uuid: the uuid of the instance.
+ :returns: a :class:`Node` object.
+ """
+ db_node = cls.dbapi.get_node_by_instance(instance_uuid)
+ node = Node._from_db_object(cls(context), db_node)
+ return node
+
+ @base.remotable_classmethod
+ def list(cls, context, limit=None, marker=None, sort_key=None,
+ sort_dir=None, filters=None):
+ """Return a list of Node objects.
+
+ :param context: Security context.
+ :param limit: maximum number of resources to return in a single result.
+ :param marker: pagination marker for large data sets.
+ :param sort_key: column to sort results by.
+ :param sort_dir: direction to sort. "asc" or "desc".
+ :param filters: Filters to apply.
+ :returns: a list of :class:`Node` objects.
+
+ """
+ db_nodes = cls.dbapi.get_node_list(filters=filters, limit=limit,
+ marker=marker, sort_key=sort_key,
+ sort_dir=sort_dir)
+ return [Node._from_db_object(cls(context), obj) for obj in db_nodes]
+
+ @base.remotable_classmethod
+ def reserve(cls, context, tag, node_id):
+ """Get and reserve a node.
+
+ To prevent other ManagerServices from manipulating the given
+ Node while a Task is performed, mark it reserved by this host.
+
+ :param context: Security context.
+ :param tag: A string uniquely identifying the reservation holder.
+ :param node_id: A node id or uuid.
+ :raises: NodeNotFound if the node is not found.
+ :returns: a :class:`Node` object.
+
+ """
+ db_node = cls.dbapi.reserve_node(tag, node_id)
+ node = Node._from_db_object(cls(context), db_node)
+ return node
+
+ @base.remotable_classmethod
+ def release(cls, context, tag, node_id):
+ """Release the reservation on a node.
+
+ :param context: Security context.
+ :param tag: A string uniquely identifying the reservation holder.
+ :param node_id: A node id or uuid.
+ :raises: NodeNotFound if the node is not found.
+
+ """
+ cls.dbapi.release_node(tag, node_id)
+
+ @base.remotable
+ def create(self, context=None):
+ """Create a Node record in the DB.
+
+ Column-wise updates will be made based on the result of
+ self.what_changed(). If target_power_state is provided,
+ it will be checked against the in-database copy of the
+ node before updates are made.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Node(context)
+
+ """
+ values = self.obj_get_changes()
+ db_node = self.dbapi.create_node(values)
+ self._from_db_object(self, db_node)
+
+ @base.remotable
+ def destroy(self, context=None):
+ """Delete the Node from the DB.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Node(context)
+ """
+ self.dbapi.destroy_node(self.uuid)
+ self.obj_reset_changes()
+
+ @base.remotable
+ def save(self, context=None):
+ """Save updates to this Node.
+
+ Column-wise updates will be made based on the result of
+ self.what_changed(). If target_power_state is provided,
+ it will be checked against the in-database copy of the
+ node before updates are made.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Node(context)
+ """
+ updates = self.obj_get_changes()
+ if 'driver' in updates and 'driver_internal_info' not in updates:
+ # Wipe driver_internal_info when the driver changes
+ self.driver_internal_info = {}
+ updates = self.obj_get_changes()
+ self.dbapi.update_node(self.uuid, updates)
+ self.obj_reset_changes()
+
+ @base.remotable
+ def refresh(self, context=None):
+ """Refresh the object by re-fetching from the DB.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Node(context)
+ """
+ current = self.__class__.get_by_uuid(self._context, self.uuid)
+ for field in self.fields:
+ if (hasattr(self, base.get_attrname(field)) and
+ self[field] != current[field]):
+ self[field] = current[field]
diff --git a/iotronic/objects/__old/port.py b/iotronic/objects/__old/port.py
new file mode 100644
index 0000000..4b00374
--- /dev/null
+++ b/iotronic/objects/__old/port.py
@@ -0,0 +1,217 @@
+# coding=utf-8
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import strutils
+from oslo_utils import uuidutils
+
+from iotronic.common import exception
+from iotronic.common import utils
+from iotronic.db import api as dbapi
+from iotronic.objects import base
+from iotronic.objects import utils as obj_utils
+
+
+class Port(base.IotronicObject):
+ # Version 1.0: Initial version
+ # Version 1.1: Add get() and get_by_id() and get_by_address() and
+ # make get_by_uuid() only work with a uuid
+ # Version 1.2: Add create() and destroy()
+ # Version 1.3: Add list()
+ # Version 1.4: Add list_by_node_id()
+ VERSION = '1.4'
+
+ dbapi = dbapi.get_instance()
+
+ fields = {
+ 'id': int,
+ 'uuid': obj_utils.str_or_none,
+ 'node_id': obj_utils.int_or_none,
+ 'address': obj_utils.str_or_none,
+ 'extra': obj_utils.dict_or_none,
+ }
+
+ @staticmethod
+ def _from_db_object(port, db_port):
+ """Converts a database entity to a formal object."""
+ for field in port.fields:
+ port[field] = db_port[field]
+
+ port.obj_reset_changes()
+ return port
+
+ @staticmethod
+ def _from_db_object_list(db_objects, cls, context):
+ """Converts a list of database entities to a list of formal objects."""
+ return [Port._from_db_object(cls(context), obj) for obj in db_objects]
+
+ @base.remotable_classmethod
+ def get(cls, context, port_id):
+ """Find a port based on its id or uuid and return a Port object.
+
+ :param port_id: the id *or* uuid of a port.
+ :returns: a :class:`Port` object.
+ """
+ if strutils.is_int_like(port_id):
+ return cls.get_by_id(context, port_id)
+ elif uuidutils.is_uuid_like(port_id):
+ return cls.get_by_uuid(context, port_id)
+ elif utils.is_valid_mac(port_id):
+ return cls.get_by_address(context, port_id)
+ else:
+ raise exception.InvalidIdentity(identity=port_id)
+
+ @base.remotable_classmethod
+ def get_by_id(cls, context, port_id):
+ """Find a port based on its integer id and return a Port object.
+
+ :param port_id: the id of a port.
+ :returns: a :class:`Port` object.
+ """
+ db_port = cls.dbapi.get_port_by_id(port_id)
+ port = Port._from_db_object(cls(context), db_port)
+ return port
+
+ @base.remotable_classmethod
+ def get_by_uuid(cls, context, uuid):
+ """Find a port based on uuid and return a :class:`Port` object.
+
+ :param uuid: the uuid of a port.
+ :param context: Security context
+ :returns: a :class:`Port` object.
+ """
+ db_port = cls.dbapi.get_port_by_uuid(uuid)
+ port = Port._from_db_object(cls(context), db_port)
+ return port
+
+ @base.remotable_classmethod
+ def get_by_address(cls, context, address):
+ """Find a port based on address and return a :class:`Port` object.
+
+ :param address: the address of a port.
+ :param context: Security context
+ :returns: a :class:`Port` object.
+ """
+ db_port = cls.dbapi.get_port_by_address(address)
+ port = Port._from_db_object(cls(context), db_port)
+ return port
+
+ @base.remotable_classmethod
+ def list(cls, context, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ """Return a list of Port objects.
+
+ :param context: Security context.
+ :param limit: maximum number of resources to return in a single result.
+ :param marker: pagination marker for large data sets.
+ :param sort_key: column to sort results by.
+ :param sort_dir: direction to sort. "asc" or "desc".
+ :returns: a list of :class:`Port` objects.
+
+ """
+ db_ports = cls.dbapi.get_port_list(limit=limit,
+ marker=marker,
+ sort_key=sort_key,
+ sort_dir=sort_dir)
+ return Port._from_db_object_list(db_ports, cls, context)
+
+ @base.remotable_classmethod
+ def list_by_node_id(cls, context, node_id, limit=None, marker=None,
+ sort_key=None, sort_dir=None):
+ """Return a list of Port objects associated with a given node ID.
+
+ :param context: Security context.
+ :param node_id: the ID of the node.
+ :param limit: maximum number of resources to return in a single result.
+ :param marker: pagination marker for large data sets.
+ :param sort_key: column to sort results by.
+ :param sort_dir: direction to sort. "asc" or "desc".
+ :returns: a list of :class:`Port` objects.
+
+ """
+ db_ports = cls.dbapi.get_ports_by_node_id(node_id, limit=limit,
+ marker=marker,
+ sort_key=sort_key,
+ sort_dir=sort_dir)
+ return Port._from_db_object_list(db_ports, cls, context)
+
+ @base.remotable
+ def create(self, context=None):
+ """Create a Port record in the DB.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Port(context)
+
+ """
+ values = self.obj_get_changes()
+ db_port = self.dbapi.create_port(values)
+ self._from_db_object(self, db_port)
+
+ @base.remotable
+ def destroy(self, context=None):
+ """Delete the Port from the DB.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Port(context)
+ """
+ self.dbapi.destroy_port(self.uuid)
+ self.obj_reset_changes()
+
+ @base.remotable
+ def save(self, context=None):
+ """Save updates to this Port.
+
+ Updates will be made column by column based on the result
+ of self.what_changed().
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Port(context)
+ """
+ updates = self.obj_get_changes()
+ self.dbapi.update_port(self.uuid, updates)
+
+ self.obj_reset_changes()
+
+ @base.remotable
+ def refresh(self, context=None):
+ """Loads updates for this Port.
+
+ Loads a port with the same uuid from the database and
+ checks for updated attributes. Updates are applied from
+ the loaded port column by column, if there are any updates.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Port(context)
+ """
+ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
+ for field in self.fields:
+ if (hasattr(self, base.get_attrname(field)) and
+ self[field] != current[field]):
+ self[field] = current[field]
diff --git a/iotronic/objects/base.py b/iotronic/objects/base.py
new file mode 100644
index 0000000..fe6d58a
--- /dev/null
+++ b/iotronic/objects/base.py
@@ -0,0 +1,596 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Iotronic common internal object model"""
+
+import collections
+import copy
+
+from oslo_context import context
+from oslo_log import log as logging
+import oslo_messaging as messaging
+import six
+
+from iotronic.common import exception
+from iotronic.common.i18n import _
+from iotronic.common.i18n import _LE
+from iotronic.objects import utils as obj_utils
+from iotronic.openstack.common import versionutils
+
+
+LOG = logging.getLogger('object')
+
+
+class NotSpecifiedSentinel(object):
+ pass
+
+
+def get_attrname(name):
+ """Return the mangled name of the attribute's underlying storage."""
+ return '_%s' % name
+
+
+def make_class_properties(cls):
+ # NOTE(danms/comstud): Inherit fields from super classes.
+ # mro() returns the current class first and returns 'object' last, so
+ # those can be skipped. Also be careful to not overwrite any fields
+ # that already exist. And make sure each cls has its own copy of
+ # fields and that it is not sharing the dict with a super class.
+ cls.fields = dict(cls.fields)
+ for supercls in cls.mro()[1:-1]:
+ if not hasattr(supercls, 'fields'):
+ continue
+ for name, field in supercls.fields.items():
+ if name not in cls.fields:
+ cls.fields[name] = field
+ for name, typefn in cls.fields.items():
+
+ def getter(self, name=name):
+ attrname = get_attrname(name)
+ if not hasattr(self, attrname):
+ self.obj_load_attr(name)
+ return getattr(self, attrname)
+
+ def setter(self, value, name=name, typefn=typefn):
+ self._changed_fields.add(name)
+ try:
+ return setattr(self, get_attrname(name), typefn(value))
+ except Exception:
+ attr = "%s.%s" % (self.obj_name(), name)
+ LOG.exception(_LE('Error setting %(attr)s'),
+ {'attr': attr})
+ raise
+
+ setattr(cls, name, property(getter, setter))
+
+
+class IotronicObjectMetaclass(type):
+ """Metaclass that allows tracking of object classes."""
+
+ # NOTE(danms): This is what controls whether object operations are
+ # remoted. If this is not None, use it to remote things over RPC.
+ indirection_api = None
+
+ def __init__(cls, names, bases, dict_):
+ if not hasattr(cls, '_obj_classes'):
+ # This will be set in the 'IotronicObject' class.
+ cls._obj_classes = collections.defaultdict(list)
+ else:
+ # Add the subclass to IotronicObject._obj_classes
+ make_class_properties(cls)
+ cls._obj_classes[cls.obj_name()].append(cls)
+
+
+# These are decorators that mark an object's method as remotable.
+# If the metaclass is configured to forward object methods to an
+# indirection service, these will result in making an RPC call
+# instead of directly calling the implementation in the object. Instead,
+# the object implementation on the remote end will perform the
+# requested action and the result will be returned here.
+def remotable_classmethod(fn):
+ """Decorator for remotable classmethods."""
+ def wrapper(cls, context, *args, **kwargs):
+ if IotronicObject.indirection_api:
+ result = IotronicObject.indirection_api.object_class_action(
+ context, cls.obj_name(), fn.__name__, cls.VERSION,
+ args, kwargs)
+ else:
+ result = fn(cls, context, *args, **kwargs)
+ if isinstance(result, IotronicObject):
+ result._context = context
+ return result
+ return classmethod(wrapper)
+
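+# Illustrative sketch (names below are examples, not part of this module):
+# with indirection_api unset, the call runs locally; once it is set, the
+# same call is forwarded as an object_class_action() RPC.
+#
+# board = Board.get_by_uuid(context, uuid) # local DB access
+# IotronicObject.indirection_api = conductor_rpcapi # hypothetical API
+# board = Board.get_by_uuid(context, uuid) # remoted via RPC
+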
+
+# See comment above for remotable_classmethod()
+#
+# Note that this will use either the provided context, or the one
+# stashed in the object. If neither are present, the object is
+# "orphaned" and remotable methods cannot be called.
+def remotable(fn):
+ """Decorator for remotable object methods."""
+ def wrapper(self, *args, **kwargs):
+ ctxt = self._context
+ try:
+ if isinstance(args[0], (context.RequestContext)):
+ ctxt = args[0]
+ args = args[1:]
+ except IndexError:
+ pass
+ if ctxt is None:
+ raise exception.OrphanedObjectError(method=fn.__name__,
+ objtype=self.obj_name())
+ if IotronicObject.indirection_api:
+ updates, result = IotronicObject.indirection_api.object_action(
+ ctxt, self, fn.__name__, args, kwargs)
+ for key, value in updates.items():
+ if key in self.fields:
+ self[key] = self._attr_from_primitive(key, value)
+ self._changed_fields = set(updates.get('obj_what_changed', []))
+ return result
+ else:
+ return fn(self, ctxt, *args, **kwargs)
+ return wrapper
+
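+# For instance methods the wrapper takes the context from the first
+# positional argument if one is given, else from the object itself; a
+# sketch (assuming 'board' was built as Board(context)):
+#
+# board.save() # uses the stashed context
+# board.save(other_context) # explicit context wins
+# Board(None).save() # raises OrphanedObjectError
+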
+
+# Object versioning rules
+#
+# Each service has its set of objects, each with a version attached. When
+# a client attempts to call an object method, the server checks to see if
+# the version of that object matches (in a compatible way) its object
+# implementation. If so, cool, and if not, fail.
+def check_object_version(server, client):
+ try:
+ client_major, _client_minor = client.split('.')
+ server_major, _server_minor = server.split('.')
+ client_minor = int(_client_minor)
+ server_minor = int(_server_minor)
+ except ValueError:
+ raise exception.IncompatibleObjectVersion(
+ _('Invalid version string'))
+
+ if client_major != server_major:
+ raise exception.IncompatibleObjectVersion(
+ dict(client=client_major, server=server_major))
+ if client_minor > server_minor:
+ raise exception.IncompatibleObjectVersion(
+ dict(client=client_minor, server=server_minor))
+
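+# Worked examples (illustrative): with server='1.3', client='1.1' the check
+# passes (same major, server minor >= client minor); client='1.5' raises
+# IncompatibleObjectVersion (client minor ahead of server); client='2.0'
+# raises as well (major version mismatch).
+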
+
+@six.add_metaclass(IotronicObjectMetaclass)
+class IotronicObject(object):
+ """Base class and object factory.
+
+ This forms the base of all objects that can be remoted or instantiated
+ via RPC. Simply defining a class that inherits from this base class
+ will make it remotely instantiatable. Objects should implement the
+ necessary "get" classmethod routines as well as "save" object methods
+ as appropriate.
+ """
+
+ # Version of this object (see rules above check_object_version())
+ VERSION = '1.0'
+
+ # The fields present in this object as key:typefn pairs. For example:
+ #
+ # fields = { 'foo': int,
+ # 'bar': str,
+ # 'baz': lambda x: str(x).ljust(8),
+ # }
+ #
+ # NOTE(danms): The base IotronicObject class' fields will be inherited
+ # by subclasses, but that is a special case. Objects inheriting from
+ # other objects will not receive this merging of fields contents.
+ fields = {
+ 'created_at': obj_utils.datetime_or_str_or_none,
+ 'updated_at': obj_utils.datetime_or_str_or_none,
+ }
+ obj_extra_fields = []
+
+ _attr_created_at_from_primitive = obj_utils.dt_deserializer
+ _attr_updated_at_from_primitive = obj_utils.dt_deserializer
+ _attr_created_at_to_primitive = obj_utils.dt_serializer('created_at')
+ _attr_updated_at_to_primitive = obj_utils.dt_serializer('updated_at')
+
+ def __init__(self, context, **kwargs):
+ self._changed_fields = set()
+ self._context = context
+ self.update(kwargs)
+
+ @classmethod
+ def obj_name(cls):
+ """Get canonical object name.
+
+ This object name will be used over the wire for remote hydration.
+ """
+ return cls.__name__
+
+ @classmethod
+ def obj_class_from_name(cls, objname, objver):
+ """Returns a class from the registry based on a name and version."""
+ if objname not in cls._obj_classes:
+ LOG.error(_LE('Unable to instantiate unregistered object type '
+ '%(objtype)s'), dict(objtype=objname))
+ raise exception.UnsupportedObjectError(objtype=objname)
+
+ latest = None
+ compatible_match = None
+ for objclass in cls._obj_classes[objname]:
+ if objclass.VERSION == objver:
+ return objclass
+
+ version_bits = tuple([int(x) for x in objclass.VERSION.split(".")])
+ if latest is None:
+ latest = version_bits
+ elif latest < version_bits:
+ latest = version_bits
+
+ if versionutils.is_compatible(objver, objclass.VERSION):
+ compatible_match = objclass
+
+ if compatible_match:
+ return compatible_match
+
+ latest_ver = '%i.%i' % latest
+ raise exception.IncompatibleObjectVersion(objname=objname,
+ objver=objver,
+ supported=latest_ver)
+
+ def _attr_from_primitive(self, attribute, value):
+ """Attribute deserialization dispatcher.
+
+ This calls self._attr_foo_from_primitive(value) for an attribute
+ foo with value, if it exists, otherwise it assumes the value
+ is suitable for the attribute's setter method.
+ """
+ handler = '_attr_%s_from_primitive' % attribute
+ if hasattr(self, handler):
+ return getattr(self, handler)(value)
+ return value
+
+ @classmethod
+ def _obj_from_primitive(cls, context, objver, primitive):
+ self = cls(context)
+ self.VERSION = objver
+ objdata = primitive['iotronic_object.data']
+ changes = primitive.get('iotronic_object.changes', [])
+ for name in self.fields:
+ if name in objdata:
+ setattr(self, name,
+ self._attr_from_primitive(name, objdata[name]))
+ self._changed_fields = set([x for x in changes if x in self.fields])
+ return self
+
+ @classmethod
+ def obj_from_primitive(cls, primitive, context=None):
+ """Simple base-case hydration.
+
+ This calls self._attr_from_primitive() for each item in fields.
+ """
+ if primitive['iotronic_object.namespace'] != 'iotronic':
+ # NOTE(danms): We don't do anything with this now, but it's
+ # there for "the future"
+ raise exception.UnsupportedObjectError(
+ objtype='%s.%s' % (primitive['iotronic_object.namespace'],
+ primitive['iotronic_object.name']))
+ objname = primitive['iotronic_object.name']
+ objver = primitive['iotronic_object.version']
+ objclass = cls.obj_class_from_name(objname, objver)
+ return objclass._obj_from_primitive(context, objver, primitive)
+
+ def __deepcopy__(self, memo):
+ """Efficiently make a deep copy of this object."""
+
+ # NOTE(danms): A naive deepcopy would copy more than we need,
+ # and since we have knowledge of the volatile bits of the
+ # object, we can be smarter here. Also, nested entities within
+ # some objects may be uncopyable, so we can avoid those sorts
+ # of issues by copying only our field data.
+
+ nobj = self.__class__(self._context)
+ for name in self.fields:
+ if self.obj_attr_is_set(name):
+ nval = copy.deepcopy(getattr(self, name), memo)
+ setattr(nobj, name, nval)
+ nobj._changed_fields = set(self._changed_fields)
+ return nobj
+
+ def obj_clone(self):
+ """Create a copy."""
+ return copy.deepcopy(self)
+
+ def _attr_to_primitive(self, attribute):
+ """Attribute serialization dispatcher.
+
+ This calls self._attr_foo_to_primitive() for an attribute foo,
+ if it exists, otherwise it assumes the attribute itself is
+ primitive-enough to be sent over the RPC wire.
+ """
+ handler = '_attr_%s_to_primitive' % attribute
+ if hasattr(self, handler):
+ return getattr(self, handler)()
+ else:
+ return getattr(self, attribute)
+
+ def obj_to_primitive(self):
+ """Simple base-case dehydration.
+
+ This calls self._attr_to_primitive() for each item in fields.
+ """
+ primitive = dict()
+ for name in self.fields:
+ if hasattr(self, get_attrname(name)):
+ primitive[name] = self._attr_to_primitive(name)
+ obj = {'iotronic_object.name': self.obj_name(),
+ 'iotronic_object.namespace': 'iotronic',
+ 'iotronic_object.version': self.VERSION,
+ 'iotronic_object.data': primitive}
+ if self.obj_what_changed():
+ obj['iotronic_object.changes'] = list(self.obj_what_changed())
+ return obj
+
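+ # The wire format produced by obj_to_primitive() looks roughly like
+ # this (illustrative sketch; the field values are made up):
+ #
+ # {'iotronic_object.name': 'Board',
+ # 'iotronic_object.namespace': 'iotronic',
+ # 'iotronic_object.version': '1.0',
+ # 'iotronic_object.data': {'uuid': '...', 'name': 'board-01'},
+ # 'iotronic_object.changes': ['name']}
+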
+ def obj_load_attr(self, attrname):
+ """Load an additional attribute from the real object.
+
+ This should use self._conductor, and cache any data that might
+ be useful for future load operations.
+ """
+ raise NotImplementedError(
+ _("Cannot load '%(attrname)s' in the base class") %
+ {'attrname': attrname})
+
+ def save(self, context):
+ """Save the changed fields back to the store.
+
+ This is optional for subclasses, but is presented here in the base
+ class for consistency among those that do.
+ """
+ raise NotImplementedError(_("Cannot save anything in the base class"))
+
+ def obj_get_changes(self):
+ """Returns a dict of changed fields and their new values."""
+ changes = {}
+ for key in self.obj_what_changed():
+ changes[key] = self[key]
+ return changes
+
+ def obj_what_changed(self):
+ """Returns a set of fields that have been modified."""
+ return self._changed_fields
+
+ def obj_reset_changes(self, fields=None):
+ """Reset the list of fields that have been changed.
+
+ Note that this is NOT "revert to previous values"
+ """
+ if fields:
+ self._changed_fields -= set(fields)
+ else:
+ self._changed_fields.clear()
+
+ def obj_attr_is_set(self, attrname):
+ """Test object to see if attrname is present.
+
+ Returns True if the named attribute has a value set, or
+ False if not. Raises AttributeError if attrname is not
+ a valid attribute for this object.
+ """
+ if attrname not in self.obj_fields:
+ raise AttributeError(
+ _("%(objname)s object has no attribute '%(attrname)s'") %
+ {'objname': self.obj_name(), 'attrname': attrname})
+ return hasattr(self, get_attrname(attrname))
+
+ @property
+ def obj_fields(self):
+ return list(self.fields) + self.obj_extra_fields
+
+ # dictish syntactic sugar
+ def iteritems(self):
+ """For backwards-compatibility with dict-based objects.
+
+ NOTE(danms): May be removed in the future.
+ """
+ for name in list(self.fields) + self.obj_extra_fields:
+ if (hasattr(self, get_attrname(name)) or
+ name in self.obj_extra_fields):
+ yield name, getattr(self, name)
+
+ items = lambda self: list(self.iteritems())
+
+ def __getitem__(self, name):
+ """For backwards-compatibility with dict-based objects.
+
+ NOTE(danms): May be removed in the future.
+ """
+ return getattr(self, name)
+
+ def __setitem__(self, name, value):
+ """For backwards-compatibility with dict-based objects.
+
+ NOTE(danms): May be removed in the future.
+ """
+ setattr(self, name, value)
+
+ def __contains__(self, name):
+ """For backwards-compatibility with dict-based objects.
+
+ NOTE(danms): May be removed in the future.
+ """
+ return hasattr(self, get_attrname(name))
+
+ def get(self, key, value=NotSpecifiedSentinel):
+ """For backwards-compatibility with dict-based objects.
+
+ NOTE(danms): May be removed in the future.
+ """
+ if key not in self.obj_fields:
+ raise AttributeError(
+ _("'%(objclass)s' object has no attribute '%(attrname)s'") %
+ {'objclass': self.__class__, 'attrname': key})
+ if value != NotSpecifiedSentinel and not self.obj_attr_is_set(key):
+ return value
+ else:
+ return self[key]
+
+ def update(self, updates):
+ """For backwards-compatibility with dict-base objects.
+
+ NOTE(danms): May be removed in the future.
+ """
+ for key, value in updates.items():
+ self[key] = value
+
+ def as_dict(self):
+ return dict((k, getattr(self, k))
+ for k in self.fields
+ if hasattr(self, k))
+
+
+class ObjectListBase(object):
+ """Mixin class for lists of objects.
+
+ This mixin class can be added as a base class for an object that
+ is implementing a list of objects. It adds a single field of 'objects',
+ which is the list store, and behaves like a list itself. It supports
+ serialization of the list of objects automatically.
+ """
+ fields = {
+ 'objects': list,
+ }
+
+ # This is a dictionary of my_version:child_version mappings so that
+ # we can support backleveling our contents based on the version
+ # requested of the list object.
+ child_versions = {}
+
+ def __iter__(self):
+ """List iterator interface."""
+ return iter(self.objects)
+
+ def __len__(self):
+ """List length."""
+ return len(self.objects)
+
+ def __getitem__(self, index):
+ """List index access."""
+ if isinstance(index, slice):
+ new_obj = self.__class__(self._context)
+ new_obj.objects = self.objects[index]
+ # NOTE(danms): We must be mixed in with an IotronicObject!
+ new_obj.obj_reset_changes()
+ return new_obj
+ return self.objects[index]
+
+ def __contains__(self, value):
+ """List membership test."""
+ return value in self.objects
+
+ def count(self, value):
+ """List count of value occurrences."""
+ return self.objects.count(value)
+
+ def index(self, value):
+ """List index of value."""
+ return self.objects.index(value)
+
+ def _attr_objects_to_primitive(self):
+ """Serialization of object list."""
+ return [x.obj_to_primitive() for x in self.objects]
+
+ def _attr_objects_from_primitive(self, value):
+ """Deserialization of object list."""
+ objects = []
+ for entity in value:
+ obj = IotronicObject.obj_from_primitive(entity,
+ context=self._context)
+ objects.append(obj)
+ return objects
+
+ def obj_make_compatible(self, primitive, target_version):
+ primitives = primitive['objects']
+ child_target_version = self.child_versions.get(target_version, '1.0')
+ for index, item in enumerate(self.objects):
+ self.objects[index].obj_make_compatible(
+ primitives[index]['iotronic_object.data'],
+ child_target_version)
+ primitives[index]['iotronic_object.version'] = child_target_version
+
+ def obj_what_changed(self):
+ changes = set(self._changed_fields)
+ for child in self.objects:
+ if child.obj_what_changed():
+ changes.add('objects')
+ return changes
+
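+# A minimal sketch of a concrete list object (hypothetical class, shown
+# only to illustrate the mixin; a real list would pick its own VERSION
+# and child_versions mapping):
+#
+# class BoardList(ObjectListBase, IotronicObject):
+# fields = {'objects': list}
+#
+# boards = BoardList(context)
+# boards.objects = [board_a, board_b]
+# len(boards); boards[0]; board_a in boards # plain list behaviour
+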
+
+class IotronicObjectSerializer(messaging.NoOpSerializer):
+ """A IotronicObject-aware Serializer.
+
+ This implements the Oslo Serializer interface and provides the
+ ability to serialize and deserialize IotronicObject entities. Any service
+ that needs to accept or return IotronicObjects as arguments or result values
+ should pass this to its RpcProxy and RpcDispatcher objects.
+ """
+
+ def _process_iterable(self, context, action_fn, values):
+ """Process an iterable, taking an action on each value.
+
+ :param:context: Request context
+ :param:action_fn: Action to take on each item in values
+ :param:values: Iterable container of things to take action on
+ :returns: A new container of the same type (except set) with
+ items from values having had action applied.
+ """
+ iterable = values.__class__
+ if iterable == set:
+ # NOTE(danms): A set can't have an unhashable value inside, such as
+ # a dict. Convert sets to tuples, which is fine, since we can't
+ # send them over RPC anyway.
+ iterable = tuple
+ return iterable([action_fn(context, value) for value in values])
+
+ def serialize_entity(self, context, entity):
+ if isinstance(entity, (tuple, list, set)):
+ entity = self._process_iterable(context, self.serialize_entity,
+ entity)
+ elif (hasattr(entity, 'obj_to_primitive') and
+ callable(entity.obj_to_primitive)):
+ entity = entity.obj_to_primitive()
+ return entity
+
+ def deserialize_entity(self, context, entity):
+ if isinstance(entity, dict) and 'iotronic_object.name' in entity:
+ entity = IotronicObject.obj_from_primitive(entity, context=context)
+ elif isinstance(entity, (tuple, list, set)):
+ entity = self._process_iterable(context, self.deserialize_entity,
+ entity)
+ return entity
+
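+# Typical wiring (sketch; assumes an oslo.messaging transport, target and
+# endpoints configured elsewhere):
+#
+# serializer = IotronicObjectSerializer()
+# server = messaging.get_rpc_server(transport, target, endpoints,
+# serializer=serializer)
+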
+
+def obj_to_primitive(obj):
+ """Recursively turn an object into a python primitive.
+
+ An IotronicObject becomes a dict, and anything that implements ObjectListBase
+ becomes a list.
+ """
+ if isinstance(obj, ObjectListBase):
+ return [obj_to_primitive(x) for x in obj]
+ elif isinstance(obj, IotronicObject):
+ result = {}
+ for key, value in obj.iteritems():
+ result[key] = obj_to_primitive(value)
+ return result
+ else:
+ return obj
diff --git a/iotronic/objects/board.py b/iotronic/objects/board.py
new file mode 100644
index 0000000..61c7e0c
--- /dev/null
+++ b/iotronic/objects/board.py
@@ -0,0 +1,229 @@
+# coding=utf-8
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import strutils
+from oslo_utils import uuidutils
+
+from iotronic.common import exception
+from iotronic.db import api as db_api
+from iotronic.objects import base
+from iotronic.objects import utils as obj_utils
+
+
+class Board(base.IotronicObject):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ dbapi = db_api.get_instance()
+
+ fields = {
+ 'id': int,
+ 'uuid': obj_utils.str_or_none,
+ 'name': obj_utils.str_or_none,
+ 'status': obj_utils.str_or_none,
+ 'reservation': obj_utils.str_or_none,
+
+ }
+
+ @staticmethod
+ def _from_db_object(board, db_board):
+ """Converts a database entity to a formal object."""
+ for field in board.fields:
+ board[field] = db_board[field]
+ board.obj_reset_changes()
+ return board
+
+ @base.remotable_classmethod
+ def get(cls, context, board_id):
+ """Find a boad based on its id or uuid and return a Board object.
+
+ :param board_id: the id *or* uuid of a board.
+ :returns: a :class:`Board` object.
+ """
+ if strutils.is_int_like(board_id):
+ return cls.get_by_id(context, board_id)
+ elif uuidutils.is_uuid_like(board_id):
+ return cls.get_by_uuid(context, board_id)
+ else:
+ raise exception.InvalidIdentity(identity=board_id)
+
+ @base.remotable_classmethod
+ def get_by_id(cls, context, board_id):
+ """Find a board based on its integer id and return a Board object.
+
+ :param board_id: the id of a board.
+ :returns: a :class:`Board` object.
+ """
+ db_board = cls.dbapi.get_board_by_id(board_id)
+ board = Board._from_db_object(cls(context), db_board)
+ return board
+
+ @base.remotable_classmethod
+ def get_by_uuid(cls, context, uuid):
+ """Find a board based on uuid and return a Board object.
+
+ :param uuid: the uuid of a board.
+ :returns: a :class:`Board` object.
+ """
+ db_board = cls.dbapi.get_board_by_uuid(uuid)
+ board = Board._from_db_object(cls(context), db_board)
+ return board
+
+ @base.remotable_classmethod
+ def get_by_name(cls, context, name):
+ """Find a board based on name and return a Board object.
+
+ :param name: the logical name of a board.
+ :returns: a :class:`Board` object.
+ """
+ db_board = cls.dbapi.get_board_by_name(name)
+ board = Board._from_db_object(cls(context), db_board)
+ return board
+
+ @base.remotable_classmethod
+ def get_by_instance_uuid(cls, context, instance_uuid):
+ """Find a board based on the instance uuid and return a Board object.
+
+ :param instance_uuid: the uuid of the instance.
+ :returns: a :class:`Board` object.
+ """
+ db_board = cls.dbapi.get_board_by_instance(instance_uuid)
+ board = Board._from_db_object(cls(context), db_board)
+ return board
+
+ @base.remotable_classmethod
+ def list(cls, context, limit=None, marker=None, sort_key=None,
+ sort_dir=None, filters=None):
+ """Return a list of Board objects.
+
+ :param context: Security context.
+ :param limit: maximum number of resources to return in a single result.
+ :param marker: pagination marker for large data sets.
+ :param sort_key: column to sort results by.
+ :param sort_dir: direction to sort. "asc" or "desc".
+ :param filters: Filters to apply.
+ :returns: a list of :class:`Board` objects.
+
+ """
+ db_boards = cls.dbapi.get_board_list(filters=filters, limit=limit,
+ marker=marker, sort_key=sort_key,
+ sort_dir=sort_dir)
+ return [Board._from_db_object(cls(context), obj) for obj in db_boards]
+
+ @base.remotable_classmethod
+ def reserve(cls, context, tag, board_id):
+ """Get and reserve a board.
+
+ To prevent other ManagerServices from manipulating the given
+ Board while a Task is performed, mark it reserved by this host.
+
+ :param context: Security context.
+ :param tag: A string uniquely identifying the reservation holder.
+ :param board_id: A board id or uuid.
+ :raises: BoardNotFound if the board is not found.
+ :returns: a :class:`Board` object.
+
+ """
+ db_board = cls.dbapi.reserve_board(tag, board_id)
+ board = Board._from_db_object(cls(context), db_board)
+ return board
+
+ @base.remotable_classmethod
+ def release(cls, context, tag, board_id):
+ """Release the reservation on a board.
+
+ :param context: Security context.
+ :param tag: A string uniquely identifying the reservation holder.
+ :param board_id: A board id or uuid.
+ :raises: BoardNotFound if the board is not found.
+
+ """
+ cls.dbapi.release_board(tag, board_id)
+
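+ # Sketch of the reserve/release pattern a task manager might follow
+ # (names are illustrative):
+ #
+ # board = Board.reserve(context, CONF.host, board_uuid)
+ # try:
+ # ... # act on the board while holding the reservation
+ # finally:
+ # Board.release(context, CONF.host, board_uuid)
+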
+ @base.remotable
+ def create(self, context=None):
+ """Create a Board record in the DB.
+
+ Column-wise updates will be made based on the result of
+ self.what_changed().
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Board(context)
+
+ """
+ values = self.obj_get_changes()
+ db_board = self.dbapi.create_board(values)
+ self._from_db_object(self, db_board)
+
+ @base.remotable
+ def destroy(self, context=None):
+ """Delete the Board from the DB.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Board(context)
+ """
+ self.dbapi.destroy_board(self.uuid)
+ self.obj_reset_changes()
+
+ @base.remotable
+ def save(self, context=None):
+ """Save updates to this Board.
+
+ Column-wise updates will be made based on the result of
+ self.what_changed().
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Board(context)
+ """
+ # Board has no driver fields, so no driver_internal_info handling
+ # is needed here (unlike Node.save()).
+ updates = self.obj_get_changes()
+ self.dbapi.update_board(self.uuid, updates)
+ self.obj_reset_changes()
+
+ @base.remotable
+ def refresh(self, context=None):
+ """Refresh the object by re-fetching from the DB.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Board(context)
+ """
+ current = self.__class__.get_by_uuid(self._context, self.uuid)
+ for field in self.fields:
+ if (hasattr(self, base.get_attrname(field)) and
+ self[field] != current[field]):
+ self[field] = current[field]
diff --git a/iotronic/objects/conductor.py b/iotronic/objects/conductor.py
new file mode 100644
index 0000000..9113008
--- /dev/null
+++ b/iotronic/objects/conductor.py
@@ -0,0 +1,83 @@
+# coding=utf-8
+#
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from iotronic.common.i18n import _
+from iotronic.db import api as db_api
+from iotronic.objects import base
+from iotronic.objects import utils
+
+
+class Conductor(base.IotronicObject):
+
+ dbapi = db_api.get_instance()
+
+ fields = {
+ 'id': int,
+ 'drivers': utils.list_or_none,
+ 'hostname': str,
+ }
+
+ @staticmethod
+ def _from_db_object(conductor, db_obj):
+ """Converts a database entity to a formal object."""
+ for field in conductor.fields:
+ conductor[field] = db_obj[field]
+
+ conductor.obj_reset_changes()
+ return conductor
+
+ @base.remotable_classmethod
+ def get_by_hostname(cls, context, hostname):
+ """Get a Conductor record by its hostname.
+
+ :param hostname: the hostname on which a Conductor is running
+ :returns: a :class:`Conductor` object.
+ """
+ db_obj = cls.dbapi.get_conductor(hostname)
+ conductor = Conductor._from_db_object(cls(context), db_obj)
+ return conductor
+
+ def save(self, context):
+ """Save is not supported by Conductor objects."""
+ raise NotImplementedError(
+ _('Cannot update a conductor record directly.'))
+
+ @base.remotable
+ def refresh(self, context=None):
+ """Loads and applies updates for this Conductor.
+
+ Loads a :class:`Conductor` with the same hostname from the database
+ and checks for updated attributes. Updates are applied from
+ the loaded conductor column by column, if there are any updates.
+
+ :param context: Security context. NOTE: This should only
+ be used internally by the indirection_api.
+ Unfortunately, RPC requires context as the first
+ argument, even though we don't use it.
+ A context should be set when instantiating the
+ object, e.g.: Conductor(context)
+ """
+ current = self.__class__.get_by_hostname(self._context,
+ hostname=self.hostname)
+ for field in self.fields:
+ if (hasattr(self, base.get_attrname(field)) and
+ self[field] != current[field]):
+ self[field] = current[field]
+
+ @base.remotable
+ def touch(self, context):
+ """Touch this conductor's DB record, marking it as up-to-date."""
+ self.dbapi.touch_conductor(self.hostname)
diff --git a/iotronic/objects/utils.py b/iotronic/objects/utils.py
new file mode 100644
index 0000000..9d21c64
--- /dev/null
+++ b/iotronic/objects/utils.py
@@ -0,0 +1,134 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Utility methods for objects"""
+
+import ast
+import datetime
+
+import iso8601
+import netaddr
+from oslo_utils import timeutils
+import six
+
+from iotronic.common.i18n import _
+
+
+def datetime_or_none(dt):
+ """Validate a datetime or None value."""
+ if dt is None:
+ return None
+ elif isinstance(dt, datetime.datetime):
+ if dt.utcoffset() is None:
+ # NOTE(danms): Legacy objects from sqlalchemy are stored in UTC,
+ # but are returned without a timezone attached.
+ # As a transitional aid, assume a tz-naive object is in UTC.
+ return dt.replace(tzinfo=iso8601.iso8601.Utc())
+ else:
+ return dt
+ raise ValueError(_("A datetime.datetime is required here"))
+
+
+def datetime_or_str_or_none(val):
+ if isinstance(val, six.string_types):
+ return timeutils.parse_isotime(val)
+ return datetime_or_none(val)
+
+
+def int_or_none(val):
+ """Attempt to parse an integer value, or None."""
+ if val is None:
+ return val
+ else:
+ return int(val)
+
+
+def str_or_none(val):
+ """Attempt to stringify a value to unicode, or None."""
+ if val is None:
+ return val
+ else:
+ return six.text_type(val)
+
+
+def dict_or_none(val):
+ """Attempt to dictify a value, or None."""
+ if val is None:
+ return {}
+ elif isinstance(val, six.string_types):
+ return dict(ast.literal_eval(val))
+ else:
+ try:
+ return dict(val)
+ except ValueError:
+ return {}
+
+
+def list_or_none(val):
+ """Attempt to listify a value, or None."""
+ if val is None:
+ return []
+ elif isinstance(val, six.string_types):
+ return list(ast.literal_eval(val))
+ else:
+ try:
+ return list(val)
+ except ValueError:
+ return []
+
+
+def ip_or_none(version):
+ """Return a version-specific IP address validator."""
+ def validator(val, version=version):
+ if val is None:
+ return val
+ else:
+ return netaddr.IPAddress(val, version=version)
+ return validator
+
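+# Example (illustrative): ip_or_none(4)('192.168.0.1') returns a
+# netaddr.IPAddress; ip_or_none(4)(None) returns None; and
+# ip_or_none(4)('::1') raises, since the validator is pinned to IPv4.
+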
+
+def nested_object_or_none(objclass):
+ def validator(val, objclass=objclass):
+ if val is None or isinstance(val, objclass):
+ return val
+ raise ValueError(_("An object of class %s is required here")
+ % objclass)
+ return validator
+
+
+def dt_serializer(name):
+ """Return a datetime serializer for a named attribute."""
+ def serializer(self, name=name):
+ if getattr(self, name) is not None:
+ return timeutils.isotime(getattr(self, name))
+ else:
+ return None
+ return serializer
+
+
+def dt_deserializer(instance, val):
+ """A deserializer method for datetime attributes."""
+ if val is None:
+ return None
+ else:
+ return timeutils.parse_isotime(val)
+
+
+def obj_serializer(name):
+ def serializer(self, name=name):
+ if getattr(self, name) is not None:
+ return getattr(self, name).obj_to_primitive()
+ else:
+ return None
+ return serializer
diff --git a/iotronic/openstack/__init__.py b/iotronic/openstack/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iotronic/openstack/common/__init__.py b/iotronic/openstack/common/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/iotronic/openstack/common/_i18n.py b/iotronic/openstack/common/_i18n.py
new file mode 100644
index 0000000..e818fe6
--- /dev/null
+++ b/iotronic/openstack/common/_i18n.py
@@ -0,0 +1,45 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""oslo_i18n integration module.
+
+See http://docs.openstack.org/developer/oslo_i18n/usage.html
+
+"""
+
+try:
+ import oslo_i18n
+
+ # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
+ # application name when this module is synced into the separate
+ # repository. It is OK to have more than one translation function
+ # using the same domain, since there will still only be one message
+ # catalog.
+ _translators = oslo_i18n.TranslatorFactory(domain='iotronic')
+
+ # The primary translation function using the well-known name "_"
+ _ = _translators.primary
+
+ # Translators for log levels.
+ #
+ # The abbreviated names are meant to reflect the usual use of a short
+ # name like '_'. The "L" is for "log" and the other letter comes from
+ # the level.
+ _LI = _translators.log_info
+ _LW = _translators.log_warning
+ _LE = _translators.log_error
+ _LC = _translators.log_critical
+except ImportError:
+ # NOTE(dims): Support for cases where a project wants to use
+ # code from oslo_incubator, but is not ready to be internationalized
+ # (like tempest)
+ _ = _LI = _LW = _LE = _LC = lambda x: x
diff --git a/iotronic/openstack/common/eventlet_backdoor.py b/iotronic/openstack/common/eventlet_backdoor.py
new file mode 100644
index 0000000..381b861
--- /dev/null
+++ b/iotronic/openstack/common/eventlet_backdoor.py
@@ -0,0 +1,151 @@
+# Copyright (c) 2012 OpenStack Foundation.
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import print_function
+
+import copy
+import errno
+import gc
+import logging
+import os
+import pprint
+import socket
+import sys
+import traceback
+
+import eventlet.backdoor
+import greenlet
+from oslo_config import cfg
+
+from iotronic.openstack.common._i18n import _LI
+
+help_for_backdoor_port = (
+ "Acceptable values are 0, , and :, where 0 results "
+ "in listening on a random tcp port number; results in listening "
+ "on the specified port number (and not enabling backdoor if that port "
+ "is in use); and : results in listening on the smallest "
+ "unused port number within the specified range of port numbers. The "
+ "chosen port is displayed in the service's log file.")
+eventlet_backdoor_opts = [
+ cfg.StrOpt('backdoor_port',
+ help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
+]
+
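+# Example configuration (sketch):
+#
+# [DEFAULT]
+# backdoor_port = 8000:9000
+#
+# The chosen port is written to the service log; it can then be reached
+# with, e.g., 'telnet 127.0.0.1 <port>'.
+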
+CONF = cfg.CONF
+CONF.register_opts(eventlet_backdoor_opts)
+LOG = logging.getLogger(__name__)
+
+
+def list_opts():
+ """Entry point for oslo_config-generator.
+ """
+ return [(None, copy.deepcopy(eventlet_backdoor_opts))]
+
+
+class EventletBackdoorConfigValueError(Exception):
+ def __init__(self, port_range, help_msg, ex):
+ msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
+ '%(help)s' %
+ {'range': port_range, 'ex': ex, 'help': help_msg})
+ super(EventletBackdoorConfigValueError, self).__init__(msg)
+ self.port_range = port_range
+
+
+def _dont_use_this():
+ print("Don't use this, just disconnect instead")
+
+
+def _find_objects(t):
+ return [o for o in gc.get_objects() if isinstance(o, t)]
+
+
+def _print_greenthreads():
+ for i, gt in enumerate(_find_objects(greenlet.greenlet)):
+ print(i, gt)
+ traceback.print_stack(gt.gr_frame)
+ print()
+
+
+def _print_nativethreads():
+ for threadId, stack in sys._current_frames().items():
+ print(threadId)
+ traceback.print_stack(stack)
+ print()
+
+
+def _parse_port_range(port_range):
+ if ':' not in port_range:
+ start, end = port_range, port_range
+ else:
+ start, end = port_range.split(':', 1)
+ try:
+ start, end = int(start), int(end)
+ if end < start:
+ raise ValueError
+ return start, end
+ except ValueError as ex:
+ raise EventletBackdoorConfigValueError(port_range,
+ help_for_backdoor_port, ex)
+
+
+def _listen(host, start_port, end_port, listen_func):
+ try_port = start_port
+ while True:
+ try:
+ return listen_func((host, try_port))
+ except socket.error as exc:
+ if (exc.errno != errno.EADDRINUSE or
+ try_port >= end_port):
+ raise
+ try_port += 1
+
+
+def initialize_if_enabled():
+ backdoor_locals = {
+ 'exit': _dont_use_this, # So we don't exit the entire process
+ 'quit': _dont_use_this, # So we don't exit the entire process
+ 'fo': _find_objects,
+ 'pgt': _print_greenthreads,
+ 'pnt': _print_nativethreads,
+ }
+
+ if CONF.backdoor_port is None:
+ return None
+
+ start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
+
+ # NOTE(johannes): The standard sys.displayhook will print the value of
+ # the last expression and set it to __builtin__._, which overwrites
+ # the __builtin__._ that gettext sets. Let's switch to using pprint
+ # since it won't interact poorly with gettext, and it's easier to
+ # read the output too.
+ def displayhook(val):
+ if val is not None:
+ pprint.pprint(val)
+ sys.displayhook = displayhook
+
+ sock = _listen('localhost', start_port, end_port, eventlet.listen)
+
+ # In the case of backdoor port being zero, a port number is assigned by
+ # listen(). In any case, pull the port number out here.
+ port = sock.getsockname()[1]
+ LOG.info(
+ _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
+ {'port': port, 'pid': os.getpid()}
+ )
+ eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
+ locals=backdoor_locals)
+ return port
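+
+# A minimal usage sketch: with backdoor_port set (for example "9000:9010")
+# in the service configuration, initialize_if_enabled() is called at service
+# startup (see Launcher in service.py). Once the chosen port appears in the
+# log, the REPL can be reached with any TCP client, e.g.:
+#
+#   $ telnet localhost 9001
+#   >>> pgt()      # print tracebacks for all greenthreads
+#   >>> fo(dict)   # list live dict objects found by the GC
+#
+# The port numbers above are assumptions for illustration; the actual port
+# is whatever the service logs at startup.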
diff --git a/iotronic/openstack/common/fileutils.py b/iotronic/openstack/common/fileutils.py
new file mode 100644
index 0000000..9097c35
--- /dev/null
+++ b/iotronic/openstack/common/fileutils.py
@@ -0,0 +1,149 @@
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import errno
+import logging
+import os
+import stat
+import tempfile
+
+from oslo_utils import excutils
+
+LOG = logging.getLogger(__name__)
+
+_FILE_CACHE = {}
+DEFAULT_MODE = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
+
+
+def ensure_tree(path, mode=DEFAULT_MODE):
+ """Create a directory (and any ancestor directories required)
+
+ :param path: Directory to create
+ :param mode: Directory creation permissions
+ """
+ try:
+ os.makedirs(path, mode)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST:
+ if not os.path.isdir(path):
+ raise
+ else:
+ raise
+
+
+def read_cached_file(filename, force_reload=False):
+ """Read from a file if it has been modified.
+
+ :param force_reload: Whether to reload the file.
+ :returns: A tuple with a boolean specifying if the data is fresh
+ or not, and the content of the file.
+ """
+ global _FILE_CACHE
+
+ if force_reload:
+ delete_cached_file(filename)
+
+ reloaded = False
+ mtime = os.path.getmtime(filename)
+ cache_info = _FILE_CACHE.setdefault(filename, {})
+
+ if not cache_info or mtime > cache_info.get('mtime', 0):
+ LOG.debug("Reloading cached file %s" % filename)
+ with open(filename) as fap:
+ cache_info['data'] = fap.read()
+ cache_info['mtime'] = mtime
+ reloaded = True
+ return (reloaded, cache_info['data'])
+
+
+def delete_cached_file(filename):
+ """Delete cached file if present.
+
+ :param filename: filename to delete
+ """
+ global _FILE_CACHE
+
+ if filename in _FILE_CACHE:
+ del _FILE_CACHE[filename]
+
+
+def delete_if_exists(path, remove=os.unlink):
+ """Delete a file, but ignore file not found error.
+
+ :param path: File to delete
+ :param remove: Optional function to remove passed path
+ """
+
+ try:
+ remove(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+
+@contextlib.contextmanager
+def remove_path_on_error(path, remove=delete_if_exists):
+ """Protect code that wants to operate on PATH atomically.
+ Any exception will cause PATH to be removed.
+
+ :param path: File to work with
+ :param remove: Optional function to remove passed path
+ """
+
+ try:
+ yield
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ remove(path)
+
+
+def file_open(*args, **kwargs):
+ """Open file
+
+ see built-in open() documentation for more details
+
+ Note: The reason this is kept in a separate module is to easily
+ be able to provide a stub module that doesn't alter system
+ state at all (for unit tests)
+ """
+ return open(*args, **kwargs)
+
+
+def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
+ """Create temporary file or use existing file.
+
+ This util is needed for creating temporary file with
+ specified content, suffix and prefix. If path is not None,
+ it will be used for writing content. If the path doesn't
+ exist it'll be created.
+
+ :param content: content for temporary file.
+ :param path: same as parameter 'dir' for mkstemp
+ :param suffix: same as parameter 'suffix' for mkstemp
+ :param prefix: same as parameter 'prefix' for mkstemp
+
+ For example: it can be used in database tests for creating
+ configuration files.
+ """
+ if path:
+ ensure_tree(path)
+
+ (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
+ try:
+ os.write(fd, content)
+ finally:
+ os.close(fd)
+ return path
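+
+# A minimal usage sketch (the file content below is illustrative):
+#
+#   path = write_to_tempfile(b'key = value\n', suffix='.conf')
+#   with remove_path_on_error(path):
+#       reloaded, data = read_cached_file(path)
+#   delete_if_exists(path)
+#
+# write_to_tempfile() returns the path of the created file,
+# remove_path_on_error() deletes it if the block raises, and
+# delete_if_exists() ignores a missing file on cleanup.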
diff --git a/iotronic/openstack/common/imageutils.py b/iotronic/openstack/common/imageutils.py
new file mode 100644
index 0000000..96d2696
--- /dev/null
+++ b/iotronic/openstack/common/imageutils.py
@@ -0,0 +1,152 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods to deal with images.
+"""
+
+import re
+
+from oslo_utils import strutils
+
+from iotronic.openstack.common._i18n import _
+
+
+class QemuImgInfo(object):
+ BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
+ r"\s+(.*?)\)\s*$"), re.I)
+ TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
+ SIZE_RE = re.compile(r"(\d*\.?\d+)(\w+)?(\s*\(\s*(\d+)\s+bytes\s*\))?",
+ re.I)
+
+ def __init__(self, cmd_output=None):
+ details = self._parse(cmd_output or '')
+ self.image = details.get('image')
+ self.backing_file = details.get('backing_file')
+ self.file_format = details.get('file_format')
+ self.virtual_size = details.get('virtual_size')
+ self.cluster_size = details.get('cluster_size')
+ self.disk_size = details.get('disk_size')
+ self.snapshots = details.get('snapshot_list', [])
+ self.encrypted = details.get('encrypted')
+
+ def __str__(self):
+ lines = [
+ 'image: %s' % self.image,
+ 'file_format: %s' % self.file_format,
+ 'virtual_size: %s' % self.virtual_size,
+ 'disk_size: %s' % self.disk_size,
+ 'cluster_size: %s' % self.cluster_size,
+ 'backing_file: %s' % self.backing_file,
+ ]
+ if self.snapshots:
+ lines.append("snapshots: %s" % self.snapshots)
+ if self.encrypted:
+ lines.append("encrypted: %s" % self.encrypted)
+ return "\n".join(lines)
+
+ def _canonicalize(self, field):
+ # Standardize on underscores/lc/no dash and no spaces
+ # since qemu seems to have mixed outputs here... and
+ # this format allows for better integration with python
+ # - i.e. for usage in kwargs and such...
+ field = field.lower().strip()
+ for c in (" ", "-"):
+ field = field.replace(c, '_')
+ return field
+
+ def _extract_bytes(self, details):
+ # Replace it with the byte amount
+ real_size = self.SIZE_RE.search(details)
+ if not real_size:
+ raise ValueError(_('Invalid input value "%s".') % details)
+ magnitude = real_size.group(1)
+ unit_of_measure = real_size.group(2)
+ bytes_info = real_size.group(3)
+ if bytes_info:
+ return int(real_size.group(4))
+ elif not unit_of_measure:
+ return int(magnitude)
+ return strutils.string_to_bytes('%s%sB' % (magnitude, unit_of_measure),
+ return_int=True)
+
+ def _extract_details(self, root_cmd, root_details, lines_after):
+ real_details = root_details
+ if root_cmd == 'backing_file':
+ # Replace it with the real backing file
+ backing_match = self.BACKING_FILE_RE.match(root_details)
+ if backing_match:
+ real_details = backing_match.group(2).strip()
+ elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
+ # Replace it with the byte amount (if we can convert it)
+ if root_details == 'None':
+ real_details = 0
+ else:
+ real_details = self._extract_bytes(root_details)
+ elif root_cmd == 'file_format':
+ real_details = real_details.strip().lower()
+ elif root_cmd == 'snapshot_list':
+ # Next line should be a header, starting with 'ID'
+ if not lines_after or not lines_after.pop(0).startswith("ID"):
+ msg = _("Snapshot list encountered but no header found!")
+ raise ValueError(msg)
+ real_details = []
+ # This is the sprintf pattern we will try to match
+ # "%-10s%-20s%7s%20s%15s"
+ # ID TAG VM SIZE DATE VM CLOCK (current header)
+ while lines_after:
+ line = lines_after[0]
+ line_pieces = line.split()
+ if len(line_pieces) != 6:
+ break
+ # Check against this pattern in the final position
+ # "%02d:%02d:%02d.%03d"
+ date_pieces = line_pieces[5].split(":")
+ if len(date_pieces) != 3:
+ break
+ lines_after.pop(0)
+ real_details.append({
+ 'id': line_pieces[0],
+ 'tag': line_pieces[1],
+ 'vm_size': line_pieces[2],
+ 'date': line_pieces[3],
+ 'vm_clock': line_pieces[4] + " " + line_pieces[5],
+ })
+ return real_details
+
+ def _parse(self, cmd_output):
+ # Based on analysis of qemu-img.c. Find all lines that start with
+ # some text followed by a ':' and a newline, and then handle the
+ # results of those 'top level' items in a separate function.
+ #
+ # TODO(harlowja): newer versions might have a json output format
+ # we should switch to that whenever possible.
+ # see: http://bit.ly/XLJXDX
+ contents = {}
+ lines = [x for x in cmd_output.splitlines() if x.strip()]
+ while lines:
+ line = lines.pop(0)
+ top_level = self.TOP_LEVEL_RE.match(line)
+ if top_level:
+ root = self._canonicalize(top_level.group(1))
+ if not root:
+ continue
+ root_details = top_level.group(2).strip()
+ details = self._extract_details(root, root_details, lines)
+ contents[root] = details
+ return contents
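+
+# A minimal usage sketch, assuming cmd_output holds the text printed by
+# "qemu-img info <image>":
+#
+#   info = QemuImgInfo(cmd_output)
+#   if info.file_format == 'qcow2' and info.backing_file:
+#       size_in_bytes = info.virtual_size
+#
+# Sizes are normalized to byte counts by _extract_bytes(), and field names
+# are canonicalized to lowercase underscore form ("virtual size" becomes
+# virtual_size).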
diff --git a/iotronic/openstack/common/loopingcall.py b/iotronic/openstack/common/loopingcall.py
new file mode 100644
index 0000000..5e9d3b0
--- /dev/null
+++ b/iotronic/openstack/common/loopingcall.py
@@ -0,0 +1,147 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import sys
+import time
+
+from eventlet import event
+from eventlet import greenthread
+
+from iotronic.openstack.common._i18n import _LE, _LW
+
+LOG = logging.getLogger(__name__)
+
+# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
+# with time.time() called in the standard logging module
+# during unittests.
+_ts = lambda: time.time()
+
+
+class LoopingCallDone(Exception):
+ """Exception to break out and stop a LoopingCallBase.
+
+ The poll-function passed to LoopingCallBase can raise this exception to
+ break out of the loop normally. This is somewhat analogous to
+ StopIteration.
+
+ An optional return-value can be included as the argument to the exception;
+ this return-value will be returned by LoopingCallBase.wait()
+
+ """
+
+ def __init__(self, retvalue=True):
+ """:param retvalue: Value that LoopingCallBase.wait() should return."""
+ self.retvalue = retvalue
+
+
+class LoopingCallBase(object):
+ def __init__(self, f=None, *args, **kw):
+ self.args = args
+ self.kw = kw
+ self.f = f
+ self._running = False
+ self.done = None
+
+ def stop(self):
+ self._running = False
+
+ def wait(self):
+ return self.done.wait()
+
+
+class FixedIntervalLoopingCall(LoopingCallBase):
+ """A fixed interval looping call."""
+
+ def start(self, interval, initial_delay=None):
+ self._running = True
+ done = event.Event()
+
+ def _inner():
+ if initial_delay:
+ greenthread.sleep(initial_delay)
+
+ try:
+ while self._running:
+ start = _ts()
+ self.f(*self.args, **self.kw)
+ end = _ts()
+ if not self._running:
+ break
+ delay = end - start - interval
+ if delay > 0:
+ LOG.warn(_LW('task %(func_name)r run outlasted '
+ 'interval by %(delay).2f sec'),
+ {'func_name': self.f, 'delay': delay})
+ greenthread.sleep(-delay if delay < 0 else 0)
+ except LoopingCallDone as e:
+ self.stop()
+ done.send(e.retvalue)
+ except Exception:
+ LOG.exception(_LE('in fixed duration looping call'))
+ done.send_exception(*sys.exc_info())
+ return
+ else:
+ done.send(True)
+
+ self.done = done
+
+ greenthread.spawn_n(_inner)
+ return self.done
+
+
+class DynamicLoopingCall(LoopingCallBase):
+ """A looping call which sleeps until the next known event.
+
+ The function called should return how long to sleep for before being
+ called again.
+ """
+
+ def start(self, initial_delay=None, periodic_interval_max=None):
+ self._running = True
+ done = event.Event()
+
+ def _inner():
+ if initial_delay:
+ greenthread.sleep(initial_delay)
+
+ try:
+ while self._running:
+ idle = self.f(*self.args, **self.kw)
+ if not self._running:
+ break
+
+ if periodic_interval_max is not None:
+ idle = min(idle, periodic_interval_max)
+ LOG.debug('Dynamic looping call %(func_name)r sleeping '
+ 'for %(idle).02f seconds',
+ {'func_name': self.f, 'idle': idle})
+ greenthread.sleep(idle)
+ except LoopingCallDone as e:
+ self.stop()
+ done.send(e.retvalue)
+ except Exception:
+ LOG.exception(_LE('in dynamic looping call'))
+ done.send_exception(*sys.exc_info())
+ return
+ else:
+ done.send(True)
+
+ self.done = done
+
+ greenthread.spawn(_inner)
+ return self.done
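+
+# A minimal usage sketch: run a poll function every two seconds until it
+# stops itself by raising LoopingCallDone (_poll and _work_is_finished are
+# illustrative names):
+#
+#   def _poll():
+#       if _work_is_finished():
+#           raise LoopingCallDone(retvalue='done')
+#
+#   timer = FixedIntervalLoopingCall(_poll)
+#   timer.start(interval=2.0)
+#   result = timer.wait()   # returns 'done'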
diff --git a/iotronic/openstack/common/periodic_task.py b/iotronic/openstack/common/periodic_task.py
new file mode 100644
index 0000000..31474a8
--- /dev/null
+++ b/iotronic/openstack/common/periodic_task.py
@@ -0,0 +1,232 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import logging
+import random
+import time
+
+from oslo_config import cfg
+import six
+
+from iotronic.openstack.common._i18n import _, _LE, _LI
+
+
+periodic_opts = [
+ cfg.BoolOpt('run_external_periodic_tasks',
+ default=True,
+ help='Some periodic tasks can be run in a separate process. '
+ 'Should we run them here?'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(periodic_opts)
+
+LOG = logging.getLogger(__name__)
+
+DEFAULT_INTERVAL = 60.0
+
+
+def list_opts():
+ """Entry point for oslo_config-generator."""
+ return [(None, copy.deepcopy(periodic_opts))]
+
+
+class InvalidPeriodicTaskArg(Exception):
+ message = _("Unexpected argument for periodic task creation: %(arg)s.")
+
+
+def periodic_task(*args, **kwargs):
+ """Decorator to indicate that a method is a periodic task.
+
+ This decorator can be used in two ways:
+
+ 1. Without arguments '@periodic_task', this will be run on the default
+ interval of 60 seconds.
+
+ 2. With arguments:
+ @periodic_task(spacing=N [, run_immediately=[True|False]]
+ [, name=[None|"string"]])
+ this will be run approximately every N seconds. If this number is
+ negative the periodic task will be disabled. If the run_immediately
+ argument is provided and has a value of 'True', the first run of the
+ task will be shortly after the task scheduler starts. If
+ run_immediately is omitted or set to 'False', the first time the
+ task runs will be approximately N seconds after the task scheduler
+ starts. If name is not provided, the function's __name__ is used.
+ """
+ def decorator(f):
+ # Test for old style invocation
+ if 'ticks_between_runs' in kwargs:
+ raise InvalidPeriodicTaskArg(arg='ticks_between_runs')
+
+ # Control if run at all
+ f._periodic_task = True
+ f._periodic_external_ok = kwargs.pop('external_process_ok', False)
+ if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
+ f._periodic_enabled = False
+ else:
+ f._periodic_enabled = kwargs.pop('enabled', True)
+ f._periodic_name = kwargs.pop('name', f.__name__)
+
+ # Control frequency
+ f._periodic_spacing = kwargs.pop('spacing', 0)
+ f._periodic_immediate = kwargs.pop('run_immediately', False)
+ if f._periodic_immediate:
+ f._periodic_last_run = None
+ else:
+ f._periodic_last_run = time.time()
+ return f
+
+ # NOTE(sirp): The `if` is necessary to allow the decorator to be used with
+ # and without parentheses.
+ #
+ # In the 'with-parentheses' case (with kwargs present), this function needs
+ # to return a decorator function since the interpreter will invoke it like:
+ #
+ # periodic_task(*args, **kwargs)(f)
+ #
+ # In the 'without-parentheses' case, the original function will be passed
+ # in as the first argument, like:
+ #
+ # periodic_task(f)
+ if kwargs:
+ return decorator
+ else:
+ return decorator(args[0])
+
+
+class _PeriodicTasksMeta(type):
+ def _add_periodic_task(cls, task):
+ """Add a periodic task to the list of periodic tasks.
+
+ The task should already be decorated by @periodic_task.
+
+ :return: whether task was actually enabled
+ """
+ name = task._periodic_name
+
+ if task._periodic_spacing < 0:
+ LOG.info(_LI('Skipping periodic task %(task)s because '
+ 'its interval is negative'),
+ {'task': name})
+ return False
+ if not task._periodic_enabled:
+ LOG.info(_LI('Skipping periodic task %(task)s because '
+ 'it is disabled'),
+ {'task': name})
+ return False
+
+ # A periodic spacing of zero indicates that this task should
+ # be run on the default interval to avoid running too
+ # frequently.
+ if task._periodic_spacing == 0:
+ task._periodic_spacing = DEFAULT_INTERVAL
+
+ cls._periodic_tasks.append((name, task))
+ cls._periodic_spacing[name] = task._periodic_spacing
+ return True
+
+ def __init__(cls, names, bases, dict_):
+ """Metaclass that allows us to collect decorated periodic tasks."""
+ super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
+
+ # NOTE(sirp): if the attribute is not present then we must be the base
+ # class, so go ahead and initialize it. If the attribute is present,
+ # then we're a subclass so make a copy of it so we don't step on our
+ # parent's toes.
+ try:
+ cls._periodic_tasks = cls._periodic_tasks[:]
+ except AttributeError:
+ cls._periodic_tasks = []
+
+ try:
+ cls._periodic_spacing = cls._periodic_spacing.copy()
+ except AttributeError:
+ cls._periodic_spacing = {}
+
+ for value in cls.__dict__.values():
+ if getattr(value, '_periodic_task', False):
+ cls._add_periodic_task(value)
+
+
+def _nearest_boundary(last_run, spacing):
+ """Find nearest boundary which is in the past, which is a multiple of the
+ spacing with the last run as an offset.
+
+ Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24,
+ 31, 38...
+
+ 0% to 5% of the spacing value will be added to this value to ensure tasks
+ do not synchronize. This jitter is rounded to the nearest second, this
+ means that spacings smaller than 20 seconds will not have jitter.
+ """
+ current_time = time.time()
+ if last_run is None:
+ return current_time
+ delta = current_time - last_run
+ offset = delta % spacing
+ # Add up to 5% jitter
+ jitter = int(spacing * (random.random() / 20))
+ return current_time - offset + jitter
+
+
+@six.add_metaclass(_PeriodicTasksMeta)
+class PeriodicTasks(object):
+ def __init__(self):
+ super(PeriodicTasks, self).__init__()
+ self._periodic_last_run = {}
+ for name, task in self._periodic_tasks:
+ self._periodic_last_run[name] = task._periodic_last_run
+
+ def add_periodic_task(self, task):
+ """Add a periodic task to the list of periodic tasks.
+
+ The task should already be decorated by @periodic_task.
+ """
+ if self.__class__._add_periodic_task(task):
+ self._periodic_last_run[task._periodic_name] = (
+ task._periodic_last_run)
+
+ def run_periodic_tasks(self, context, raise_on_error=False):
+ """Tasks to be run at a periodic interval."""
+ idle_for = DEFAULT_INTERVAL
+ for task_name, task in self._periodic_tasks:
+ full_task_name = '.'.join([self.__class__.__name__, task_name])
+
+ spacing = self._periodic_spacing[task_name]
+ last_run = self._periodic_last_run[task_name]
+
+ # Check if due, if not skip
+ idle_for = min(idle_for, spacing)
+ if last_run is not None:
+ delta = last_run + spacing - time.time()
+ if delta > 0:
+ idle_for = min(idle_for, delta)
+ continue
+
+ LOG.debug("Running periodic task %(full_task_name)s",
+ {"full_task_name": full_task_name})
+ self._periodic_last_run[task_name] = _nearest_boundary(
+ last_run, spacing)
+
+ try:
+ task(self, context)
+ except Exception as e:
+ if raise_on_error:
+ raise
+ LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
+ {"full_task_name": full_task_name, "e": e})
+ time.sleep(0)
+
+ return idle_for
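+
+# A minimal usage sketch: a manager class collects its decorated methods via
+# the metaclass and drives them from a timer loop (the names Manager and
+# _sync_state are illustrative):
+#
+#   class Manager(PeriodicTasks):
+#
+#       @periodic_task(spacing=30)
+#       def _sync_state(self, context):
+#           ...
+#
+#   manager = Manager()
+#   idle = manager.run_periodic_tasks(context)
+#
+# run_periodic_tasks() returns the number of seconds until the next task is
+# due, which a caller can use as its sleep interval.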
diff --git a/iotronic/openstack/common/service.py b/iotronic/openstack/common/service.py
new file mode 100644
index 0000000..7615496
--- /dev/null
+++ b/iotronic/openstack/common/service.py
@@ -0,0 +1,517 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Generic Node base class for all workers that run on hosts."""
+
+import errno
+import logging
+import os
+import random
+import signal
+import sys
+import time
+
+try:
+ # Importing just the symbol here because the io module does not
+ # exist in Python 2.6.
+ from io import UnsupportedOperation # noqa
+except ImportError:
+ # Python 2.6
+ UnsupportedOperation = None
+
+import eventlet
+from eventlet import event
+from oslo_config import cfg
+
+from iotronic.openstack.common import eventlet_backdoor
+from iotronic.openstack.common._i18n import _LE, _LI, _LW
+from iotronic.openstack.common import systemd
+from iotronic.openstack.common import threadgroup
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+def _sighup_supported():
+ return hasattr(signal, 'SIGHUP')
+
+
+def _is_daemon():
+ # The process group for a foreground process will match the
+ # process group of the controlling terminal. If those values do
+ # not match, or ioctl() fails on the stdout file handle, we assume
+ # the process is running in the background as a daemon.
+ # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
+ try:
+ is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
+ except OSError as err:
+ if err.errno == errno.ENOTTY:
+ # Assume we are a daemon because there is no terminal.
+ is_daemon = True
+ else:
+ raise
+ except UnsupportedOperation:
+ # Could not get the fileno for stdout, so we must be a daemon.
+ is_daemon = True
+ return is_daemon
+
+
+def _is_sighup_and_daemon(signo):
+ if not (_sighup_supported() and signo == signal.SIGHUP):
+ # Avoid checking if we are a daemon, because the signal isn't
+ # SIGHUP.
+ return False
+ return _is_daemon()
+
+
+def _signo_to_signame(signo):
+ signals = {signal.SIGTERM: 'SIGTERM',
+ signal.SIGINT: 'SIGINT'}
+ if _sighup_supported():
+ signals[signal.SIGHUP] = 'SIGHUP'
+ return signals[signo]
+
+
+def _set_signals_handler(handler):
+ signal.signal(signal.SIGTERM, handler)
+ signal.signal(signal.SIGINT, handler)
+ if _sighup_supported():
+ signal.signal(signal.SIGHUP, handler)
+
+
+class Launcher(object):
+ """Launch one or more services and wait for them to complete."""
+
+ def __init__(self):
+ """Initialize the service launcher.
+
+ :returns: None
+
+ """
+ self.services = Services()
+ self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
+
+ def launch_service(self, service):
+ """Load and start the given service.
+
+ :param service: The service you would like to start.
+ :returns: None
+
+ """
+ service.backdoor_port = self.backdoor_port
+ self.services.add(service)
+
+ def stop(self):
+ """Stop all services which are currently running.
+
+ :returns: None
+
+ """
+ self.services.stop()
+
+ def wait(self):
+ """Waits until all services have been stopped, and then returns.
+
+ :returns: None
+
+ """
+ self.services.wait()
+
+ def restart(self):
+ """Reload config files and restart service.
+
+ :returns: None
+
+ """
+ cfg.CONF.reload_config_files()
+ self.services.restart()
+
+
+class SignalExit(SystemExit):
+ def __init__(self, signo, exccode=1):
+ super(SignalExit, self).__init__(exccode)
+ self.signo = signo
+
+
+class ServiceLauncher(Launcher):
+ def _handle_signal(self, signo, frame):
+ # Allow the process to be killed again and die from natural causes
+ _set_signals_handler(signal.SIG_DFL)
+ raise SignalExit(signo)
+
+ def handle_signal(self):
+ _set_signals_handler(self._handle_signal)
+
+ def _wait_for_exit_or_signal(self, ready_callback=None):
+ status = None
+ signo = 0
+
+ LOG.debug('Full set of CONF:')
+ CONF.log_opt_values(LOG, logging.DEBUG)
+
+ try:
+ if ready_callback:
+ ready_callback()
+ super(ServiceLauncher, self).wait()
+ except SignalExit as exc:
+ signame = _signo_to_signame(exc.signo)
+ LOG.info(_LI('Caught %s, exiting'), signame)
+ status = exc.code
+ signo = exc.signo
+ except SystemExit as exc:
+ status = exc.code
+ finally:
+ self.stop()
+
+ return status, signo
+
+ def wait(self, ready_callback=None):
+ systemd.notify_once()
+ while True:
+ self.handle_signal()
+ status, signo = self._wait_for_exit_or_signal(ready_callback)
+ if not _is_sighup_and_daemon(signo):
+ return status
+ self.restart()
+
+
+class ServiceWrapper(object):
+ def __init__(self, service, workers):
+ self.service = service
+ self.workers = workers
+ self.children = set()
+ self.forktimes = []
+
+
+class ProcessLauncher(object):
+ _signal_handlers_set = set()
+
+ @classmethod
+ def _handle_class_signals(cls, *args, **kwargs):
+ for handler in cls._signal_handlers_set:
+ handler(*args, **kwargs)
+
+ def __init__(self, wait_interval=0.01):
+ """Constructor.
+
+ :param wait_interval: The interval to sleep for between checks
+ of child process exit.
+ """
+ self.children = {}
+ self.sigcaught = None
+ self.running = True
+ self.wait_interval = wait_interval
+ rfd, self.writepipe = os.pipe()
+ self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
+ self.handle_signal()
+
+ def handle_signal(self):
+ self._signal_handlers_set.add(self._handle_signal)
+ _set_signals_handler(self._handle_class_signals)
+
+ def _handle_signal(self, signo, frame):
+ self.sigcaught = signo
+ self.running = False
+
+ # Allow the process to be killed again and die from natural causes
+ _set_signals_handler(signal.SIG_DFL)
+
+ def _pipe_watcher(self):
+ # This will block until the write end is closed when the parent
+ # dies unexpectedly
+ self.readpipe.read()
+
+ LOG.info(_LI('Parent process has died unexpectedly, exiting'))
+
+ sys.exit(1)
+
+ def _child_process_handle_signal(self):
+ # Setup child signal handlers differently
+ def _sigterm(*args):
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ raise SignalExit(signal.SIGTERM)
+
+ def _sighup(*args):
+ signal.signal(signal.SIGHUP, signal.SIG_DFL)
+ raise SignalExit(signal.SIGHUP)
+
+ signal.signal(signal.SIGTERM, _sigterm)
+ if _sighup_supported():
+ signal.signal(signal.SIGHUP, _sighup)
+ # Block SIGINT and let the parent send us a SIGTERM
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ def _child_wait_for_exit_or_signal(self, launcher):
+ status = 0
+ signo = 0
+
+ # NOTE(johannes): All exceptions are caught to ensure this
+ # doesn't fall back into the loop spawning children. It would
+ # be bad for a child to spawn more children.
+ try:
+ launcher.wait()
+ except SignalExit as exc:
+ signame = _signo_to_signame(exc.signo)
+ LOG.info(_LI('Child caught %s, exiting'), signame)
+ status = exc.code
+ signo = exc.signo
+ except SystemExit as exc:
+ status = exc.code
+ except BaseException:
+ LOG.exception(_LE('Unhandled exception'))
+ status = 2
+ finally:
+ launcher.stop()
+
+ return status, signo
+
+ def _child_process(self, service):
+ self._child_process_handle_signal()
+
+ # Reopen the eventlet hub to make sure we don't share an epoll
+ # fd with parent and/or siblings, which would be bad
+ eventlet.hubs.use_hub()
+
+ # Close write to ensure only parent has it open
+ os.close(self.writepipe)
+ # Create greenthread to watch for parent to close pipe
+ eventlet.spawn_n(self._pipe_watcher)
+
+ # Reseed random number generator
+ random.seed()
+
+ launcher = Launcher()
+ launcher.launch_service(service)
+ return launcher
+
+ def _start_child(self, wrap):
+ if len(wrap.forktimes) > wrap.workers:
+ # Limit ourselves to one process a second (over the period of
+ # number of workers * 1 second). This will allow workers to
+ # start up quickly but ensure we don't fork off children that
+ # die instantly.
+ if time.time() - wrap.forktimes[0] < wrap.workers:
+ LOG.info(_LI('Forking too fast, sleeping'))
+ time.sleep(1)
+
+ wrap.forktimes.pop(0)
+
+ wrap.forktimes.append(time.time())
+
+ pid = os.fork()
+ if pid == 0:
+ launcher = self._child_process(wrap.service)
+ while True:
+ self._child_process_handle_signal()
+ status, signo = self._child_wait_for_exit_or_signal(launcher)
+ if not _is_sighup_and_daemon(signo):
+ break
+ launcher.restart()
+
+ os._exit(status)
+
+ LOG.info(_LI('Started child %d'), pid)
+
+ wrap.children.add(pid)
+ self.children[pid] = wrap
+
+ return pid
+
+ def launch_service(self, service, workers=1):
+ wrap = ServiceWrapper(service, workers)
+
+ LOG.info(_LI('Starting %d workers'), wrap.workers)
+ while self.running and len(wrap.children) < wrap.workers:
+ self._start_child(wrap)
+
+ def _wait_child(self):
+ try:
+ # Don't block if no child processes have exited
+ pid, status = os.waitpid(0, os.WNOHANG)
+ if not pid:
+ return None
+ except OSError as exc:
+ if exc.errno not in (errno.EINTR, errno.ECHILD):
+ raise
+ return None
+
+ if os.WIFSIGNALED(status):
+ sig = os.WTERMSIG(status)
+ LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
+ dict(pid=pid, sig=sig))
+ else:
+ code = os.WEXITSTATUS(status)
+ LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
+ dict(pid=pid, code=code))
+
+ if pid not in self.children:
+ LOG.warning(_LW('pid %d not in child list'), pid)
+ return None
+
+ wrap = self.children.pop(pid)
+ wrap.children.remove(pid)
+ return wrap
+
+ def _respawn_children(self):
+ while self.running:
+ wrap = self._wait_child()
+ if not wrap:
+ # Yield to other threads if no children have exited
+ # Sleep for a short time to avoid excessive CPU usage
+ # (see bug #1095346)
+ eventlet.greenthread.sleep(self.wait_interval)
+ continue
+ while self.running and len(wrap.children) < wrap.workers:
+ self._start_child(wrap)
+
+ def wait(self):
+ """Loop waiting on children to die and respawning as necessary."""
+
+ systemd.notify_once()
+ LOG.debug('Full set of CONF:')
+ CONF.log_opt_values(LOG, logging.DEBUG)
+
+ try:
+ while True:
+ self.handle_signal()
+ self._respawn_children()
+ # No signal means that stop was called. Don't clean up here.
+ if not self.sigcaught:
+ return
+
+ signame = _signo_to_signame(self.sigcaught)
+ LOG.info(_LI('Caught %s, stopping children'), signame)
+ if not _is_sighup_and_daemon(self.sigcaught):
+ break
+
+ cfg.CONF.reload_config_files()
+ for service in set(
+ [wrap.service for wrap in self.children.values()]):
+ service.reset()
+
+ for pid in self.children:
+ os.kill(pid, signal.SIGHUP)
+
+ self.running = True
+ self.sigcaught = None
+ except eventlet.greenlet.GreenletExit:
+ LOG.info(_LI("Wait called after thread killed. Cleaning up."))
+
+ self.stop()
+
+ def stop(self):
+ """Terminate child processes and wait on each."""
+ self.running = False
+ for pid in self.children:
+ try:
+ os.kill(pid, signal.SIGTERM)
+ except OSError as exc:
+ if exc.errno != errno.ESRCH:
+ raise
+
+ # Wait for children to die
+ if self.children:
+ LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
+ while self.children:
+ self._wait_child()
+
+
+class Service(object):
+ """Service object for binaries running on hosts."""
+
+ def __init__(self, threads=1000):
+ self.tg = threadgroup.ThreadGroup(threads)
+
+ # signal that the service is done shutting itself down:
+ self._done = event.Event()
+
+ def reset(self):
+ # NOTE(Fengqian): docs for Event.reset() recommend against using it
+ self._done = event.Event()
+
+ def start(self):
+ pass
+
+ def stop(self, graceful=False):
+ self.tg.stop(graceful)
+ self.tg.wait()
+ # Signal that service cleanup is done:
+ if not self._done.ready():
+ self._done.send()
+
+ def wait(self):
+ self._done.wait()
+
+
+class Services(object):
+
+ def __init__(self):
+ self.services = []
+ self.tg = threadgroup.ThreadGroup()
+ self.done = event.Event()
+
+ def add(self, service):
+ self.services.append(service)
+ self.tg.add_thread(self.run_service, service, self.done)
+
+ def stop(self):
+ # wait for graceful shutdown of services:
+ for service in self.services:
+ service.stop()
+ service.wait()
+
+ # Each service has performed cleanup, now signal that the run_service
+ # wrapper threads can now die:
+ if not self.done.ready():
+ self.done.send()
+
+ # reap threads:
+ self.tg.stop()
+
+ def wait(self):
+ self.tg.wait()
+
+ def restart(self):
+ self.stop()
+ self.done = event.Event()
+ for restart_service in self.services:
+ restart_service.reset()
+ self.tg.add_thread(self.run_service, restart_service, self.done)
+
+ @staticmethod
+ def run_service(service, done):
+ """Service start wrapper.
+
+ :param service: service to run
+ :param done: event to wait on until a shutdown is triggered
+ :returns: None
+
+ """
+ service.start()
+ done.wait()
+
+
+def launch(service, workers=1):
+ if workers is None or workers == 1:
+ launcher = ServiceLauncher()
+ launcher.launch_service(service)
+ else:
+ launcher = ProcessLauncher()
+ launcher.launch_service(service, workers=workers)
+
+ return launcher
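+
+# A minimal usage sketch: a concrete Service adds its work to the thread
+# group in start(); launch() picks ServiceLauncher or ProcessLauncher based
+# on the workers count (ConductorService and _report_state are illustrative
+# names):
+#
+#   class ConductorService(Service):
+#       def start(self):
+#           super(ConductorService, self).start()
+#           self.tg.add_timer(60, self._report_state)
+#
+#   launcher = launch(ConductorService(), workers=1)
+#   launcher.wait()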
diff --git a/iotronic/openstack/common/systemd.py b/iotronic/openstack/common/systemd.py
new file mode 100644
index 0000000..36243b3
--- /dev/null
+++ b/iotronic/openstack/common/systemd.py
@@ -0,0 +1,105 @@
+# Copyright 2012-2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper module for systemd service readiness notification.
+"""
+
+import logging
+import os
+import socket
+import sys
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _abstractify(socket_name):
+ if socket_name.startswith('@'):
+ # abstract namespace socket
+ socket_name = '\0%s' % socket_name[1:]
+ return socket_name
+
+
+def _sd_notify(unset_env, msg):
+ notify_socket = os.getenv('NOTIFY_SOCKET')
+ if notify_socket:
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ try:
+ sock.connect(_abstractify(notify_socket))
+ sock.sendall(msg)
+ if unset_env:
+ del os.environ['NOTIFY_SOCKET']
+ except EnvironmentError:
+ LOG.debug("Systemd notification failed", exc_info=True)
+ finally:
+ sock.close()
+
+
+def notify():
+ """Send notification to Systemd that service is ready.
+
+ For details see
+ http://www.freedesktop.org/software/systemd/man/sd_notify.html
+ """
+ _sd_notify(False, 'READY=1')
+
+
+def notify_once():
+ """Send notification once to Systemd that service is ready.
+
+ Systemd sets NOTIFY_SOCKET environment variable with the name of the
+ socket listening for notifications from services.
+ This method removes the NOTIFY_SOCKET environment variable to ensure
+ notification is sent only once.
+ """
+ _sd_notify(True, 'READY=1')
+
+
+def onready(notify_socket, timeout):
+ """Wait for systemd style notification on the socket.
+
+ :param notify_socket: local socket address
+ :type notify_socket: string
+ :param timeout: socket timeout
+ :type timeout: float
+ :returns: 0 service ready
+ 1 service not ready
+ 2 timeout occurred
+ """
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ sock.settimeout(timeout)
+ sock.bind(_abstractify(notify_socket))
+ try:
+ msg = sock.recv(512)
+ except socket.timeout:
+ return 2
+ finally:
+ sock.close()
+ if 'READY=1' in msg:
+ return 0
+ else:
+ return 1
+
+
+if __name__ == '__main__':
+ # simple CLI for testing
+ if len(sys.argv) == 1:
+ notify()
+ elif len(sys.argv) >= 2:
+ timeout = float(sys.argv[1])
+ notify_socket = os.getenv('NOTIFY_SOCKET')
+ if notify_socket:
+ retval = onready(notify_socket, timeout)
+ sys.exit(retval)
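+
+# A minimal usage sketch: for READY=1 to have any effect the service must be
+# started by systemd from a notify-type unit, e.g.:
+#
+#   [Service]
+#   Type=notify
+#   NotifyAccess=all
+#   ExecStart=/usr/bin/iotronic-conductor
+#
+# notify_once() is then called by the launchers in service.py once the
+# service is up; the unit content above is an illustration, not shipped
+# with this tree.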
diff --git a/iotronic/openstack/common/threadgroup.py b/iotronic/openstack/common/threadgroup.py
new file mode 100644
index 0000000..f36f279
--- /dev/null
+++ b/iotronic/openstack/common/threadgroup.py
@@ -0,0 +1,149 @@
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import logging
+import threading
+
+import eventlet
+from eventlet import greenpool
+
+from iotronic.openstack.common import loopingcall
+
+
+LOG = logging.getLogger(__name__)
+
+
+def _thread_done(gt, *args, **kwargs):
+ """Callback function to be passed to GreenThread.link() when we spawn()
+ Calls the :class:`ThreadGroup` to notify if.
+
+ """
+ kwargs['group'].thread_done(kwargs['thread'])
+
+
+class Thread(object):
+ """Wrapper around a greenthread, that holds a reference to the
+ :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when
+ it has done so it can be removed from the threads list.
+ """
+ def __init__(self, thread, group):
+ self.thread = thread
+ self.thread.link(_thread_done, group=group, thread=self)
+
+ def stop(self):
+ self.thread.kill()
+
+ def wait(self):
+ return self.thread.wait()
+
+ def link(self, func, *args, **kwargs):
+ self.thread.link(func, *args, **kwargs)
+
+
+class ThreadGroup(object):
+ """The point of the ThreadGroup class is to:
+
+ * keep track of timers and greenthreads (making it easier to stop them
+ when need be).
+ * provide an easy API to add timers.
+ """
+ def __init__(self, thread_pool_size=10):
+ self.pool = greenpool.GreenPool(thread_pool_size)
+ self.threads = []
+ self.timers = []
+
+ def add_dynamic_timer(self, callback, initial_delay=None,
+ periodic_interval_max=None, *args, **kwargs):
+ timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
+ timer.start(initial_delay=initial_delay,
+ periodic_interval_max=periodic_interval_max)
+ self.timers.append(timer)
+
+ def add_timer(self, interval, callback, initial_delay=None,
+ *args, **kwargs):
+ pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
+ pulse.start(interval=interval,
+ initial_delay=initial_delay)
+ self.timers.append(pulse)
+
+ def add_thread(self, callback, *args, **kwargs):
+ gt = self.pool.spawn(callback, *args, **kwargs)
+ th = Thread(gt, self)
+ self.threads.append(th)
+ return th
+
+ def thread_done(self, thread):
+ self.threads.remove(thread)
+
+ def _stop_threads(self):
+ current = threading.current_thread()
+
+ # Iterate over a copy of self.threads so thread_done doesn't
+ # modify the list while we're iterating
+ for x in self.threads[:]:
+ if x is current:
+ # don't kill the current thread.
+ continue
+ try:
+ x.stop()
+ except eventlet.greenlet.GreenletExit:
+ pass
+ except Exception as ex:
+ LOG.exception(ex)
+
+ def stop_timers(self):
+ for x in self.timers:
+ try:
+ x.stop()
+ except Exception as ex:
+ LOG.exception(ex)
+ self.timers = []
+
+ def stop(self, graceful=False):
+ """stop function has the option of graceful=True/False.
+
+ * In case of graceful=True, wait for all threads to be finished.
+ Never kill threads.
+ * In case of graceful=False, kill threads immediately.
+ """
+ self.stop_timers()
+ if graceful:
+ # In case of graceful=True, wait for all threads to be
+ # finished, never kill threads
+ self.wait()
+ else:
+ # In case of graceful=False(Default), kill threads
+ # immediately
+ self._stop_threads()
+
+ def wait(self):
+ for x in self.timers:
+ try:
+ x.wait()
+ except eventlet.greenlet.GreenletExit:
+ pass
+ except Exception as ex:
+ LOG.exception(ex)
+ current = threading.current_thread()
+
+ # Iterate over a copy of self.threads so thread_done doesn't
+ # modify the list while we're iterating
+ for x in self.threads[:]:
+ if x is current:
+ continue
+ try:
+ x.wait()
+ except eventlet.greenlet.GreenletExit:
+ pass
+ except Exception as ex:
+ LOG.exception(ex)
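+
+# A minimal usage sketch (worker_loop and heartbeat are illustrative
+# callables):
+#
+#   tg = ThreadGroup(thread_pool_size=10)
+#   tg.add_thread(worker_loop)    # long-running greenthread
+#   tg.add_timer(30, heartbeat)   # fixed-interval timer
+#   ...
+#   tg.stop(graceful=True)        # stop timers, wait for threads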
diff --git a/iotronic/openstack/common/versionutils.py b/iotronic/openstack/common/versionutils.py
new file mode 100644
index 0000000..6a01d70
--- /dev/null
+++ b/iotronic/openstack/common/versionutils.py
@@ -0,0 +1,262 @@
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helpers for comparing version strings.
+"""
+
+import copy
+import functools
+import inspect
+import logging
+
+from oslo_config import cfg
+import pkg_resources
+import six
+
+from iotronic.openstack.common._i18n import _
+
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+
+deprecated_opts = [
+ cfg.BoolOpt('fatal_deprecations',
+ default=False,
+ help='Enables or disables fatal status of deprecations.'),
+]
+
+
+def list_opts():
+ """Entry point for oslo_config-generator.
+ """
+ return [(None, copy.deepcopy(deprecated_opts))]
+
+
+class deprecated(object):
+ """A decorator to mark callables as deprecated.
+
+ This decorator logs a deprecation message when the callable it decorates is
+ used. The message will include the release where the callable was
+ deprecated, the release where it may be removed and possibly an optional
+ replacement.
+
+ Examples:
+
+ 1. Specifying the required deprecated release
+
+ >>> @deprecated(as_of=deprecated.ICEHOUSE)
+ ... def a(): pass
+
+ 2. Specifying a replacement:
+
+ >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
+ ... def b(): pass
+
+ 3. Specifying the release where the functionality may be removed:
+
+ >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
+ ... def c(): pass
+
+ 4. Specifying the deprecated functionality will not be removed:
+
+ >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
+ ... def d(): pass
+
+ 5. Specifying a replacement, deprecated functionality will not be removed:
+
+ >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
+ ... def e(): pass
+
+ """
+
+ # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
+ # expected we maintain a gap between Bexar and Folsom in this list.
+ BEXAR = 'B'
+ FOLSOM = 'F'
+ GRIZZLY = 'G'
+ HAVANA = 'H'
+ ICEHOUSE = 'I'
+ JUNO = 'J'
+ KILO = 'K'
+ LIBERTY = 'L'
+
+ _RELEASES = {
+ # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
+ # expected we maintain a gap between Bexar and Folsom in this list.
+ 'B': 'Bexar',
+ 'F': 'Folsom',
+ 'G': 'Grizzly',
+ 'H': 'Havana',
+ 'I': 'Icehouse',
+ 'J': 'Juno',
+ 'K': 'Kilo',
+ 'L': 'Liberty',
+ }
+
+ _deprecated_msg_with_alternative = _(
+ '%(what)s is deprecated as of %(as_of)s in favor of '
+ '%(in_favor_of)s and may be removed in %(remove_in)s.')
+
+ _deprecated_msg_no_alternative = _(
+ '%(what)s is deprecated as of %(as_of)s and may be '
+ 'removed in %(remove_in)s. It will not be superseded.')
+
+ _deprecated_msg_with_alternative_no_removal = _(
+ '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
+
+ _deprecated_msg_with_no_alternative_no_removal = _(
+ '%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
+
+ def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
+ """Initialize decorator
+
+ :param as_of: the release deprecating the callable. Constants
+ are defined in this class for convenience.
+ :param in_favor_of: the replacement for the callable (optional)
+ :param remove_in: an integer specifying how many releases to wait
+ before removing (default: 2)
+ :param what: name of the thing being deprecated (default: the
+ callable's name)
+
+ """
+ self.as_of = as_of
+ self.in_favor_of = in_favor_of
+ self.remove_in = remove_in
+ self.what = what
+
+ def __call__(self, func_or_cls):
+ if not self.what:
+ self.what = func_or_cls.__name__ + '()'
+ msg, details = self._build_message()
+
+ if inspect.isfunction(func_or_cls):
+
+ @six.wraps(func_or_cls)
+ def wrapped(*args, **kwargs):
+ report_deprecated_feature(LOG, msg, details)
+ return func_or_cls(*args, **kwargs)
+ return wrapped
+ elif inspect.isclass(func_or_cls):
+ orig_init = func_or_cls.__init__
+
+ # TODO(tsufiev): change `functools` module to `six` as
+ # soon as six 1.7.4 (with fix for passing `assigned`
+ # argument to underlying `functools.wraps`) is released
+ # and added to the oslo_incubator requirements
+ @functools.wraps(orig_init, assigned=('__name__', '__doc__'))
+ def new_init(self, *args, **kwargs):
+ report_deprecated_feature(LOG, msg, details)
+ orig_init(self, *args, **kwargs)
+ func_or_cls.__init__ = new_init
+ return func_or_cls
+ else:
+ raise TypeError('deprecated can be used only with functions or '
+ 'classes')
+
+ def _get_safe_to_remove_release(self, release):
+ # TODO(dstanek): this method will have to be reimplemented once
+ # when we get to the X release because once we get to the Y
+ # release, what is Y+2?
+ new_release = chr(ord(release) + self.remove_in)
+ if new_release in self._RELEASES:
+ return self._RELEASES[new_release]
+ else:
+ return new_release
+
+ def _build_message(self):
+ details = dict(what=self.what,
+ as_of=self._RELEASES[self.as_of],
+ remove_in=self._get_safe_to_remove_release(self.as_of))
+
+ if self.in_favor_of:
+ details['in_favor_of'] = self.in_favor_of
+ if self.remove_in > 0:
+ msg = self._deprecated_msg_with_alternative
+ else:
+ # There are no plans to remove this function, but it is
+ # now deprecated.
+ msg = self._deprecated_msg_with_alternative_no_removal
+ else:
+ if self.remove_in > 0:
+ msg = self._deprecated_msg_no_alternative
+ else:
+ # There are no plans to remove this function, but it is
+ # now deprecated.
+ msg = self._deprecated_msg_with_no_alternative_no_removal
+ return msg, details
+
+
+def is_compatible(requested_version, current_version, same_major=True):
+ """Determine whether `requested_version` is satisfied by
+ `current_version`; in other words, `current_version` is >=
+ `requested_version`.
+
+ :param requested_version: version to check for compatibility
+ :param current_version: version to check against
+ :param same_major: if True, the major version must be identical between
+ `requested_version` and `current_version`. This is used when a
+ major-version difference indicates incompatibility between the two
+ versions. Since this is the common-case in practice, the default is
+ True.
+ :returns: True if compatible, False if not
+ """
+ requested_parts = pkg_resources.parse_version(requested_version)
+ current_parts = pkg_resources.parse_version(current_version)
+
+ if same_major and (requested_parts[0] != current_parts[0]):
+ return False
+
+ return current_parts >= requested_parts
+
+
+# Track the messages we have sent already. See
+# report_deprecated_feature().
+_deprecated_messages_sent = {}
+
+
+def report_deprecated_feature(logger, msg, *args, **kwargs):
+ """Call this function when a deprecated feature is used.
+
+ If the system is configured for fatal deprecations then the message
+ is logged at the 'critical' level and :class:`DeprecatedConfig` will
+ be raised.
+
+ Otherwise, the message will be logged (once) at the 'warn' level.
+
+ :raises: :class:`DeprecatedConfig` if the system is configured for
+ fatal deprecations.
+ """
+ stdmsg = _("Deprecated: %s") % msg
+ CONF.register_opts(deprecated_opts)
+ if CONF.fatal_deprecations:
+ logger.critical(stdmsg, *args, **kwargs)
+ raise DeprecatedConfig(msg=stdmsg)
+
+ # Using a list because a tuple with dict can't be stored in a set.
+ sent_args = _deprecated_messages_sent.setdefault(msg, list())
+
+ if args in sent_args:
+ # Already logged this message, so don't log it again.
+ return
+
+ sent_args.append(args)
+ logger.warn(stdmsg, *args, **kwargs)
+
+
+class DeprecatedConfig(Exception):
+ message = _("Fatal call to deprecated config: %(msg)s")
+
+ def __init__(self, msg):
+ super(Exception, self).__init__(self.message % dict(msg=msg))
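+
+# A minimal usage sketch (old_helper and new_helper are illustrative names):
+#
+#   @deprecated(as_of=deprecated.KILO, in_favor_of='new_helper()')
+#   def old_helper():
+#       ...
+#
+#   is_compatible('2.0', '2.1')   # True: 2.1 satisfies a 2.0 requirement
+#   is_compatible('2.0', '3.0')   # False: the major versions differ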
diff --git a/iotronic/version.py b/iotronic/version.py
new file mode 100644
index 0000000..d90dbec
--- /dev/null
+++ b/iotronic/version.py
@@ -0,0 +1,18 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pbr.version
+
+version_info = pbr.version.VersionInfo('iotronic')
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..7a7584f
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,80 @@
+import os
+
+from setuptools import find_packages, setup
+
+
+def read(fname):
+ return open(os.path.join(os.path.dirname(__file__), fname)).read()
+
+
+setup(
+ name="iotronic",
+ packages=find_packages(),
+ version="0.1",
+ description="iot",
+ author="",
+ author_email="",
+ url="",
+ download_url="",
+ keywords=["iotronic", "iot", "s4t"],
+ classifiers=[
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 2.7",
+ "Development Status :: 4 - Beta",
+ "Environment :: Other Environment",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: GNU General Public License (GPL)",
+ "Operating System :: OS Independent",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ license='GPL',
+ platforms=['Any'],
+ install_requires=[],
+ include_package_data=True,
+ data_files=[
+ ('/usr/bin', ['bin/iotronic-conductor']),
+ ],
+ zip_safe=False,
+ # long_description=read('README.txt'),
+)