Switch to oslo.config
This patch set adds support for generating config files using oslo-config-generator. Old invocation without --config-file is no longer supported. Co-Authored-By: Dmitry Tantsur <dtantsur@redhat.com> Change-Id: If640e6dc83c971a8f82f9b245a4496d298dfd042 Closes-Bug: #1398359
This commit is contained in:
parent
b9e8e1e73f
commit
9ec4f0fbc8
13
README.rst
13
README.rst
|
@ -335,6 +335,16 @@ See `1.1.0 release tracking page`_ for details.
|
|||
|
||||
* ``overwrite_existing`` is now enabled by default.
|
||||
|
||||
* Running the service as
|
||||
::
|
||||
|
||||
$ ironic-discoverd /path/to/config
|
||||
|
||||
is no longer supported, use
|
||||
::
|
||||
|
||||
$ ironic-discoverd --config-file /path/to/config
|
||||
|
||||
**Major Features**
|
||||
|
||||
* Default to only creating a port for the NIC that the ramdisk was PXE booted
|
||||
|
@ -357,6 +367,9 @@ See `1.1.0 release tracking page`_ for details.
|
|||
* The default value for ``overwrite_existing`` configuration option was
|
||||
flipped, matching the default behavior for Ironic inspection.
|
||||
|
||||
* Switch to `oslo.config <http://docs.openstack.org/developer/oslo.config/>`_
|
||||
for configuration management (many thanks to Yuiko Takada).
|
||||
|
||||
**Other Changes**
|
||||
|
||||
* New option ``add_ports`` allows precise control over which ports to add,
|
||||
|
|
167
example.conf
167
example.conf
|
@ -1,79 +1,114 @@
|
|||
[DEFAULT]
|
||||
|
||||
|
||||
[discoverd]
|
||||
;; Ironic and Keystone connection settings
|
||||
; Authentication options are mandatory and don't have reasonable defaults.
|
||||
|
||||
; Keystone authentication endpoint.
|
||||
;os_auth_url = http://127.0.0.1:5000/v2.0
|
||||
; User name for accessing Keystone and Ironic API.
|
||||
;os_username =
|
||||
; Password for accessing Keystone and Ironic API.
|
||||
;os_password =
|
||||
; Tenant name for accessing Keystone and Ironic API.
|
||||
;os_tenant_name =
|
||||
; Keystone admin endpoint.
|
||||
;identity_uri = http://127.0.0.1:35357
|
||||
#
|
||||
# From ironic_discoverd
|
||||
#
|
||||
|
||||
; Number of attempts to do when trying to connect to Ironic on start up.
|
||||
;ironic_retry_attempts = 5
|
||||
; Amount of time between attempts to connect to Ironic on start up.
|
||||
;ironic_retry_period = 5
|
||||
# Keystone authentication endpoint. (string value)
|
||||
#os_auth_url = http://127.0.0.1:5000/v2.0
|
||||
|
||||
;; Firewall management settings
|
||||
# User name for accessing Keystone and Ironic API. (string value)
|
||||
#os_username =
|
||||
|
||||
; Whether to manage firewall rules for PXE port.
|
||||
;manage_firewall = true
|
||||
; Interface on which dnsmasq listens, the default is for VM's.
|
||||
;dnsmasq_interface = br-ctlplane
|
||||
; Amount of time in seconds, after which repeat periodic update of firewall.
|
||||
;firewall_update_period = 15
|
||||
# Password for accessing Keystone and Ironic API. (string value)
|
||||
#os_password =
|
||||
|
||||
;; Introspection process settings
|
||||
# Tenant name for accessing Keystone and Ironic API. (string value)
|
||||
#os_tenant_name =
|
||||
|
||||
; Which MAC addresses to add as ports during introspection. Possible values:
|
||||
; all (all MAC addresses), active (MAC addresses of NIC with IP addresses),
|
||||
; pxe (only MAC address of NIC node PXE booted from, falls back to 'active' if
|
||||
; PXE MAC not supplied by the ramdisk).
|
||||
;add_ports = pxe
|
||||
; Timeout after which introspection is considered failed, set to 0 to disable.
|
||||
;timeout = 3600
|
||||
; For how much time (in seconds) to keep status information about nodes after
|
||||
; introspection was finished for them. Default value is 1 week.
|
||||
;node_status_keep_time = 604800
|
||||
; Amount of time in seconds, after which repeat clean up of timed out nodes
|
||||
; and old nodes status information.
|
||||
;clean_up_period = 60
|
||||
; Whether to overwrite existing values in node database.
|
||||
; Disable this option to make introspection a non-destructive operation.
|
||||
;overwrite_existing = true
|
||||
; Whether to enable setting IPMI credentials during introspection. This is an
|
||||
; experimental and not well tested feature, use at your own risk.
|
||||
;enable_setting_ipmi_credentials = false
|
||||
# Keystone admin endpoint. (string value)
|
||||
#identity_uri = http://127.0.0.1:35357
|
||||
|
||||
;; HTTP settings
|
||||
# Number of attempts to do when trying to connect to Ironic on start
|
||||
# up. (integer value)
|
||||
#ironic_retry_attempts = 5
|
||||
|
||||
; IP to listen on.
|
||||
;listen_address = 0.0.0.0
|
||||
; Port to listen on.
|
||||
;listen_port = 5050
|
||||
; Whether to authenticate with Keystone on public HTTP endpoints.
|
||||
; Note that introspection ramdisk postback endpoint is never authenticated.
|
||||
;authenticate = true
|
||||
# Amount of time between attempts to connect to Ironic on start up.
|
||||
# (integer value)
|
||||
#ironic_retry_period = 5
|
||||
|
||||
;; General service settings
|
||||
# Whether to manage firewall rules for PXE port. (boolean value)
|
||||
#manage_firewall = true
|
||||
|
||||
; SQLite3 database to store nodes under introspection, required.
|
||||
; Do not use :memory: here, it won't work.
|
||||
;database =
|
||||
; Comma-separated list of enabled hooks for processing pipeline.
|
||||
; Hook 'scheduler' updates the node with the minimum properties required by the
|
||||
; Nova scheduler. Hook 'validate_interfaces' ensures that valid NIC data was
|
||||
; provided by the ramdisk.
|
||||
; Do not exclude these two unless you really know what you're doing.
|
||||
;processing_hooks = scheduler,validate_interfaces
|
||||
; Debug mode enabled/disabled.
|
||||
;debug = false
|
||||
# Interface on which dnsmasq listens, the default is for VM's. (string
|
||||
# value)
|
||||
#dnsmasq_interface = br-ctlplane
|
||||
|
||||
;; Deprecated options
|
||||
# Amount of time in seconds, after which repeat periodic update of
|
||||
# firewall. (integer value)
|
||||
#firewall_update_period = 15
|
||||
|
||||
; Use add_ports
|
||||
;ports_for_inactive_interfaces = false
|
||||
# Which MAC addresses to add as ports during introspection. Possible
|
||||
# values: all (all MAC addresses), active (MAC addresses of NIC with
|
||||
# IP addresses), pxe (only MAC address of NIC node PXE booted from,
|
||||
# falls back to "active" if PXE MAC is not supplied by the ramdisk).
|
||||
# (string value)
|
||||
# Allowed values: all, active, pxe
|
||||
#add_ports = pxe
|
||||
|
||||
# Timeout after which introspection is considered failed, set to 0 to
|
||||
# disable. (integer value)
|
||||
#timeout = 3600
|
||||
|
||||
# For how much time (in seconds) to keep status information about
|
||||
# nodes after introspection was finished for them. Default value is 1
|
||||
# week. (integer value)
|
||||
#node_status_keep_time = 604800
|
||||
|
||||
# Amount of time in seconds, after which repeat clean up of timed out
|
||||
# nodes and old nodes status information. (integer value)
|
||||
#clean_up_period = 60
|
||||
|
||||
# Whether to overwrite existing values in node database. Disable this
|
||||
# option to make introspection a non-destructive operation. (boolean
|
||||
# value)
|
||||
#overwrite_existing = true
|
||||
|
||||
# Whether to enable setting IPMI credentials during introspection.
|
||||
# This is an experimental and not well tested feature, use at your own
|
||||
# risk. (boolean value)
|
||||
#enable_setting_ipmi_credentials = false
|
||||
|
||||
# IP to listen on. (string value)
|
||||
#listen_address = 0.0.0.0
|
||||
|
||||
# Port to listen on. (integer value)
|
||||
#listen_port = 5050
|
||||
|
||||
# Whether to authenticate with Keystone on public HTTP endpoints. Note
|
||||
# that introspection ramdisk postback endpoint is never authenticated.
|
||||
# (boolean value)
|
||||
#authenticate = true
|
||||
|
||||
# SQLite3 database to store nodes under introspection, required. Do
|
||||
# not use :memory: here, it won't work. (string value)
|
||||
#database =
|
||||
|
||||
# Comma-separated list of enabled hooks for processing pipeline. Hook
|
||||
# 'scheduler' updates the node with the minimum properties required by
|
||||
# the Nova scheduler. Hook 'validate_interfaces' ensures that valid
|
||||
# NIC data was provided by the ramdisk. Do not exclude these two unless
|
||||
# you really know what you're doing. (string value)
|
||||
#processing_hooks = scheduler,validate_interfaces
|
||||
|
||||
# Debug mode enabled/disabled. (boolean value)
|
||||
#debug = false
|
||||
|
||||
# DEPRECATED: use add_ports. (boolean value)
|
||||
#ports_for_inactive_interfaces = false
|
||||
|
||||
|
||||
[edeploy]
|
||||
|
||||
#
|
||||
# From ironic_discoverd.plugins.edeploy
|
||||
#
|
||||
|
||||
# (string value)
|
||||
#lockname = /var/lock/discoverd.lock
|
||||
|
||||
# (string value)
|
||||
#configdir = /etc/edeploy
|
||||
|
|
|
@ -29,7 +29,6 @@ import mock
|
|||
import requests
|
||||
|
||||
from ironic_discoverd import client
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd import main
|
||||
from ironic_discoverd.test import base
|
||||
from ironic_discoverd import utils
|
||||
|
@ -41,8 +40,9 @@ os_auth_url = http://url
|
|||
os_username = user
|
||||
os_password = password
|
||||
os_tenant_name = tenant
|
||||
manage_firewall = false
|
||||
enable_setting_ipmi_credentials = true
|
||||
manage_firewall = False
|
||||
enable_setting_ipmi_credentials = True
|
||||
database = %(db_file)s
|
||||
"""
|
||||
|
||||
ROOT = './functest/env'
|
||||
|
@ -57,8 +57,6 @@ JQ = "https://stedolan.github.io/jq/download/linux64/jq"
|
|||
class Test(base.NodeTest):
|
||||
def setUp(self):
|
||||
super(Test, self).setUp()
|
||||
conf.CONF.set('discoverd', 'manage_firewall', 'false')
|
||||
conf.CONF.set('discoverd', 'enable_setting_ipmi_credentials', 'true')
|
||||
self.node.properties.clear()
|
||||
|
||||
self.cli = utils.get_client()
|
||||
|
@ -172,12 +170,12 @@ def run(client_mock, keystone_mock):
|
|||
d = tempfile.mkdtemp()
|
||||
try:
|
||||
conf_file = os.path.join(d, 'test.conf')
|
||||
db_file = os.path.join(d, 'test.db')
|
||||
with open(conf_file, 'wb') as fp:
|
||||
fp.write(CONF)
|
||||
sys.argv[1:] = ['--config-file', conf_file]
|
||||
base.init_test_conf()
|
||||
fp.write(CONF % {'db_file': db_file})
|
||||
|
||||
eventlet.greenthread.spawn_n(main.main)
|
||||
eventlet.greenthread.spawn_n(main.main,
|
||||
args=['--config-file', conf_file])
|
||||
eventlet.greenthread.sleep(1)
|
||||
suite = unittest.TestLoader().loadTestsFromTestCase(Test)
|
||||
res = unittest.TextTestRunner().run(suite)
|
||||
|
|
|
@ -11,57 +11,115 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import functools
|
||||
|
||||
from six.moves import configparser
|
||||
from oslo_config import cfg
|
||||
|
||||
|
||||
# TODO(dtantsur): switch to oslo.db
|
||||
DEFAULTS = {
|
||||
# Keystone credentials
|
||||
'os_auth_url': 'http://127.0.0.1:5000/v2.0',
|
||||
'identity_uri': 'http://127.0.0.1:35357',
|
||||
# Ironic and Keystone connection settings
|
||||
'ironic_retry_attempts': '5',
|
||||
'ironic_retry_period': '5',
|
||||
# Firewall management settings
|
||||
'manage_firewall': 'true',
|
||||
'dnsmasq_interface': 'br-ctlplane',
|
||||
'firewall_update_period': '15',
|
||||
# Introspection process settings
|
||||
'add_ports': 'pxe',
|
||||
'timeout': '3600',
|
||||
'node_status_keep_time': '604800',
|
||||
'clean_up_period': '60',
|
||||
'overwrite_existing': 'true',
|
||||
'enable_setting_ipmi_credentials': 'false',
|
||||
# HTTP settings
|
||||
'listen_address': '0.0.0.0',
|
||||
'listen_port': '5050',
|
||||
'authenticate': 'true',
|
||||
# General service settings
|
||||
'processing_hooks': 'scheduler,validate_interfaces',
|
||||
'debug': 'false',
|
||||
}
|
||||
VALID_ADD_PORTS_VALUES = ('all', 'active', 'pxe')
|
||||
|
||||
SERVICE_OPTS = [
|
||||
cfg.StrOpt('os_auth_url',
|
||||
default='http://127.0.0.1:5000/v2.0',
|
||||
help='Keystone authentication endpoint.'),
|
||||
cfg.StrOpt('os_username',
|
||||
default='',
|
||||
help='User name for accessing Keystone and Ironic API.'),
|
||||
cfg.StrOpt('os_password',
|
||||
default='',
|
||||
help='Password for accessing Keystone and Ironic API.',
|
||||
secret=True),
|
||||
cfg.StrOpt('os_tenant_name',
|
||||
default='',
|
||||
help='Tenant name for accessing Keystone and Ironic API.'),
|
||||
cfg.StrOpt('identity_uri',
|
||||
default='http://127.0.0.1:35357',
|
||||
help='Keystone admin endpoint.'),
|
||||
cfg.IntOpt('ironic_retry_attempts',
|
||||
default=5,
|
||||
help='Number of attempts to do when trying to connect to '
|
||||
'Ironic on start up.'),
|
||||
cfg.IntOpt('ironic_retry_period',
|
||||
default=5,
|
||||
help='Amount of time between attempts to connect to Ironic '
|
||||
'on start up.'),
|
||||
cfg.BoolOpt('manage_firewall',
|
||||
default=True,
|
||||
help='Whether to manage firewall rules for PXE port.'),
|
||||
cfg.StrOpt('dnsmasq_interface',
|
||||
default='br-ctlplane',
|
||||
help='Interface on which dnsmasq listens, the default is for '
|
||||
'VM\'s.'),
|
||||
cfg.IntOpt('firewall_update_period',
|
||||
default=15,
|
||||
help='Amount of time in seconds, after which repeat periodic '
|
||||
'update of firewall.'),
|
||||
cfg.StrOpt('add_ports',
|
||||
default='pxe',
|
||||
help='Which MAC addresses to add as ports during '
|
||||
'introspection. Possible values: '
|
||||
'all (all MAC addresses), active (MAC addresses of NIC with IP '
|
||||
'addresses), pxe (only MAC address of NIC node PXE booted '
|
||||
'from, falls back to "active" if PXE MAC is not supplied '
|
||||
'by the ramdisk).',
|
||||
choices=VALID_ADD_PORTS_VALUES),
|
||||
cfg.IntOpt('timeout',
|
||||
default=3600,
|
||||
help='Timeout after which introspection is considered failed, '
|
||||
'set to 0 to disable.'),
|
||||
cfg.IntOpt('node_status_keep_time',
|
||||
default=604800,
|
||||
help='For how much time (in seconds) to keep status '
|
||||
'information about nodes after introspection was '
|
||||
'finished for them. Default value is 1 week.'),
|
||||
cfg.IntOpt('clean_up_period',
|
||||
default=60,
|
||||
help='Amount of time in seconds, after which repeat clean up '
|
||||
'of timed out nodes and old nodes status information.'),
|
||||
cfg.BoolOpt('overwrite_existing',
|
||||
default=True,
|
||||
help='Whether to overwrite existing values in node database. '
|
||||
'Disable this option to make introspection a '
|
||||
'non-destructive operation.'),
|
||||
cfg.BoolOpt('enable_setting_ipmi_credentials',
|
||||
default=False,
|
||||
help='Whether to enable setting IPMI credentials during '
|
||||
'introspection. This is an experimental and not well '
|
||||
'tested feature, use at your own risk.'),
|
||||
cfg.StrOpt('listen_address',
|
||||
default='0.0.0.0',
|
||||
help='IP to listen on.'),
|
||||
cfg.IntOpt('listen_port',
|
||||
default=5050,
|
||||
help='Port to listen on.'),
|
||||
cfg.BoolOpt('authenticate',
|
||||
default=True,
|
||||
help='Whether to authenticate with Keystone on public HTTP '
|
||||
'endpoints. Note that introspection ramdisk postback '
|
||||
'endpoint is never authenticated.'),
|
||||
cfg.StrOpt('database',
|
||||
default='',
|
||||
help='SQLite3 database to store nodes under introspection, '
|
||||
'required. Do not use :memory: here, it won\'t work.'),
|
||||
cfg.StrOpt('processing_hooks',
|
||||
default='scheduler,validate_interfaces',
|
||||
help='Comma-separated list of enabled hooks for processing '
|
||||
'pipeline. Hook \'scheduler\' updates the node with the '
|
||||
'minimum properties required by the Nova scheduler. '
|
||||
'Hook \'validate_interfaces\' ensures that valid NIC '
|
||||
'data was provided by the ramdisk.'
|
||||
'Do not exclude these two unless you really know what '
|
||||
'you\'re doing.'),
|
||||
cfg.BoolOpt('debug',
|
||||
default=False,
|
||||
help='Debug mode enabled/disabled.'),
|
||||
cfg.BoolOpt('ports_for_inactive_interfaces',
|
||||
default=False,
|
||||
help='DEPRECATED: use add_ports.'),
|
||||
]
|
||||
|
||||
cfg.CONF.register_opts(SERVICE_OPTS, group='discoverd')
|
||||
|
||||
|
||||
def with_default(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(section, option, default=None):
|
||||
try:
|
||||
return func(section, option)
|
||||
except (configparser.NoSectionError, configparser.NoOptionError):
|
||||
return default
|
||||
return wrapper
|
||||
|
||||
|
||||
def init_conf():
|
||||
global CONF, get, getint, getboolean, read
|
||||
CONF = configparser.ConfigParser(defaults=DEFAULTS)
|
||||
get = with_default(CONF.get)
|
||||
getint = with_default(CONF.getint)
|
||||
getboolean = with_default(CONF.getboolean)
|
||||
read = CONF.read
|
||||
|
||||
|
||||
init_conf()
|
||||
def list_opts():
|
||||
return [
|
||||
('discoverd', SERVICE_OPTS)
|
||||
]
|
||||
|
|
|
@ -15,9 +15,9 @@ import logging
|
|||
import subprocess
|
||||
|
||||
from eventlet import semaphore
|
||||
from oslo_config import cfg
|
||||
|
||||
from ironic_discoverd.common.i18n import _LE
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd import node_cache
|
||||
from ironic_discoverd import utils
|
||||
|
||||
|
@ -27,6 +27,7 @@ NEW_CHAIN = 'discovery_temp'
|
|||
CHAIN = 'discovery'
|
||||
INTERFACE = None
|
||||
LOCK = semaphore.BoundedSemaphore()
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def _iptables(*args, **kwargs):
|
||||
|
@ -50,11 +51,11 @@ def init():
|
|||
|
||||
Must be called once on start-up.
|
||||
"""
|
||||
if not conf.getboolean('discoverd', 'manage_firewall'):
|
||||
if not CONF.discoverd.manage_firewall:
|
||||
return
|
||||
|
||||
global INTERFACE
|
||||
INTERFACE = conf.get('discoverd', 'dnsmasq_interface')
|
||||
INTERFACE = CONF.discoverd.dnsmasq_interface
|
||||
_clean_up(CHAIN)
|
||||
# Not really needed, but helps to validate that we have access to iptables
|
||||
_iptables('-N', CHAIN)
|
||||
|
@ -86,7 +87,7 @@ def update_filters(ironic=None):
|
|||
|
||||
:param ironic: Ironic client instance, optional.
|
||||
"""
|
||||
if not conf.getboolean('discoverd', 'manage_firewall'):
|
||||
if not CONF.discoverd.manage_firewall:
|
||||
return
|
||||
|
||||
assert INTERFACE is not None
|
||||
|
|
|
@ -18,13 +18,15 @@ import string
|
|||
|
||||
import eventlet
|
||||
from ironicclient import exceptions
|
||||
from oslo_config import cfg
|
||||
|
||||
from ironic_discoverd.common.i18n import _, _LI, _LW
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd import firewall
|
||||
from ironic_discoverd import node_cache
|
||||
from ironic_discoverd import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
LOG = logging.getLogger("ironic_discoverd.introspect")
|
||||
# See http://specs.openstack.org/openstack/ironic-specs/specs/kilo/new-ironic-state-machine.html # noqa
|
||||
|
@ -34,7 +36,7 @@ PASSWORD_MAX_LENGTH = 20 # IPMI v2.0
|
|||
|
||||
|
||||
def _validate_ipmi_credentials(node, new_ipmi_credentials):
|
||||
if not conf.getboolean('discoverd', 'enable_setting_ipmi_credentials'):
|
||||
if not CONF.discoverd.enable_setting_ipmi_credentials:
|
||||
raise utils.Error(
|
||||
_('IPMI credentials setup is disabled in configuration'))
|
||||
|
||||
|
@ -156,4 +158,4 @@ def _background_introspect(ironic, cached_node):
|
|||
LOG.info(_LI('Introspection environment is ready for node %(node)s, '
|
||||
'manual power on is required within %(timeout)d seconds') %
|
||||
{'node': cached_node.uuid,
|
||||
'timeout': conf.getint('discoverd', 'timeout')})
|
||||
'timeout': CONF.discoverd.timeout})
|
||||
|
|
|
@ -14,23 +14,26 @@
|
|||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
|
||||
import argparse
|
||||
import functools
|
||||
import json
|
||||
import logging
|
||||
from oslo_utils import uuidutils
|
||||
import sys
|
||||
|
||||
import flask
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from ironic_discoverd.common.i18n import _, _LE, _LW
|
||||
from ironic_discoverd import conf
|
||||
# Import configuration options
|
||||
from ironic_discoverd import conf # noqa
|
||||
from ironic_discoverd import firewall
|
||||
from ironic_discoverd import introspect
|
||||
from ironic_discoverd import node_cache
|
||||
from ironic_discoverd import process
|
||||
from ironic_discoverd import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
app = flask.Flask(__name__)
|
||||
LOG = logging.getLogger('ironic_discoverd.main')
|
||||
|
@ -132,9 +135,9 @@ def check_ironic_available():
|
|||
2. Keystone has already started
|
||||
3. Ironic has already started
|
||||
"""
|
||||
attempts = conf.getint('discoverd', 'ironic_retry_attempts')
|
||||
attempts = CONF.discoverd.ironic_retry_attempts
|
||||
assert attempts >= 0
|
||||
retry_period = conf.getint('discoverd', 'ironic_retry_period')
|
||||
retry_period = CONF.discoverd.ironic_retry_period
|
||||
LOG.debug('Trying to connect to Ironic')
|
||||
for i in range(attempts + 1): # one attempt always required
|
||||
try:
|
||||
|
@ -150,14 +153,8 @@ def check_ironic_available():
|
|||
eventlet.greenthread.sleep(retry_period)
|
||||
|
||||
|
||||
def config_shim(args):
|
||||
"""Make new argument parsing method backwards compatible."""
|
||||
if len(args) == 2 and args[1][0] != '-':
|
||||
return ['--config-file', args[1]]
|
||||
|
||||
|
||||
def init():
|
||||
if conf.getboolean('discoverd', 'authenticate'):
|
||||
if CONF.discoverd.authenticate:
|
||||
utils.add_auth_middleware(app)
|
||||
else:
|
||||
LOG.warning(_LW('Starting unauthenticated, please check'
|
||||
|
@ -166,29 +163,21 @@ def init():
|
|||
node_cache.init()
|
||||
check_ironic_available()
|
||||
|
||||
if conf.getboolean('discoverd', 'manage_firewall'):
|
||||
if CONF.discoverd.manage_firewall:
|
||||
firewall.init()
|
||||
period = conf.getint('discoverd', 'firewall_update_period')
|
||||
period = CONF.discoverd.firewall_update_period
|
||||
eventlet.greenthread.spawn_n(periodic_update, period)
|
||||
|
||||
if conf.getint('discoverd', 'timeout') > 0:
|
||||
period = conf.getint('discoverd', 'clean_up_period')
|
||||
if CONF.discoverd.timeout > 0:
|
||||
period = CONF.discoverd.clean_up_period
|
||||
eventlet.greenthread.spawn_n(periodic_clean_up, period)
|
||||
else:
|
||||
LOG.warning(_LW('Timeout is disabled in configuration'))
|
||||
|
||||
|
||||
def main(): # pragma: no cover
|
||||
old_args = config_shim(sys.argv)
|
||||
parser = argparse.ArgumentParser(description='''Hardware introspection
|
||||
service for OpenStack Ironic.
|
||||
''')
|
||||
parser.add_argument('--config-file', dest='config', required=True)
|
||||
# if parse_args is passed None it uses sys.argv instead.
|
||||
args = parser.parse_args(old_args)
|
||||
|
||||
conf.read(args.config)
|
||||
debug = conf.getboolean('discoverd', 'debug')
|
||||
def main(args=sys.argv[1:]): # pragma: no cover
|
||||
CONF(args, project='ironic-discoverd')
|
||||
debug = CONF.discoverd.debug
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
|
||||
for third_party in ('urllib3.connectionpool',
|
||||
|
@ -198,12 +187,7 @@ def main(): # pragma: no cover
|
|||
logging.getLogger('ironicclient.common.http').setLevel(
|
||||
logging.INFO if debug else logging.ERROR)
|
||||
|
||||
if old_args:
|
||||
LOG.warning(_LW('"ironic-discoverd <config-file>" syntax is deprecated'
|
||||
' use "ironic-discoverd --config-file <config-file>"'
|
||||
' instead'))
|
||||
|
||||
init()
|
||||
app.run(debug=debug,
|
||||
host=conf.get('discoverd', 'listen_address'),
|
||||
port=conf.getint('discoverd', 'listen_port'))
|
||||
host=CONF.discoverd.listen_address,
|
||||
port=CONF.discoverd.listen_port)
|
||||
|
|
|
@ -21,10 +21,13 @@ import sqlite3
|
|||
import sys
|
||||
import time
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from ironic_discoverd.common.i18n import _, _LC, _LE
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
LOG = logging.getLogger("ironic_discoverd.node_cache")
|
||||
_DB_NAME = None
|
||||
|
@ -128,7 +131,7 @@ def init():
|
|||
"""Initialize the database."""
|
||||
global _DB_NAME
|
||||
|
||||
_DB_NAME = conf.get('discoverd', 'database', default='').strip()
|
||||
_DB_NAME = CONF.discoverd.database.strip()
|
||||
if not _DB_NAME:
|
||||
LOG.critical(_LC('Configuration option discoverd.database'
|
||||
' should be set'))
|
||||
|
@ -262,13 +265,13 @@ def clean_up():
|
|||
:return: list of timed out node UUID's
|
||||
"""
|
||||
status_keep_threshold = (time.time() -
|
||||
conf.getint('discoverd', 'node_status_keep_time'))
|
||||
CONF.discoverd.node_status_keep_time)
|
||||
|
||||
with _db() as db:
|
||||
db.execute('delete from nodes where finished_at < ?',
|
||||
(status_keep_threshold,))
|
||||
|
||||
timeout = conf.getint('discoverd', 'timeout')
|
||||
timeout = CONF.discoverd.timeout
|
||||
if timeout <= 0:
|
||||
return []
|
||||
|
||||
|
|
|
@ -15,10 +15,12 @@
|
|||
|
||||
import abc
|
||||
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
from stevedore import named
|
||||
|
||||
from ironic_discoverd import conf
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
|
@ -72,7 +74,7 @@ def processing_hooks_manager(*args):
|
|||
global _HOOKS_MGR
|
||||
if _HOOKS_MGR is None:
|
||||
names = [x.strip()
|
||||
for x in conf.get('discoverd', 'processing_hooks').split(',')
|
||||
for x in CONF.discoverd.processing_hooks.split(',')
|
||||
if x.strip()]
|
||||
_HOOKS_MGR = named.NamedExtensionManager('ironic_discoverd.hooks',
|
||||
names=names,
|
||||
|
|
|
@ -21,16 +21,33 @@ import logging
|
|||
|
||||
from hardware import matcher
|
||||
from hardware import state
|
||||
from oslo_config import cfg
|
||||
|
||||
from ironic_discoverd.common.i18n import _, _LW
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd.plugins import base
|
||||
from ironic_discoverd import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
LOG = logging.getLogger('ironic_discoverd.plugins.edeploy')
|
||||
|
||||
|
||||
EDEPLOY_OPTS = [
|
||||
cfg.StrOpt('lockname',
|
||||
default='/var/lock/discoverd.lock'),
|
||||
cfg.StrOpt('configdir',
|
||||
default='/etc/edeploy'),
|
||||
]
|
||||
CONF.register_opts(EDEPLOY_OPTS, group='edeploy')
|
||||
|
||||
|
||||
def list_opts():
|
||||
return [
|
||||
('edeploy', EDEPLOY_OPTS)
|
||||
]
|
||||
|
||||
|
||||
class eDeployHook(base.ProcessingHook):
|
||||
"""Interact with eDeploy ramdisk for discovery data processing hooks."""
|
||||
|
||||
|
@ -60,9 +77,8 @@ class eDeployHook(base.ProcessingHook):
|
|||
sobj = None
|
||||
|
||||
try:
|
||||
sobj = state.State(lockname=conf.get('edeploy', 'lockname',
|
||||
'/var/lock/discoverd.lock'))
|
||||
sobj.load(conf.get('edeploy', 'configdir', '/etc/edeploy'))
|
||||
sobj = state.State(CONF.edeploy.lockname)
|
||||
sobj.load(CONF.edeploy.configdir)
|
||||
prof, var = sobj.find_match(hw_items)
|
||||
var['profile'] = prof
|
||||
|
||||
|
|
|
@ -16,11 +16,15 @@
|
|||
import logging
|
||||
import sys
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from ironic_discoverd.common.i18n import _, _LC, _LI, _LW
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd.plugins import base
|
||||
from ironic_discoverd import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
LOG = logging.getLogger('ironic_discoverd.plugins.standard')
|
||||
|
||||
|
@ -44,7 +48,7 @@ class SchedulerHook(base.ProcessingHook):
|
|||
|
||||
def before_update(self, node, ports, node_info):
|
||||
"""Update node with scheduler properties."""
|
||||
overwrite = conf.getboolean('discoverd', 'overwrite_existing')
|
||||
overwrite = CONF.discoverd.overwrite_existing
|
||||
patch = [{'op': 'add', 'path': '/properties/%s' % key,
|
||||
'value': str(node_info[key])}
|
||||
for key in self.KEYS
|
||||
|
@ -52,27 +56,24 @@ class SchedulerHook(base.ProcessingHook):
|
|||
return patch, {}
|
||||
|
||||
|
||||
VALID_ADD_PORTS_VALUES = ('all', 'active', 'pxe')
|
||||
|
||||
|
||||
class ValidateInterfacesHook(base.ProcessingHook):
|
||||
"""Hook to validate network interfaces."""
|
||||
|
||||
def __init__(self):
|
||||
if conf.get('discoverd', 'add_ports') not in VALID_ADD_PORTS_VALUES:
|
||||
if CONF.discoverd.add_ports not in conf.VALID_ADD_PORTS_VALUES:
|
||||
LOG.critical(_LC('Accepted values for [discoverd]add_ports are '
|
||||
'%(valid)s, got %(actual)s'),
|
||||
{'valid': VALID_ADD_PORTS_VALUES,
|
||||
'actual': conf.get('discoverd', 'add_ports')})
|
||||
{'valid': conf.VALID_ADD_PORTS_VALUES,
|
||||
'actual': CONF.discoverd.add_ports})
|
||||
sys.exit(1)
|
||||
|
||||
def _ports_to_add(self):
|
||||
if conf.getboolean('discoverd', 'ports_for_inactive_interfaces'):
|
||||
if CONF.discoverd.ports_for_inactive_interfaces:
|
||||
LOG.warning(_LW('Using deprecated option '
|
||||
'[discoverd]ports_for_inactive_interfaces'))
|
||||
return 'all'
|
||||
else:
|
||||
return conf.get('discoverd', 'add_ports')
|
||||
return CONF.discoverd.add_ports
|
||||
|
||||
def before_processing(self, node_info):
|
||||
"""Validate information about network interfaces."""
|
||||
|
|
|
@ -15,18 +15,31 @@ import tempfile
|
|||
import unittest
|
||||
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from ironic_discoverd.common import i18n
|
||||
from ironic_discoverd import conf
|
||||
# Import configuration options
|
||||
from ironic_discoverd import conf # noqa
|
||||
from ironic_discoverd import node_cache
|
||||
from ironic_discoverd.plugins import base as plugins_base
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def init_test_conf():
|
||||
db_file = tempfile.NamedTemporaryFile()
|
||||
conf.init_conf()
|
||||
conf.CONF.add_section('discoverd')
|
||||
conf.CONF.set('discoverd', 'database', db_file.name)
|
||||
try:
|
||||
# Functional tests
|
||||
CONF.reload_config_files()
|
||||
# Unit tests
|
||||
except Exception:
|
||||
CONF.reset()
|
||||
CONF.register_group(cfg.OptGroup('discoverd'))
|
||||
if not CONF.discoverd.database:
|
||||
# Might be set in functional tests
|
||||
db_file = tempfile.NamedTemporaryFile()
|
||||
CONF.set_override('database', db_file.name, 'discoverd')
|
||||
else:
|
||||
db_file = None
|
||||
node_cache._DB_NAME = None
|
||||
return db_file
|
||||
|
||||
|
@ -36,7 +49,8 @@ class BaseTest(unittest.TestCase):
|
|||
super(BaseTest, self).setUp()
|
||||
self.db_file = init_test_conf()
|
||||
self.db = node_cache._db()
|
||||
self.addCleanup(lambda: self.db_file.close())
|
||||
if self.db_file:
|
||||
self.addCleanup(lambda: self.db_file.close())
|
||||
plugins_base._HOOKS_MGR = None
|
||||
for name in ('_', '_LI', '_LW', '_LE', '_LC'):
|
||||
patch = mock.patch.object(i18n, name, lambda s: s)
|
||||
|
|
|
@ -14,14 +14,16 @@
|
|||
import eventlet
|
||||
from ironicclient import exceptions
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd import firewall
|
||||
from ironic_discoverd import introspect
|
||||
from ironic_discoverd import node_cache
|
||||
from ironic_discoverd.test import base as test_base
|
||||
from ironic_discoverd import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class BaseTest(test_base.NodeTest):
|
||||
def setUp(self):
|
||||
|
@ -266,7 +268,7 @@ class TestIntrospect(BaseTest):
|
|||
class TestSetIpmiCredentials(BaseTest):
|
||||
def setUp(self):
|
||||
super(TestSetIpmiCredentials, self).setUp()
|
||||
conf.CONF.set('discoverd', 'enable_setting_ipmi_credentials', 'true')
|
||||
CONF.set_override('enable_setting_ipmi_credentials', True, 'discoverd')
|
||||
self.new_creds = ('user', 'password')
|
||||
self.cached_node.options['new_ipmi_credentials'] = self.new_creds
|
||||
self.node.maintenance = True
|
||||
|
@ -288,7 +290,8 @@ class TestSetIpmiCredentials(BaseTest):
|
|||
'new_ipmi_credentials', self.new_creds)
|
||||
|
||||
def test_disabled(self, client_mock, add_mock, filters_mock):
|
||||
conf.CONF.set('discoverd', 'enable_setting_ipmi_credentials', 'false')
|
||||
CONF.set_override('enable_setting_ipmi_credentials', False,
|
||||
'discoverd')
|
||||
self._prepare(client_mock)
|
||||
|
||||
self.assertRaisesRegexp(utils.Error, 'disabled',
|
||||
|
|
|
@ -18,7 +18,6 @@ import eventlet
|
|||
import mock
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd import introspect
|
||||
from ironic_discoverd import main
|
||||
from ironic_discoverd import node_cache
|
||||
|
@ -27,6 +26,9 @@ from ironic_discoverd.plugins import example as example_plugin
|
|||
from ironic_discoverd import process
|
||||
from ironic_discoverd.test import base as test_base
|
||||
from ironic_discoverd import utils
|
||||
from oslo_config import cfg
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class TestApi(test_base.BaseTest):
|
||||
|
@ -34,12 +36,12 @@ class TestApi(test_base.BaseTest):
|
|||
super(TestApi, self).setUp()
|
||||
main.app.config['TESTING'] = True
|
||||
self.app = main.app.test_client()
|
||||
conf.CONF.set('discoverd', 'authenticate', 'false')
|
||||
CONF.set_override('authenticate', False, 'discoverd')
|
||||
self.uuid = uuidutils.generate_uuid()
|
||||
|
||||
@mock.patch.object(introspect, 'introspect', autospec=True)
|
||||
def test_introspect_no_authentication(self, introspect_mock):
|
||||
conf.CONF.set('discoverd', 'authenticate', 'false')
|
||||
CONF.set_override('authenticate', False, 'discoverd')
|
||||
res = self.app.post('/v1/introspection/%s' % self.uuid)
|
||||
self.assertEqual(202, res.status_code)
|
||||
introspect_mock.assert_called_once_with(self.uuid,
|
||||
|
@ -47,7 +49,7 @@ class TestApi(test_base.BaseTest):
|
|||
|
||||
@mock.patch.object(introspect, 'introspect', autospec=True)
|
||||
def test_introspect_set_ipmi_credentials(self, introspect_mock):
|
||||
conf.CONF.set('discoverd', 'authenticate', 'false')
|
||||
CONF.set_override('authenticate', False, 'discoverd')
|
||||
res = self.app.post('/v1/introspection/%s?new_ipmi_username=user&'
|
||||
'new_ipmi_password=password' % self.uuid)
|
||||
self.assertEqual(202, res.status_code)
|
||||
|
@ -57,7 +59,7 @@ class TestApi(test_base.BaseTest):
|
|||
|
||||
@mock.patch.object(introspect, 'introspect', autospec=True)
|
||||
def test_introspect_set_ipmi_credentials_no_user(self, introspect_mock):
|
||||
conf.CONF.set('discoverd', 'authenticate', 'false')
|
||||
CONF.set_override('authenticate', False, 'discoverd')
|
||||
res = self.app.post('/v1/introspection/%s?'
|
||||
'new_ipmi_password=password' % self.uuid)
|
||||
self.assertEqual(202, res.status_code)
|
||||
|
@ -79,7 +81,7 @@ class TestApi(test_base.BaseTest):
|
|||
@mock.patch.object(introspect, 'introspect', autospec=True)
|
||||
def test_introspect_failed_authentication(self, introspect_mock,
|
||||
auth_mock):
|
||||
conf.CONF.set('discoverd', 'authenticate', 'true')
|
||||
CONF.set_override('authenticate', True, 'discoverd')
|
||||
auth_mock.side_effect = utils.Error('Boom', code=403)
|
||||
res = self.app.post('/v1/introspection/%s' % self.uuid,
|
||||
headers={'X-Auth-Token': 'token'})
|
||||
|
@ -106,7 +108,8 @@ class TestApi(test_base.BaseTest):
|
|||
|
||||
@mock.patch.object(process, 'process', autospec=True)
|
||||
def test_continue(self, process_mock):
|
||||
conf.CONF.set('discoverd', 'authenticate', 'true') # should be ignored
|
||||
# should be ignored
|
||||
CONF.set_override('authenticate', True, 'discoverd')
|
||||
process_mock.return_value = [42]
|
||||
res = self.app.post('/v1/continue', data='"JSON"')
|
||||
self.assertEqual(200, res.status_code)
|
||||
|
@ -157,10 +160,10 @@ class TestCheckIronicAvailable(test_base.BaseTest):
|
|||
self.assertEqual(2, client_mock.call_count)
|
||||
cli.driver.list.assert_called_once_with()
|
||||
sleep_mock.assert_called_once_with(
|
||||
conf.getint('discoverd', 'ironic_retry_period'))
|
||||
CONF.discoverd.ironic_retry_period)
|
||||
|
||||
def test_failed(self, client_mock, sleep_mock):
|
||||
attempts = conf.getint('discoverd', 'ironic_retry_attempts')
|
||||
attempts = CONF.discoverd.ironic_retry_attempts
|
||||
client_mock.side_effect = RuntimeError()
|
||||
self.assertRaises(RuntimeError, main.check_ironic_available)
|
||||
self.assertEqual(1 + attempts, client_mock.call_count)
|
||||
|
@ -174,7 +177,7 @@ class TestPlugins(unittest.TestCase):
|
|||
'before_update', autospec=True)
|
||||
def test_hook(self, mock_post, mock_pre):
|
||||
plugins_base._HOOKS_MGR = None
|
||||
conf.CONF.set('discoverd', 'processing_hooks', 'example')
|
||||
CONF.set_override('processing_hooks', 'example', 'discoverd')
|
||||
mgr = plugins_base.processing_hooks_manager()
|
||||
mgr.map_method('before_processing', 'node_info')
|
||||
mock_pre.assert_called_once_with(mock.ANY, 'node_info')
|
||||
|
@ -185,15 +188,3 @@ class TestPlugins(unittest.TestCase):
|
|||
def test_manager_is_cached(self):
|
||||
self.assertIs(plugins_base.processing_hooks_manager(),
|
||||
plugins_base.processing_hooks_manager())
|
||||
|
||||
|
||||
class TestConfigShim(unittest.TestCase):
|
||||
def test_old_style_invocation(self):
|
||||
self.assertEqual(main.config_shim(
|
||||
['ironic-discoverd', '/etc/conf']),
|
||||
['--config-file', '/etc/conf'])
|
||||
|
||||
def test_new_style_returns_None(self):
|
||||
self.assertEqual(main.config_shim(
|
||||
['ironic-discoverd', '--config-file', '/etc/conf']),
|
||||
None)
|
||||
|
|
|
@ -17,12 +17,14 @@ import time
|
|||
import unittest
|
||||
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd import node_cache
|
||||
from ironic_discoverd.test import base as test_base
|
||||
from ironic_discoverd import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class TestNodeCache(test_base.NodeTest):
|
||||
def test_add_node(self):
|
||||
|
@ -156,7 +158,7 @@ class TestNodeCacheCleanUp(test_base.NodeTest):
|
|||
'values(?, ?, ?)', (self.uuid, 'foo', 'bar'))
|
||||
|
||||
def test_no_timeout(self):
|
||||
conf.CONF.set('discoverd', 'timeout', '0')
|
||||
CONF.set_override('timeout', 0, 'discoverd')
|
||||
|
||||
self.assertFalse(node_cache.clean_up())
|
||||
|
||||
|
@ -190,7 +192,7 @@ class TestNodeCacheCleanUp(test_base.NodeTest):
|
|||
'values(?, ?, ?)', (self.uuid + '1',
|
||||
self.started_at,
|
||||
self.started_at + 60))
|
||||
conf.CONF.set('discoverd', 'timeout', '99')
|
||||
CONF.set_override('timeout', 99, 'discoverd')
|
||||
time_mock.return_value = self.started_at + 100
|
||||
|
||||
self.assertEqual([self.uuid], node_cache.clean_up())
|
||||
|
@ -206,7 +208,7 @@ class TestNodeCacheCleanUp(test_base.NodeTest):
|
|||
'select * from options').fetchall())
|
||||
|
||||
def test_old_status(self):
|
||||
conf.CONF.set('discoverd', 'node_status_keep_time', '42')
|
||||
CONF.set_override('node_status_keep_time', 42, 'discoverd')
|
||||
with self.db:
|
||||
self.db.execute('update nodes set finished_at=?',
|
||||
(time.time() - 100,))
|
||||
|
@ -270,13 +272,11 @@ class TestNodeInfoFinished(test_base.NodeTest):
|
|||
class TestInit(unittest.TestCase):
|
||||
def setUp(self):
|
||||
super(TestInit, self).setUp()
|
||||
conf.init_conf()
|
||||
conf.CONF.add_section('discoverd')
|
||||
node_cache._DB_NAME = None
|
||||
|
||||
def test_ok(self):
|
||||
with tempfile.NamedTemporaryFile() as db_file:
|
||||
conf.CONF.set('discoverd', 'database', db_file.name)
|
||||
CONF.set_override('database', db_file.name, 'discoverd')
|
||||
node_cache.init()
|
||||
|
||||
self.assertIsNotNone(node_cache._DB_NAME)
|
||||
|
@ -285,11 +285,12 @@ class TestInit(unittest.TestCase):
|
|||
|
||||
def test_create_dir(self):
|
||||
temp = tempfile.mkdtemp()
|
||||
conf.CONF.set('discoverd', 'database',
|
||||
os.path.join(temp, 'dir', 'file'))
|
||||
CONF.set_override('database', os.path.join(temp, 'dir', 'file'),
|
||||
'discoverd')
|
||||
node_cache.init()
|
||||
|
||||
def test_no_database(self):
|
||||
CONF.set_override('database', '', 'discoverd')
|
||||
self.assertRaises(SystemExit, node_cache.init)
|
||||
|
||||
|
||||
|
|
|
@ -16,12 +16,14 @@ import os
|
|||
from hardware import cmdb
|
||||
from hardware import state
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd.plugins import edeploy
|
||||
from ironic_discoverd.test import base as test_base
|
||||
from ironic_discoverd import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def fake_load(obj, cfg_dir):
|
||||
obj._cfg_dir = cfg_dir
|
||||
|
@ -36,11 +38,10 @@ class TestEdeploy(test_base.NodeTest):
|
|||
|
||||
def setUp(self):
|
||||
super(TestEdeploy, self).setUp()
|
||||
conf.init_conf()
|
||||
conf.CONF.add_section('edeploy')
|
||||
basedir = os.path.dirname(os.path.abspath(__file__))
|
||||
conf.CONF.set('edeploy', 'configdir', os.path.join(basedir,
|
||||
'edeploy_conf'))
|
||||
CONF.set_override('configdir',
|
||||
os.path.join(basedir, 'edeploy_conf'),
|
||||
'edeploy')
|
||||
|
||||
def test_hook(self):
|
||||
hook = edeploy.eDeployHook()
|
||||
|
|
|
@ -17,8 +17,8 @@ import time
|
|||
import eventlet
|
||||
from ironicclient import exceptions
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd import firewall
|
||||
from ironic_discoverd import node_cache
|
||||
from ironic_discoverd.plugins import example as example_plugin
|
||||
|
@ -27,12 +27,15 @@ from ironic_discoverd import process
|
|||
from ironic_discoverd.test import base as test_base
|
||||
from ironic_discoverd import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class BaseTest(test_base.NodeTest):
|
||||
def setUp(self):
|
||||
super(BaseTest, self).setUp()
|
||||
conf.CONF.set('discoverd', 'processing_hooks',
|
||||
'ramdisk_error,scheduler,validate_interfaces')
|
||||
CONF.set_override('processing_hooks',
|
||||
'ramdisk_error,scheduler,validate_interfaces',
|
||||
'discoverd')
|
||||
self.started_at = time.time()
|
||||
self.all_macs = self.macs + ['DE:AD:BE:EF:DE:AD']
|
||||
self.pxe_mac = self.macs[1]
|
||||
|
@ -139,7 +142,7 @@ class TestProcess(BaseTest):
|
|||
|
||||
@prepare_mocks
|
||||
def test_add_ports_active(self, cli, pop_mock, process_mock):
|
||||
conf.CONF.set('discoverd', 'add_ports', 'active')
|
||||
CONF.set_override('add_ports', 'active', 'discoverd')
|
||||
|
||||
res = process.process(self.data)
|
||||
|
||||
|
@ -159,7 +162,7 @@ class TestProcess(BaseTest):
|
|||
|
||||
@prepare_mocks
|
||||
def test_add_ports_all(self, cli, pop_mock, process_mock):
|
||||
conf.CONF.set('discoverd', 'add_ports', 'all')
|
||||
CONF.set_override('add_ports', 'all', 'discoverd')
|
||||
|
||||
res = process.process(self.data)
|
||||
|
||||
|
@ -196,8 +199,7 @@ class TestProcess(BaseTest):
|
|||
|
||||
@prepare_mocks
|
||||
def test_ports_for_inactive(self, cli, pop_mock, process_mock):
|
||||
conf.CONF.set('discoverd', 'ports_for_inactive_interfaces', 'true')
|
||||
conf.CONF.remove_option('discoverd', 'add_ports')
|
||||
CONF.set_override('ports_for_inactive_interfaces', True, 'discoverd')
|
||||
del self.data['boot_interface']
|
||||
|
||||
process.process(self.data)
|
||||
|
@ -317,8 +319,10 @@ class TestProcess(BaseTest):
|
|||
class TestProcessNode(BaseTest):
|
||||
def setUp(self):
|
||||
super(TestProcessNode, self).setUp()
|
||||
conf.CONF.set('discoverd', 'processing_hooks',
|
||||
'ramdisk_error,scheduler,validate_interfaces,example')
|
||||
CONF.set_override('processing_hooks',
|
||||
'ramdisk_error,scheduler,validate_interfaces,'
|
||||
'example',
|
||||
'discoverd')
|
||||
self.validate_attempts = 5
|
||||
self.data['macs'] = self.macs # validate_interfaces hook
|
||||
self.ports = self.all_ports
|
||||
|
@ -373,7 +377,7 @@ class TestProcessNode(BaseTest):
|
|||
finished_mock.assert_called_once_with(mock.ANY)
|
||||
|
||||
def test_overwrite_disabled(self, filters_mock, post_hook_mock):
|
||||
conf.CONF.set('discoverd', 'overwrite_existing', 'false')
|
||||
CONF.set_override('overwrite_existing', False, 'discoverd')
|
||||
patch = [
|
||||
{'op': 'add', 'path': '/properties/cpus', 'value': '2'},
|
||||
{'op': 'add', 'path': '/properties/memory_mb', 'value': '1024'},
|
||||
|
@ -508,5 +512,5 @@ class TestProcessNode(BaseTest):
|
|||
|
||||
class TestValidateInterfacesHook(test_base.BaseTest):
|
||||
def test_wrong_add_ports(self):
|
||||
conf.CONF.set('discoverd', 'add_ports', 'foobar')
|
||||
CONF.set_override('add_ports', 'foobar', 'discoverd')
|
||||
self.assertRaises(SystemExit, std_plugins.ValidateInterfacesHook)
|
||||
|
|
|
@ -17,22 +17,24 @@ import eventlet
|
|||
from ironicclient import exceptions
|
||||
from keystonemiddleware import auth_token
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from ironic_discoverd import conf
|
||||
from ironic_discoverd.test import base
|
||||
from ironic_discoverd import utils
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class TestCheckAuth(base.BaseTest):
|
||||
def setUp(self):
|
||||
super(TestCheckAuth, self).setUp()
|
||||
conf.CONF.set('discoverd', 'authenticate', 'true')
|
||||
CONF.set_override('authenticate', True, 'discoverd')
|
||||
|
||||
@mock.patch.object(auth_token, 'AuthProtocol')
|
||||
def test_middleware(self, mock_auth):
|
||||
conf.CONF.set('discoverd', 'os_username', 'admin')
|
||||
conf.CONF.set('discoverd', 'os_tenant_name', 'admin')
|
||||
conf.CONF.set('discoverd', 'os_password', 'password')
|
||||
CONF.set_override('os_username', 'admin', 'discoverd')
|
||||
CONF.set_override('os_tenant_name', 'admin', 'discoverd')
|
||||
CONF.set_override('os_password', 'password', 'discoverd')
|
||||
|
||||
app = mock.Mock(wsgi_app=mock.sentinel.app)
|
||||
utils.add_auth_middleware(app)
|
||||
|
@ -60,7 +62,7 @@ class TestCheckAuth(base.BaseTest):
|
|||
self.assertRaises(utils.Error, utils.check_auth, request)
|
||||
|
||||
def test_disabled(self):
|
||||
conf.CONF.set('discoverd', 'authenticate', 'false')
|
||||
CONF.set_override('authenticate', False, 'discoverd')
|
||||
request = mock.Mock(headers={'X-Identity-Status': 'Invalid'})
|
||||
utils.check_auth(request)
|
||||
|
||||
|
|
|
@ -19,16 +19,15 @@ import eventlet
|
|||
from ironicclient import client
|
||||
from ironicclient import exceptions
|
||||
from keystonemiddleware import auth_token
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
|
||||
from ironic_discoverd.common.i18n import _, _LE, _LW
|
||||
from ironic_discoverd import conf
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
LOG = logging.getLogger('ironic_discoverd.utils')
|
||||
OS_ARGS = ('os_password', 'os_username', 'os_auth_url', 'os_tenant_name')
|
||||
MIDDLEWARE_ARGS = ('admin_password', 'admin_user', 'auth_uri',
|
||||
'admin_tenant_name')
|
||||
RETRY_COUNT = 12
|
||||
RETRY_DELAY = 5
|
||||
|
||||
|
@ -44,7 +43,10 @@ class Error(Exception):
|
|||
|
||||
def get_client(): # pragma: no cover
|
||||
"""Get Ironic client instance."""
|
||||
args = dict((k, conf.get('discoverd', k)) for k in OS_ARGS)
|
||||
args = dict({'os_password': CONF.discoverd.os_password,
|
||||
'os_username': CONF.discoverd.os_username,
|
||||
'os_auth_url': CONF.discoverd.os_auth_url,
|
||||
'os_tenant_name': CONF.discoverd.os_tenant_name})
|
||||
return client.get_client(1, **args)
|
||||
|
||||
|
||||
|
@ -53,10 +55,12 @@ def add_auth_middleware(app):
|
|||
|
||||
:param app: application.
|
||||
"""
|
||||
auth_conf = {key: conf.get('discoverd', value)
|
||||
for (key, value) in zip(MIDDLEWARE_ARGS, OS_ARGS)}
|
||||
auth_conf = dict({'admin_password': CONF.discoverd.os_password,
|
||||
'admin_user': CONF.discoverd.os_username,
|
||||
'auth_uri': CONF.discoverd.os_auth_url,
|
||||
'admin_tenant_name': CONF.discoverd.os_tenant_name})
|
||||
auth_conf['delay_auth_decision'] = True
|
||||
auth_conf['identity_uri'] = conf.get('discoverd', 'identity_uri')
|
||||
auth_conf['identity_uri'] = CONF.discoverd.identity_uri
|
||||
app.wsgi_app = auth_token.AuthProtocol(app.wsgi_app, auth_conf)
|
||||
|
||||
|
||||
|
@ -66,7 +70,7 @@ def check_auth(request):
|
|||
:param request: Flask request
|
||||
:raises: utils.Error if access is denied
|
||||
"""
|
||||
if not conf.getboolean('discoverd', 'authenticate'):
|
||||
if not CONF.discoverd.authenticate:
|
||||
return
|
||||
if request.headers.get('X-Identity-Status').lower() == 'invalid':
|
||||
raise Error(_('Authentication required'), code=401)
|
||||
|
|
|
@ -9,6 +9,7 @@ python-ironicclient>=0.2.1
|
|||
python-keystoneclient>=1.1.0
|
||||
python-openstackclient>=1.0.0
|
||||
requests>=2.2.0,!=2.4.0
|
||||
oslo.config>=1.9.0 # Apache-2.0
|
||||
oslo.i18n>=1.3.0 # Apache-2.0
|
||||
oslo.utils>=1.2.0 # Apache-2.0
|
||||
six>=1.7.0
|
||||
|
|
4
setup.py
4
setup.py
|
@ -46,6 +46,10 @@ setup(
|
|||
"baremetal_introspection_start = ironic_discoverd.shell:StartCommand",
|
||||
"baremetal_introspection_status = ironic_discoverd.shell:StatusCommand",
|
||||
],
|
||||
'oslo.config.opts': [
|
||||
"ironic_discoverd = ironic_discoverd.conf:list_opts",
|
||||
"ironic_discoverd.plugins.edeploy = ironic_discoverd.plugins.edeploy:list_opts",
|
||||
],
|
||||
},
|
||||
classifiers = [
|
||||
'Development Status :: 5 - Production/Stable',
|
||||
|
|
Loading…
Reference in New Issue