Rename files and fix things.

Devananda van der Veen 2013-05-03 14:01:05 -07:00
parent 05e9ce4c48
commit 0480834614
73 changed files with 487 additions and 391 deletions

.gitignore

@@ -21,6 +21,7 @@ develop-eggs
 .installed.cfg
 # Other
+*.DS_Store
 .testrepository
 .tox
 .*.swp
@@ -28,3 +29,7 @@ develop-eggs
 cover
 AUTHORS
 ChangeLog
+.testrepository/
+.tox
+.venv

etc/ironic/policy.json (new file)

@@ -0,0 +1,6 @@
{
"admin_api": "is_admin:True",
"admin_or_owner": "is_admin:True or project_id:%(project_id)s",
"context_is_admin": "role:admin",
"default": "rule:admin_or_owner",
}
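
As a side note, the "admin_or_owner" rule above grants access when the request context is an admin or when the caller's project matches the target's project. A rough, self-contained sketch of that intent (a hypothetical helper, not the actual policy engine):

# Hypothetical illustration of "is_admin:True or project_id:%(project_id)s";
# the real check is performed by the policy engine, not this helper.
def admin_or_owner(context, target):
    if getattr(context, 'is_admin', False):
        return True
    return getattr(context, 'project_id', None) == target.get('project_id')

class FakeContext(object):
    is_admin = False
    project_id = 'tenant-a'

print(admin_or_owner(FakeContext(), {'project_id': 'tenant-a'}))  # True
print(admin_or_owner(FakeContext(), {'project_id': 'tenant-b'}))  # False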

ironic/__init__.py (new file)

@@ -0,0 +1,16 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

ironic/cmd/__init__.py (new file)

@@ -0,0 +1,27 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO(mikal): move eventlet imports to ironic.__init__ once we move to PBR
import os
import sys
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
import eventlet
eventlet.monkey_patch(os=False)
from ironic.openstack.common import gettextutils
gettextutils.install('ironic')
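
The snippet above sets EVENTLET_NO_GREENDNS before importing eventlet, monkey-patches the standard library (except os), and installs gettext for the 'ironic' domain. A minimal standalone sketch of what the monkey patching buys (assumes eventlet is installed; not part of this commit):

import eventlet
eventlet.monkey_patch(os=False)

import time

def worker(name):
    # With the patched stdlib, sleep() yields to other green threads.
    time.sleep(0.1)
    return name

pool = eventlet.GreenPool()
print(list(pool.imap(worker, range(3))))  # [0, 1, 2]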

@@ -33,7 +33,7 @@ from wsgiref import simple_server
 from nova import config
 from nova import context as nova_context
 from nova import exception
-from nova.openstack.common import log as logging
+from ironic.openstack.common import log as logging
 from nova import utils
 from nova.virt.baremetal import baremetal_states
 from nova.virt.baremetal import db

@@ -63,8 +63,8 @@ from oslo.config import cfg
 gettext.install('nova', unicode=1)
 from nova import config
-from nova.openstack.common import cliutils
-from nova.openstack.common import log as logging
+from ironic.openstack.common import cliutils
+from ironic.openstack.common import log as logging
 from nova import version
 from nova.virt.baremetal.db import migration as bmdb_migration

@@ -12,4 +12,5 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-from nova.tests.baremetal import *
+from ironic.db.api import *

@@ -22,9 +22,9 @@
 The underlying driver is loaded as a :class:`LazyPluggable`.
-Functions in this module are imported into the nova.virt.baremetal.db
-namespace. Call these functions from nova.virt.baremetal.db namespace, not
-the nova.virt.baremetal.db.api namespace.
+Functions in this module are imported into the ironic.db
+namespace. Call these functions from ironic.db namespace, not
+the ironic.db.api namespace.
 All functions in this module return objects that implement a dictionary-like
 interface. Currently, many of these objects are sqlalchemy objects that
@@ -34,38 +34,30 @@ these objects be simple dictionaries.
 **Related Flags**
-:baremetal_db_backend: string to lookup in the list of LazyPluggable backends.
+:db_backend: string to lookup in the list of LazyPluggable backends.
     `sqlalchemy` is the only supported backend right now.
-:[BAREMETAL] sql_connection: string specifying the sqlalchemy connection to
-    use, like: `sqlite:///var/lib/nova/nova.sqlite`.
+:sql_connection: string specifying the sqlalchemy connection to
+    use, like: `sqlite:///var/lib/ironic/ironic.sqlite`.
 """
 from oslo.config import cfg
-from nova import utils
+from ironic import utils
-# NOTE(deva): we can't move baremetal_db_backend into an OptGroup yet
-#             because utils.LazyPluggable doesn't support reading from
-#             option groups. See bug #1093043.
 db_opts = [
     cfg.StrOpt('db_backend',
                default='sqlalchemy',
-               help='The backend to use for bare-metal database'),
+               help='The backend to use for the ironic database'),
 ]
-baremetal_group = cfg.OptGroup(name='baremetal',
-                               title='Baremetal Options')
 CONF = cfg.CONF
-CONF.register_group(baremetal_group)
-CONF.register_opts(db_opts, baremetal_group)
+CONF.register_opts(db_opts)
 IMPL = utils.LazyPluggable(
     'db_backend',
-    config_group='baremetal',
-    sqlalchemy='nova.virt.baremetal.db.sqlalchemy.api')
+    sqlalchemy='ironic.db.sqlalchemy.api')
 def bm_node_get_all(context, service_host=None):
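
For readers unfamiliar with utils.LazyPluggable: it defers importing the configured backend module until the first attribute access, keyed off the db_backend option registered above. A simplified, hypothetical sketch of that idea (not the actual nova/ironic implementation):

import importlib

class LazyPluggableSketch(object):
    """Load one of several backend modules lazily, based on a config value."""

    def __init__(self, pivot_value, **backends):
        self._backends = backends        # e.g. sqlalchemy='ironic.db.sqlalchemy.api'
        self._pivot_value = pivot_value  # the real class reads this from CONF
        self._backend = None

    def _get_backend(self):
        if self._backend is None:
            self._backend = importlib.import_module(self._backends[self._pivot_value])
        return self._backend

    def __getattr__(self, key):
        # Forward attribute access (e.g. IMPL.bm_node_get_all) to the backend.
        return getattr(self._get_backend(), key)

# Hypothetical usage mirroring the IMPL definition above:
# IMPL = LazyPluggableSketch('sqlalchemy', sqlalchemy='ironic.db.sqlalchemy.api')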

@@ -18,13 +18,12 @@
 """Database setup and migration commands."""
-from nova import utils
+from ironic import utils
 IMPL = utils.LazyPluggable(
     'db_backend',
-    config_group='baremetal',
-    sqlalchemy='nova.virt.baremetal.db.sqlalchemy.migration')
+    sqlalchemy='ironic.db.sqlalchemy.migration')
 INIT_VERSION = 0

@@ -28,9 +28,9 @@ from sqlalchemy.sql.expression import literal_column
 import nova.context
 from nova.db.sqlalchemy import api as sqlalchemy_api
 from nova import exception
-from nova.openstack.common.db import exception as db_exc
-from nova.openstack.common import timeutils
-from nova.openstack.common import uuidutils
+from ironic.openstack.common.db import exception as db_exc
+from ironic.openstack.common import timeutils
+from ironic.openstack.common import uuidutils
 from nova.virt.baremetal.db.sqlalchemy import models
 from nova.virt.baremetal.db.sqlalchemy import session as db_session

@@ -14,7 +14,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-from nova.openstack.common import log as logging
+from ironic.openstack.common import log as logging
 from sqlalchemy import and_, MetaData, select, Table, exists
 from sqlalchemy import exc

@@ -22,9 +22,9 @@ from migrate.versioning import util as migrate_util
 import os
 import sqlalchemy
-from nova import exception
-from nova.virt.baremetal.db import migration
-from nova.virt.baremetal.db.sqlalchemy import session
+from ironic import exception
+from ironic.db import migration
+from ironic.db.sqlalchemy import session
 @migrate_util.decorator

@@ -21,7 +21,7 @@
 from oslo.config import cfg
-from nova.openstack.common.db.sqlalchemy import session as nova_session
+from ironic.openstack.common.db.sqlalchemy import session as nova_session
 from nova import paths
 opts = [
@@ -39,7 +39,7 @@ CONF = cfg.CONF
 CONF.register_group(baremetal_group)
 CONF.register_opts(opts, baremetal_group)
-CONF.import_opt('sqlite_db', 'nova.openstack.common.db.sqlalchemy.session')
+CONF.import_opt('sqlite_db', 'ironic.openstack.common.db.sqlalchemy.session')
 _ENGINE = None
 _MAKER = None

@@ -16,9 +16,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-"""Nova base exception handling.
-Includes decorator for re-raising Nova-type exceptions.
+"""Ironic base exception handling.
+Includes decorator for re-raising Ironic-type exceptions.
 SHOULD include dedicated exception logging.
@@ -27,11 +27,11 @@ SHOULD include dedicated exception logging.
 import functools
 from oslo.config import cfg
-import webob.exc
-from nova.openstack.common import excutils
-from nova.openstack.common import log as logging
-from nova import safe_utils
+from ironic.openstack.common import excutils
+from ironic.openstack.common import log as logging
+from ironic import safe_utils
 LOG = logging.getLogger(__name__)
@@ -45,14 +45,6 @@ CONF = cfg.CONF
 CONF.register_opts(exc_log_opts)
-class ConvertedException(webob.exc.WSGIHTTPException):
-    def __init__(self, code=0, title="", explanation=""):
-        self.code = code
-        self.title = title
-        self.explanation = explanation
-        super(ConvertedException, self).__init__()
 class ProcessExecutionError(IOError):
     def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                  description=None):

@@ -12,6 +12,6 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-from nova.virt.baremetal import driver
+from ironic.manager import driver
 BareMetalDriver = driver.BareMetalDriver

@@ -16,7 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-from nova.virt.baremetal import baremetal_states
+from ironic import states
 class NodeDriver(object):
@@ -51,19 +51,19 @@ class NodeDriver(object):
 class PowerManager(object):
     def __init__(self, **kwargs):
-        self.state = baremetal_states.DELETED
+        self.state = states.DELETED
         pass
     def activate_node(self):
-        self.state = baremetal_states.ACTIVE
+        self.state = states.ACTIVE
         return self.state
     def reboot_node(self):
-        self.state = baremetal_states.ACTIVE
+        self.state = states.ACTIVE
         return self.state
     def deactivate_node(self):
-        self.state = baremetal_states.DELETED
+        self.state = states.DELETED
         return self.state
     def is_power_on(self):

@@ -24,14 +24,14 @@ A driver for Bare-metal platform.
 from oslo.config import cfg
 from nova.compute import power_state
-from nova import context as nova_context
-from nova import exception
-from nova.openstack.common import excutils
-from nova.openstack.common import importutils
-from nova.openstack.common import log as logging
+from ironic import context as ironic_context
+from ironic import exception
+from ironic.openstack.common import excutils
+from ironic.openstack.common import importutils
+from ironic.openstack.common import log as logging
 from nova import paths
-from nova.virt.baremetal import baremetal_states
-from nova.virt.baremetal import db
+from ironic import states
+from ironic import db
 from nova.virt import driver
 from nova.virt import firewall
 from nova.virt.libvirt import imagecache
@@ -70,13 +70,9 @@ opts = [
 LOG = logging.getLogger(__name__)
-baremetal_group = cfg.OptGroup(name='baremetal',
-                               title='Baremetal Options')
 CONF = cfg.CONF
-CONF.register_group(baremetal_group)
-CONF.register_opts(opts, baremetal_group)
-CONF.import_opt('host', 'nova.netconf')
+CONF.register_opts(opts)
+CONF.import_opt('host', 'ironic.netconf')
 DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
     firewall.__name__,
@@ -107,7 +103,7 @@ def _update_state(context, node, instance, state):
 def get_power_manager(**kwargs):
-    cls = importutils.import_class(CONF.baremetal.power_manager)
+    cls = importutils.import_class(CONF.power_manager)
     return cls(**kwargs)
@@ -122,18 +118,18 @@ class BareMetalDriver(driver.ComputeDriver):
         super(BareMetalDriver, self).__init__(virtapi)
         self.driver = importutils.import_object(
-            CONF.baremetal.driver, virtapi)
+            CONF.driver, virtapi)
         self.vif_driver = importutils.import_object(
-            CONF.baremetal.vif_driver)
+            CONF.vif_driver)
         self.firewall_driver = firewall.load_driver(
             default=DEFAULT_FIREWALL_DRIVER)
         self.volume_driver = importutils.import_object(
-            CONF.baremetal.volume_driver, virtapi)
+            CONF.volume_driver, virtapi)
         self.image_cache_manager = imagecache.ImageCacheManager()
         extra_specs = {}
-        extra_specs["baremetal_driver"] = CONF.baremetal.driver
-        for pair in CONF.baremetal.instance_type_extra_specs:
+        extra_specs["baremetal_driver"] = CONF.driver
+        for pair in CONF.instance_type_extra_specs:
             keyval = pair.split(':', 1)
             keyval[0] = keyval[0].strip()
             keyval[1] = keyval[1].strip()
@@ -234,7 +230,7 @@ class BareMetalDriver(driver.ComputeDriver):
         node = db.bm_node_associate_and_update(context, node_uuid,
                 {'instance_uuid': instance['uuid'],
                  'instance_name': instance['hostname'],
-                 'task_state': baremetal_states.BUILDING})
+                 'task_state': states.BUILDING})
         try:
             self._plug_vifs(instance, network_info, context=context)
@@ -251,7 +247,7 @@ class BareMetalDriver(driver.ComputeDriver):
             self.driver.activate_bootloader(context, node, instance)
             self.power_on(instance, node)
             self.driver.activate_node(context, node, instance)
-            _update_state(context, node, instance, baremetal_states.ACTIVE)
+            _update_state(context, node, instance, states.ACTIVE)
         except Exception:
             with excutils.save_and_reraise_exception():
                 LOG.error(_("Error deploying instance %(instance)s "
@@ -261,7 +257,7 @@ class BareMetalDriver(driver.ComputeDriver):
                 # Do not set instance=None yet. This prevents another
                 # spawn() while we are cleaning up.
-                _update_state(context, node, instance, baremetal_states.ERROR)
+                _update_state(context, node, instance, states.ERROR)
                 self.driver.deactivate_node(context, node, instance)
                 self.power_off(instance, node)
@@ -272,7 +268,7 @@ class BareMetalDriver(driver.ComputeDriver):
                 self._stop_firewall(instance, network_info)
                 self._unplug_vifs(instance, network_info)
-                _update_state(context, node, None, baremetal_states.DELETED)
+                _update_state(context, node, None, states.DELETED)
     def reboot(self, context, instance, network_info, reboot_type,
                block_device_info=None, bad_volumes_callback=None):
@@ -280,7 +276,7 @@ class BareMetalDriver(driver.ComputeDriver):
         ctx = nova_context.get_admin_context()
         pm = get_power_manager(node=node, instance=instance)
         state = pm.reboot_node()
-        if pm.state != baremetal_states.ACTIVE:
+        if pm.state != states.ACTIVE:
             raise exception.InstanceRebootFailure(_(
                 "Baremetal power manager failed to restart node "
                 "for instance %r") % instance['uuid'])
@@ -306,14 +302,14 @@ class BareMetalDriver(driver.ComputeDriver):
             self._stop_firewall(instance, network_info)
             self._unplug_vifs(instance, network_info)
-            _update_state(context, node, None, baremetal_states.DELETED)
+            _update_state(context, node, None, states.DELETED)
         except Exception as e:
             with excutils.save_and_reraise_exception():
                 try:
                     LOG.error(_("Error from baremetal driver "
                                 "during destroy: %s") % e)
                     _update_state(context, node, instance,
-                                  baremetal_states.ERROR)
+                                  states.ERROR)
                 except Exception:
                     LOG.error(_("Error while recording destroy failure in "
                                 "baremetal database: %s") % e)
@@ -324,7 +320,7 @@ class BareMetalDriver(driver.ComputeDriver):
         node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
         pm = get_power_manager(node=node, instance=instance)
         pm.deactivate_node()
-        if pm.state != baremetal_states.DELETED:
+        if pm.state != states.DELETED:
             raise exception.InstancePowerOffFailure(_(
                 "Baremetal power manager failed to stop node "
                 "for instance %r") % instance['uuid'])
@@ -336,7 +332,7 @@ class BareMetalDriver(driver.ComputeDriver):
         node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
        pm = get_power_manager(node=node, instance=instance)
         pm.activate_node()
-        if pm.state != baremetal_states.ACTIVE:
+        if pm.state != states.ACTIVE:
             raise exception.InstancePowerOnFailure(_(
                 "Baremetal power manager failed to start node "
                 "for instance %r") % instance['uuid'])

@@ -16,7 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-from nova.virt.baremetal import base
+from ironic.manager import base
 from nova.virt import firewall

@@ -27,14 +27,14 @@ import tempfile
 from oslo.config import cfg
-from nova import exception
-from nova.openstack.common import log as logging
-from nova.openstack.common import loopingcall
+from ironic import exception
+from ironic.openstack.common import log as logging
+from ironic.openstack.common import loopingcall
 from nova import paths
-from nova import utils
-from nova.virt.baremetal import baremetal_states
-from nova.virt.baremetal import base
-from nova.virt.baremetal import utils as bm_utils
+from ironic import utils
+from ironic import states
+from ironic.manager import base
+from ironic import utils as bm_utils
 opts = [
     cfg.StrOpt('terminal',
@@ -51,12 +51,8 @@ opts = [
                help='maximal number of retries for IPMI operations'),
 ]
-baremetal_group = cfg.OptGroup(name='baremetal',
-                               title='Baremetal Options')
 CONF = cfg.CONF
-CONF.register_group(baremetal_group)
-CONF.register_opts(opts, baremetal_group)
+CONF.register_opts(opts)
 LOG = logging.getLogger(__name__)
@@ -71,7 +67,7 @@ def _make_password_file(password):
 def _get_console_pid_path(node_id):
     name = "%s.pid" % node_id
-    path = os.path.join(CONF.baremetal.terminal_pid_dir, name)
+    path = os.path.join(CONF.terminal_pid_dir, name)
     return path
@@ -149,10 +145,10 @@ class IPMI(base.PowerManager):
         """Called at an interval until the node's power is on."""
         if self._is_power("on"):
-            self.state = baremetal_states.ACTIVE
+            self.state = states.ACTIVE
             raise loopingcall.LoopingCallDone()
-        if self.retries > CONF.baremetal.ipmi_power_retry:
-            self.state = baremetal_states.ERROR
+        if self.retries > CONF.ipmi_power_retry:
+            self.state = states.ERROR
             raise loopingcall.LoopingCallDone()
         try:
             self.retries += 1
@@ -171,10 +167,10 @@ class IPMI(base.PowerManager):
         """Called at an interval until the node's power is off."""
         if self._is_power("off"):
-            self.state = baremetal_states.DELETED
+            self.state = states.DELETED
             raise loopingcall.LoopingCallDone()
-        if self.retries > CONF.baremetal.ipmi_power_retry:
-            self.state = baremetal_states.ERROR
+        if self.retries > CONF.ipmi_power_retry:
+            self.state = states.ERROR
             raise loopingcall.LoopingCallDone()
         try:
             self.retries += 1
@@ -194,7 +190,7 @@ class IPMI(base.PowerManager):
     def activate_node(self):
         """Turns the power to node ON."""
-        if self._is_power("on") and self.state == baremetal_states.ACTIVE:
+        if self._is_power("on") and self.state == states.ACTIVE:
             LOG.warning(_("Activate node called, but node %s "
                           "is already active") % self.address)
             self._set_pxe_for_next_boot()
@@ -220,10 +216,10 @@ class IPMI(base.PowerManager):
         if not self.port:
             return
         args = []
-        args.append(CONF.baremetal.terminal)
-        if CONF.baremetal.terminal_cert_dir:
+        args.append(CONF.terminal)
+        if CONF.terminal_cert_dir:
             args.append("-c")
-            args.append(CONF.baremetal.terminal_cert_dir)
+            args.append(CONF.terminal_cert_dir)
         else:
             args.append("-t")
         args.append("-p")

@@ -26,16 +26,16 @@ import os
 from oslo.config import cfg
 from nova.compute import instance_types
-from nova import exception
-from nova.openstack.common.db import exception as db_exc
-from nova.openstack.common import fileutils
-from nova.openstack.common import log as logging
-from nova.openstack.common import loopingcall
-from nova.openstack.common import timeutils
-from nova.virt.baremetal import baremetal_states
-from nova.virt.baremetal import base
-from nova.virt.baremetal import db
-from nova.virt.baremetal import utils as bm_utils
+from ironic import exception
+from ironic.openstack.common.db import exception as db_exc
+from ironic.openstack.common import fileutils
+from ironic.openstack.common import log as logging
+from ironic.openstack.common import loopingcall
+from ironic.openstack.common import timeutils
+from ironic import states
+from ironic.manager import base
+from ironic import db
+from ironic import utils as bm_utils
 pxe_opts = [
     cfg.StrOpt('deploy_kernel',
@@ -43,13 +43,12 @@ pxe_opts = [
     cfg.StrOpt('deploy_ramdisk',
                help='Default ramdisk image ID used in deployment phase'),
     cfg.StrOpt('net_config_template',
-               default='$pybasedir/nova/virt/baremetal/'
-                       'net-dhcp.ubuntu.template',
+               default='$pybasedir/ironic/net-dhcp.ubuntu.template',
                help='Template file for injected network config'),
     cfg.StrOpt('pxe_append_params',
                help='additional append parameters for baremetal PXE boot'),
     cfg.StrOpt('pxe_config_template',
-               default='$pybasedir/nova/virt/baremetal/pxe_config.template',
+               default='$pybasedir/ironic/pxe_config.template',
                help='Template file for PXE configuration'),
     cfg.IntOpt('pxe_deploy_timeout',
                help='Timeout for PXE deployments. Default: 0 (unlimited)',
@@ -58,13 +57,9 @@ pxe_opts = [
 LOG = logging.getLogger(__name__)
-baremetal_group = cfg.OptGroup(name='baremetal',
-                               title='Baremetal Options')
 CONF = cfg.CONF
-CONF.register_group(baremetal_group)
-CONF.register_opts(pxe_opts, baremetal_group)
-CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.register_opts(pxe_opts)
+CONF.import_opt('use_ipv6', 'ironic.netconf')
 CHEETAH = None
@@ -98,11 +93,11 @@ def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
         'deployment_ari_path': deployment_ari_path,
         'aki_path': aki_path,
         'ari_path': ari_path,
-        'pxe_append_params': CONF.baremetal.pxe_append_params,
+        'pxe_append_params': CONF.pxe_append_params,
     }
     cheetah = _get_cheetah()
     pxe_config = str(cheetah(
-        open(CONF.baremetal.pxe_config_template).read(),
+        open(CONF.pxe_config_template).read(),
         searchList=[{'pxe_options': pxe_options,
                      'ROOT': '${ROOT}',
                      }]))
@@ -139,7 +134,7 @@ def build_network_config(network_info):
     cheetah = _get_cheetah()
     network_config = str(cheetah(
-        open(CONF.baremetal.net_config_template).read(),
+        open(CONF.net_config_template).read(),
         searchList=[
             {'interfaces': interfaces,
              'use_ipv6': CONF.use_ipv6,
@@ -150,12 +145,12 @@ def build_network_config(network_info):
 def get_deploy_aki_id(instance_type):
     return instance_type.get('extra_specs', {}).\
-        get('baremetal:deploy_kernel_id', CONF.baremetal.deploy_kernel)
+        get('baremetal:deploy_kernel_id', CONF.deploy_kernel)
 def get_deploy_ari_id(instance_type):
     return instance_type.get('extra_specs', {}).\
-        get('baremetal:deploy_ramdisk_id', CONF.baremetal.deploy_ramdisk)
+        get('baremetal:deploy_ramdisk_id', CONF.deploy_ramdisk)
 def get_image_dir_path(instance):
@@ -170,7 +165,7 @@ def get_image_file_path(instance):
 def get_pxe_config_file_path(instance):
     """Generate the path for an instances PXE config file."""
-    return os.path.join(CONF.baremetal.tftp_root, instance['uuid'], 'config')
+    return os.path.join(CONF.tftp_root, instance['uuid'], 'config')
 def get_partition_sizes(instance):
@@ -190,7 +185,7 @@ def get_partition_sizes(instance):
 def get_pxe_mac_path(mac):
     """Convert a MAC address into a PXE config file name."""
     return os.path.join(
-        CONF.baremetal.tftp_root,
+        CONF.tftp_root,
         'pxelinux.cfg',
         "01-" + mac.replace(":", "-").lower()
     )
@@ -225,7 +220,7 @@ def get_tftp_image_info(instance, instance_type):
         if not uuid:
             missing_labels.append(label)
         else:
-            image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
+            image_info[label][1] = os.path.join(CONF.tftp_root,
                                                 instance['uuid'], label)
     if missing_labels:
         raise exception.NovaException(_(
@@ -250,7 +245,7 @@ class PXE(base.NodeDriver):
     def _cache_tftp_images(self, context, instance, image_info):
         """Fetch the necessary kernels and ramdisks for the instance."""
         fileutils.ensure_tree(
-            os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
+            os.path.join(CONF.tftp_root, instance['uuid']))
         LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
                   instance['name'])
@@ -272,7 +267,7 @@ class PXE(base.NodeDriver):
         to the appropriate places on local disk.
         Both sets of kernel and ramdisk are needed for PXE booting, so these
-        are stored under CONF.baremetal.tftp_root.
+        are stored under CONF.tftp_root.
         At present, the AMI is cached and certain files are injected.
         Debian/ubuntu-specific assumptions are made regarding the injected
@@ -444,7 +439,7 @@ class PXE(base.NodeDriver):
             bm_utils.unlink_without_raise(get_pxe_mac_path(mac))
         bm_utils.rmtree_without_raise(
-            os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
+            os.path.join(CONF.tftp_root, instance['uuid']))
     def activate_node(self, context, node, instance):
         """Wait for PXE deployment to complete."""
@@ -461,23 +456,23 @@ class PXE(base.NodeDriver):
                 raise loopingcall.LoopingCallDone()
             status = row.get('task_state')
-            if (status == baremetal_states.DEPLOYING
+            if (status == states.DEPLOYING
                     and locals['started'] is False):
                 LOG.info(_("PXE deploy started for instance %s")
                          % instance['uuid'])
                 locals['started'] = True
-            elif status in (baremetal_states.DEPLOYDONE,
-                            baremetal_states.ACTIVE):
+            elif status in (states.DEPLOYDONE,
+                            states.ACTIVE):
                 LOG.info(_("PXE deploy completed for instance %s")
                          % instance['uuid'])
                 raise loopingcall.LoopingCallDone()
-            elif status == baremetal_states.DEPLOYFAIL:
+            elif status == states.DEPLOYFAIL:
                 locals['error'] = _("PXE deploy failed for instance %s")
             except exception.NodeNotFound:
                 locals['error'] = _("Baremetal node deleted while waiting "
                                     "for deployment of instance %s")
-            if (CONF.baremetal.pxe_deploy_timeout and
+            if (CONF.pxe_deploy_timeout and
                     timeutils.utcnow() > expiration):
                 locals['error'] = _("Timeout reached while waiting for "
                                     "PXE deploy of instance %s")
@@ -485,7 +480,7 @@ class PXE(base.NodeDriver):
             raise loopingcall.LoopingCallDone()
         expiration = timeutils.utcnow() + datetime.timedelta(
-            seconds=CONF.baremetal.pxe_deploy_timeout)
+            seconds=CONF.pxe_deploy_timeout)
         timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy)
         timer.start(interval=1).wait()
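
As a concrete illustration of get_pxe_mac_path in the hunks above: pxelinux looks for a config file named after the MAC address with a "01-" prefix and dashes instead of colons, relative to the TFTP root. A tiny hedged example (the tftp_root value is made up):

import os

def get_pxe_mac_path(mac, tftp_root='/tftpboot'):
    # Mirrors the helper above: "01-" + MAC with ":" -> "-" and lowercased.
    return os.path.join(tftp_root, 'pxelinux.cfg',
                        '01-' + mac.replace(':', '-').lower())

print(get_pxe_mac_path('52:54:00:AB:CD:EF'))
# /tftpboot/pxelinux.cfg/01-52-54-00-ab-cd-ef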

@@ -25,32 +25,27 @@ import os
 from oslo.config import cfg
 from nova.compute import instance_types
-from nova import exception
-from nova.openstack.common.db import exception as db_exc
-from nova.openstack.common import fileutils
-from nova.openstack.common import log as logging
-from nova import utils
-from nova.virt.baremetal import baremetal_states
-from nova.virt.baremetal import base
-from nova.virt.baremetal import db
-from nova.virt.baremetal import utils as bm_utils
+from ironic import exception
+from ironic.openstack.common.db import exception as db_exc
+from ironic.openstack.common import fileutils
+from ironic.openstack.common import log as logging
+from ironic import utils
+from ironic import states
+from ironic.manager import base
+from ironic import db
+from ironic import utils as bm_utils
 tilera_opts = [
     cfg.StrOpt('net_config_template',
-               default='$pybasedir/nova/virt/baremetal/'
-                       'net-dhcp.ubuntu.template',
+               default='$pybasedir/ironic/net-dhcp.ubuntu.template',
                help='Template file for injected network config'),
 ]
 LOG = logging.getLogger(__name__)
-baremetal_group = cfg.OptGroup(name='baremetal',
-                               title='Baremetal Options')
 CONF = cfg.CONF
-CONF.register_group(baremetal_group)
-CONF.register_opts(tilera_opts, baremetal_group)
-CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.register_opts(tilera_opts)
+CONF.import_opt('use_ipv6', 'ironic.netconf')
 CHEETAH = None
@@ -91,7 +86,7 @@ def build_network_config(network_info):
     cheetah = _get_cheetah()
     network_config = str(cheetah(
-        open(CONF.baremetal.net_config_template).read(),
+        open(CONF.net_config_template).read(),
         searchList=[
             {'interfaces': interfaces,
              'use_ipv6': CONF.use_ipv6,
@@ -113,7 +108,7 @@ def get_image_file_path(instance):
 def get_tilera_nfs_path(node_id):
     """Generate the path for an instances Tilera nfs."""
     tilera_nfs_dir = "fs_" + str(node_id)
-    return os.path.join(CONF.baremetal.tftp_root, tilera_nfs_dir)
+    return os.path.join(CONF.tftp_root, tilera_nfs_dir)
 def get_partition_sizes(instance):
@@ -147,7 +142,7 @@ def get_tftp_image_info(instance):
         if not uuid:
             missing_labels.append(label)
         else:
-            image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
+            image_info[label][1] = os.path.join(CONF.tftp_root,
                                                 instance['uuid'], label)
     if missing_labels:
         raise exception.NovaException(_(
@@ -173,7 +168,7 @@ class Tilera(base.NodeDriver):
     def _cache_tftp_images(self, context, instance, image_info):
         """Fetch the necessary kernels and ramdisks for the instance."""
         fileutils.ensure_tree(
-            os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
+            os.path.join(CONF.tftp_root, instance['uuid']))
         LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
                   instance['name'])
@@ -195,7 +190,7 @@ class Tilera(base.NodeDriver):
         to the appropriate places on local disk.
         Both sets of kernel and ramdisk are needed for Tilera booting, so these
-        are stored under CONF.baremetal.tftp_root.
+        are stored under CONF.tftp_root.
         At present, the AMI is cached and certain files are injected.
         Debian/ubuntu-specific assumptions are made regarding the injected
@@ -335,10 +330,10 @@ class Tilera(base.NodeDriver):
         except db_exc.DBError:
             pass
-        if os.path.exists(os.path.join(CONF.baremetal.tftp_root,
+        if os.path.exists(os.path.join(CONF.tftp_root,
                                        instance['uuid'])):
             bm_utils.rmtree_without_raise(
-                os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
+                os.path.join(CONF.tftp_root, instance['uuid']))
     def _iptables_set(self, node_ip, user_data):
         """Sets security setting (iptables:port) if needed.
@@ -346,7 +341,7 @@ class Tilera(base.NodeDriver):
         iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
         /tftpboot/iptables_rule script sets iptables rule on the given node.
         """
-        rule_path = CONF.baremetal.tftp_root + "/iptables_rule"
+        rule_path = CONF.tftp_root + "/iptables_rule"
         if user_data is not None:
             open_ip = base64.b64decode(user_data)
             utils.execute(rule_path, node_ip, open_ip)
@@ -363,14 +358,14 @@ class Tilera(base.NodeDriver):
                                     " while waiting for deploy of %s")
             status = row.get('task_state')
-            if (status == baremetal_states.DEPLOYING and
+            if (status == states.DEPLOYING and
                     locals['started'] is False):
                 LOG.info(_('Tilera deploy started for instance %s')
                          % instance['uuid'])
                 locals['started'] = True
-            elif status in (baremetal_states.DEPLOYDONE,
-                            baremetal_states.BUILDING,
-                            baremetal_states.ACTIVE):
+            elif status in (states.DEPLOYDONE,
+                            states.BUILDING,
+                            states.ACTIVE):
                 LOG.info(_("Tilera deploy completed for instance %s")
                          % instance['uuid'])
                 node_ip = node['pm_address']
@@ -381,7 +376,7 @@ class Tilera(base.NodeDriver):
                 self.deactivate_bootloader(context, node, instance)
                 raise exception.NovaException(_("Node is "
                                                 "unknown error state."))
-            elif status == baremetal_states.DEPLOYFAIL:
+            elif status == states.DEPLOYFAIL:
                 locals['error'] = _("Tilera deploy failed for instance %s")
             except exception.NodeNotFound:
                 locals['error'] = _("Baremetal node deleted while waiting "

@@ -24,11 +24,11 @@ import time
 from oslo.config import cfg
-from nova import exception
-from nova.openstack.common import log as logging
-from nova import utils
-from nova.virt.baremetal import baremetal_states
-from nova.virt.baremetal import base
+from ironic import exception
+from ironic.openstack.common import log as logging
+from ironic import utils
+from ironic import states
+from ironic.manager import base
 opts = [
     cfg.StrOpt('tile_pdu_ip',
@@ -52,12 +52,8 @@ opts = [
                     'after tilera power operations'),
 ]
-baremetal_group = cfg.OptGroup(name='baremetal',
-                               title='Baremetal Options')
 CONF = cfg.CONF
-CONF.register_group(baremetal_group)
-CONF.register_opts(opts, baremetal_group)
+CONF.register_opts(opts)
 LOG = logging.getLogger(__name__)
@@ -98,56 +94,56 @@ class Pdu(base.PowerManager):
         changed. /tftpboot/pdu_mgr script handles power management of
         PDU (Power Distribution Unit).
         """
-        if mode == CONF.baremetal.tile_pdu_status:
+        if mode == CONF.tile_pdu_status:
             try:
                 utils.execute('ping', '-c1', self.address,
                               check_exit_code=True)
-                return CONF.baremetal.tile_pdu_on
+                return CONF.tile_pdu_on
             except exception.ProcessExecutionError:
-                return CONF.baremetal.tile_pdu_off
+                return CONF.tile_pdu_off
         else:
             try:
-                utils.execute(CONF.baremetal.tile_pdu_mgr,
-                              CONF.baremetal.tile_pdu_ip, mode)
-                time.sleep(CONF.baremetal.tile_power_wait)
+                utils.execute(CONF.tile_pdu_mgr,
+                              CONF.tile_pdu_ip, mode)
+                time.sleep(CONF.tile_power_wait)
                 return mode
             except exception.ProcessExecutionError:
                 LOG.exception(_("PDU failed"))
     def _is_power(self, state):
-        out_err = self._exec_pdutool(CONF.baremetal.tile_pdu_status)
+        out_err = self._exec_pdutool(CONF.tile_pdu_status)
         return out_err == state
     def _power_on(self):
         """Turn the power to this node ON."""
         try:
-            self._exec_pdutool(CONF.baremetal.tile_pdu_on)
-            if self._is_power(CONF.baremetal.tile_pdu_on):
-                self.state = baremetal_states.ACTIVE
+            self._exec_pdutool(CONF.tile_pdu_on)
+            if self._is_power(CONF.tile_pdu_on):
+                self.state = states.ACTIVE
             else:
-                self.state = baremetal_states.ERROR
+                self.state = states.ERROR
         except Exception:
-            self.state = baremetal_states.ERROR
+            self.state = states.ERROR
             LOG.exception(_("PDU power on failed"))
     def _power_off(self):
         """Turn the power to this node OFF."""
         try:
-            self._exec_pdutool(CONF.baremetal.tile_pdu_off)
-            if self._is_power(CONF.baremetal.tile_pdu_off):
-                self.state = baremetal_states.DELETED
+            self._exec_pdutool(CONF.tile_pdu_off)
+            if self._is_power(CONF.tile_pdu_off):
+                self.state = states.DELETED
             else:
-                self.state = baremetal_states.ERROR
+                self.state = states.ERROR
         except Exception:
-            self.state = baremetal_states.ERROR
+            self.state = states.ERROR
             LOG.exception(_("PDU power off failed"))
     def activate_node(self):
         """Turns the power to node ON."""
-        if (self._is_power(CONF.baremetal.tile_pdu_on)
-                and self.state == baremetal_states.ACTIVE):
+        if (self._is_power(CONF.tile_pdu_on)
+                and self.state == states.ACTIVE):
             LOG.warning(_("Activate node called, but node %s "
                           "is already active") % self.address)
         self._power_on()
@@ -165,7 +161,7 @@ class Pdu(base.PowerManager):
         return self.state
     def is_power_on(self):
-        return self._is_power(CONF.baremetal.tile_pdu_on)
+        return self._is_power(CONF.tile_pdu_on)
     def start_console(self):
         pass

@@ -19,7 +19,7 @@ import errno
 import os
 import shutil
-from nova.openstack.common import log as logging
+from ironic.openstack.common import log as logging
 from nova.virt.disk import api as disk_api
 from nova.virt.libvirt import utils as libvirt_utils

@@ -15,10 +15,10 @@
 from oslo.config import cfg
-from nova import context
-from nova import exception
-from nova.openstack.common import log as logging
-from nova.virt.baremetal import db as bmdb
+from ironic import context as nova_context
+from ironic import exception
+from ironic.openstack.common import log as logging
+from ironic import db
 CONF = cfg.CONF
@@ -38,15 +38,15 @@ class BareMetalVIFDriver(object):
                   % {'uuid': instance['uuid'], 'vif': vif})
         network, mapping = vif
         vif_uuid = mapping['vif_uuid']
-        ctx = context.get_admin_context()
-        node = bmdb.bm_node_get_by_instance_uuid(ctx, instance['uuid'])
+        ctx = nova_context.get_admin_context()
+        node = db.bm_node_get_by_instance_uuid(ctx, instance['uuid'])
         # TODO(deva): optimize this database query
         #             this is just searching for a free physical interface
-        pifs = bmdb.bm_interface_get_all_by_bm_node_id(ctx, node['id'])
+        pifs = db.bm_interface_get_all_by_bm_node_id(ctx, node['id'])
         for pif in pifs:
             if not pif['vif_uuid']:
-                bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], vif_uuid)
+                db.bm_interface_set_vif_uuid(ctx, pif['id'], vif_uuid)
                 LOG.debug(_("pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)")
                           % {'id': pif['id'], 'vif_uuid': vif_uuid})
                 self._after_plug(instance, network, mapping, pif)
@@ -64,10 +64,10 @@ class BareMetalVIFDriver(object):
                   {'uuid': instance['uuid'], 'vif': vif})
         network, mapping = vif
         vif_uuid = mapping['vif_uuid']
-        ctx = context.get_admin_context()
+        ctx = nova_context.get_admin_context()
         try:
-            pif = bmdb.bm_interface_get_by_vif_uuid(ctx, vif_uuid)
-            bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], None)
+            pif = db.bm_interface_get_by_vif_uuid(ctx, vif_uuid)
+            db.bm_interface_set_vif_uuid(ctx, pif['id'], None)
             LOG.debug(_("pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)")
                       % {'id': pif['id'], 'vif_uuid': vif_uuid})
             self._after_unplug(instance, network, mapping, pif)

@@ -19,14 +19,14 @@
 from oslo.config import cfg
-from nova import context as nova_context
-from nova import exception
-from nova.openstack.common import importutils
-from nova.openstack.common import log as logging
-from nova import utils
-from nova.virt.baremetal import baremetal_states
-from nova.virt.baremetal import base
-from nova.virt.baremetal import db
+from ironic import context as nova_context
+from ironic import exception
+from ironic.openstack.common import importutils
+from ironic.openstack.common import log as logging
+from ironic import utils
+from ironic import states
+from ironic.manager import base
+from ironic import db
 import nova.virt.powervm.common as connection
 opts = [
@@ -51,12 +51,8 @@ opts = [
 ]
-baremetal_vp = cfg.OptGroup(name='baremetal',
-                            title='Baremetal Options')
 CONF = cfg.CONF
-CONF.register_group(baremetal_vp)
-CONF.register_opts(opts, baremetal_vp)
+CONF.register_opts(opts)
 _conn = None
 _virtual_power_settings = None
@@ -89,9 +85,9 @@ class VirtualPowerManager(base.PowerManager):
         if _cmds is None:
             LOG.debug("Setting up %s commands." %
-                      CONF.baremetal.virtual_power_type)
-            _vpc = 'nova.virt.baremetal.virtual_power_driver_settings.%s' % \
-                CONF.baremetal.virtual_power_type
+                      CONF.virtual_power_type)
+            _vpc = 'ironic.virtual_power_driver_settings.%s' % \
+                CONF.virtual_power_type
             _cmds = importutils.import_class(_vpc)
         self._vp_cmd = _cmds()
         self.connection_data = _conn
@@ -106,26 +102,26 @@ class VirtualPowerManager(base.PowerManager):
         self.state = None
     def _get_conn(self):
-        if not CONF.baremetal.virtual_power_ssh_host:
+        if not CONF.virtual_power_ssh_host:
            raise exception.NovaException(
                 _('virtual_power_ssh_host not defined. Can not Start'))
-        if not CONF.baremetal.virtual_power_host_user:
+        if not CONF.virtual_power_host_user:
             raise exception.NovaException(
                 _('virtual_power_host_user not defined. Can not Start'))
-        if not CONF.baremetal.virtual_power_host_pass:
+        if not CONF.virtual_power_host_pass:
             # it is ok to not have a password if you have a keyfile
-            if CONF.baremetal.virtual_power_host_key is None:
+            if CONF.virtual_power_host_key is None:
                 raise exception.NovaException(
                     _('virtual_power_host_pass/key not set. Can not Start'))
         _conn = connection.Connection(
-            CONF.baremetal.virtual_power_ssh_host,
-            CONF.baremetal.virtual_power_host_user,
-            CONF.baremetal.virtual_power_host_pass,
-            CONF.baremetal.virtual_power_ssh_port,
-            CONF.baremetal.virtual_power_host_key)
+            CONF.virtual_power_ssh_host,
+            CONF.virtual_power_host_user,
+            CONF.virtual_power_host_pass,
+            CONF.virtual_power_ssh_port,
+            CONF.virtual_power_host_key)
         return _conn
     def _set_connection(self):
@@ -163,9 +159,9 @@ class VirtualPowerManager(base.PowerManager):
         self._run_command(cmd)
         if self.is_power_on():
-            self.state = baremetal_states.ACTIVE
+            self.state = states.ACTIVE
         else:
-            self.state = baremetal_states.ERROR
+            self.state = states.ERROR
         return self.state
     def reboot_node(self):
@@ -174,9 +170,9 @@ class VirtualPowerManager(base.PowerManager):
         cmd = self._vp_cmd.reboot_cmd
         self._run_command(cmd)
         if self.is_power_on():
-            self.state = baremetal_states.ACTIVE
+            self.state = states.ACTIVE
         else:
-            self.state = baremetal_states.ERROR
+            self.state = states.ERROR
         return self.state
     def deactivate_node(self):
@@ -187,9 +183,9 @@ class VirtualPowerManager(base.PowerManager):
         self._run_command(cmd)
         if self.is_power_on():
-            self.state = baremetal_states.ERROR
+            self.state = states.ERROR
         else:
-            self.state = baremetal_states.DELETED
+            self.state = states.DELETED
         return self.state
     def is_power_on(self):

@@ -20,12 +20,12 @@ import re
 from oslo.config import cfg
-from nova import context as nova_context
-from nova import exception
-from nova.openstack.common import importutils
-from nova.openstack.common import log as logging
-from nova import utils
-from nova.virt.baremetal import db as bmdb
+from ironic import context as nova_context
+from ironic import exception
+from ironic.openstack.common import importutils
+from ironic.openstack.common import log as logging
+from ironic import utils
+from ironic import db
 from nova.virt.libvirt import utils as libvirt_utils
 opts = [
@@ -39,15 +39,11 @@ opts = [
                help='iSCSI IQN prefix used in baremetal volume connections.'),
 ]
-baremetal_group = cfg.OptGroup(name='baremetal',
-                               title='Baremetal Options')
 CONF = cfg.CONF
-CONF.register_group(baremetal_group)
-CONF.register_opts(opts, baremetal_group)
+CONF.register_opts(opts)
-CONF.import_opt('host', 'nova.netconf')
-CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.import_opt('host', 'ironic.netconf')
+CONF.import_opt('use_ipv6', 'ironic.netconf')
 CONF.import_opt('libvirt_volume_drivers', 'nova.virt.libvirt.driver')
 LOG = logging.getLogger(__name__)
@@ -55,7 +51,7 @@ LOG = logging.getLogger(__name__)
 def _get_baremetal_node_by_instance_uuid(instance_uuid):
     context = nova_context.get_admin_context()
-    return bmdb.bm_node_get_by_instance_uuid(context, instance_uuid)
+    return db.bm_node_get_by_instance_uuid(context, instance_uuid)
 def _create_iscsi_export_tgtadm(path, tid, iqn):
@@ -165,7 +161,7 @@ def _find_tid(iqn):
 def _get_iqn(instance_name, mountpoint):
     mp = mountpoint.replace('/', '-').strip('-')
-    iqn = '%s:%s-%s' % (CONF.baremetal.iscsi_iqn_prefix,
+    iqn = '%s:%s-%s' % (CONF.iscsi_iqn_prefix,
                         instance_name,
                         mp)
     return iqn
@@ -220,9 +216,9 @@ class LibvirtVolumeDriver(VolumeDriver):
     def attach_volume(self, connection_info, instance, mountpoint):
         node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
         ctx = nova_context.get_admin_context()
-        pxe_ip = bmdb.bm_pxe_ip_get_by_bm_node_id(ctx, node['id'])
+        pxe_ip = db.bm_pxe_ip_get_by_bm_node_id(ctx, node['id'])
         if not pxe_ip:
-            if not CONF.baremetal.use_unsafe_iscsi:
+            if not CONF.use_unsafe_iscsi:
                 raise exception.NovaException(_(
                     'No fixed PXE IP is associated to %s') % instance['uuid'])
@@ -242,7 +238,7 @@ class LibvirtVolumeDriver(VolumeDriver):
         # instance's initiator ip, it allows any initiators
         # to connect to the volume. This means other bare-metal
         # instances that are not attached the volume can connect
-        # to the volume. Do not set CONF.baremetal.use_unsafe_iscsi
+        # to the volume. Do not set CONF.use_unsafe_iscsi
         # out of dev/test environments.
         # TODO(NTTdocomo): support CHAP
         _allow_iscsi_tgtadm(tid, 'ALL')
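
For reference, _get_iqn in the hunks above builds the iSCSI export name from the configured IQN prefix, the instance name, and the mountpoint with slashes collapsed to dashes. A small hedged example (the prefix default is illustrative, not taken from the config):

def get_iqn(instance_name, mountpoint,
            iqn_prefix='iqn.2010-10.org.openstack.baremetal'):
    # Mirrors _get_iqn above: '/dev/vdb' -> 'dev-vdb'
    mp = mountpoint.replace('/', '-').strip('-')
    return '%s:%s-%s' % (iqn_prefix, instance_name, mp)

print(get_iqn('instance-0001', '/dev/vdb'))
# iqn.2010-10.org.openstack.baremetal:instance-0001-dev-vdb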

View File

@ -297,11 +297,5 @@ def _get_impl():
"""Delay import of rpc_backend until configuration is loaded.""" """Delay import of rpc_backend until configuration is loaded."""
global _RPCIMPL global _RPCIMPL
if _RPCIMPL is None: if _RPCIMPL is None:
try: _RPCIMPL = importutils.import_module(CONF.rpc_backend)
_RPCIMPL = importutils.import_module(CONF.rpc_backend)
except ImportError:
# For backwards compatibility with older nova config.
impl = CONF.rpc_backend.replace('nova.rpc',
'nova.openstack.common.rpc')
_RPCIMPL = importutils.import_module(impl)
return _RPCIMPL return _RPCIMPL
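
With the nova-era fallback removed, _get_impl() imports whatever module path rpc_backend names; a legacy value such as 'nova.rpc.impl_kombu' would now surface an ImportError instead of being silently rewritten. A rough, hedged equivalent of the import-by-dotted-path helper, shown against a stdlib module so it runs standalone:

import sys

def import_module(import_str):
    """Illustrative stand-in for ironic.openstack.common.importutils.import_module."""
    __import__(import_str)
    return sys.modules[import_str]

# _get_impl() applies the same mechanism to CONF.rpc_backend, e.g. the
# 'ironic.openstack.common.rpc.impl_fake' path used by the test fixture below.
backend = import_module('json')
print(backend.dumps({'loaded': True}))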


@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
"""Policy Engine For Nova.""" """Policy Engine For Ironic."""
import os.path import os.path
@ -71,7 +71,7 @@ def _set_rules(data):
def enforce(context, action, target, do_raise=True): def enforce(context, action, target, do_raise=True):
"""Verifies that the action is valid on the target in this context. """Verifies that the action is valid on the target in this context.
:param context: nova context :param context: ironic context
:param action: string representing the action to be checked :param action: string representing the action to be checked
this should be colon separated for clarity. this should be colon separated for clarity.
i.e. ``compute:create_instance``, i.e. ``compute:create_instance``,


@ -34,11 +34,11 @@ from oslo.config import cfg
from nova import conductor from nova import conductor
from nova import context from nova import context
from nova import exception from nova import exception
from nova.openstack.common import eventlet_backdoor from ironic.openstack.common import eventlet_backdoor
from nova.openstack.common import importutils from ironic.openstack.common import importutils
from nova.openstack.common import log as logging from ironic.openstack.common import log as logging
from nova.openstack.common import loopingcall from ironic.openstack.common import loopingcall
from nova.openstack.common import rpc from ironic.openstack.common import rpc
from nova import servicegroup from nova import servicegroup
from nova import utils from nova import utils
from nova import version from nova import version


@ -37,17 +37,17 @@ from oslo.config import cfg
import stubout import stubout
import testtools import testtools
from nova import context from ironic import context
from nova import db from ironic import db
from nova.db import migration from ironic.db import migration
from nova.network import manager as network_manager from nova.network import manager as network_manager
from nova.openstack.common.db.sqlalchemy import session from ironic.openstack.common.db.sqlalchemy import session
from nova.openstack.common import log as logging from ironic.openstack.common import log as logging
from nova.openstack.common import timeutils from ironic.openstack.common import timeutils
from nova import paths from ironic import paths
from nova import service from ironic import service
from nova.tests import conf_fixture from ironic.tests import conf_fixture
from nova.tests import policy_fixture from ironic.tests import policy_fixture
test_opts = [ test_opts = [
@ -59,11 +59,11 @@ test_opts = [
CONF = cfg.CONF CONF = cfg.CONF
CONF.register_opts(test_opts) CONF.register_opts(test_opts)
CONF.import_opt('sql_connection', CONF.import_opt('sql_connection',
'nova.openstack.common.db.sqlalchemy.session') 'ironic.openstack.common.db.sqlalchemy.session')
CONF.import_opt('sqlite_db', 'nova.openstack.common.db.sqlalchemy.session') CONF.import_opt('sqlite_db', 'ironic.openstack.common.db.sqlalchemy.session')
CONF.set_override('use_stderr', False) CONF.set_override('use_stderr', False)
logging.setup('nova') logging.setup('ironic')
_DB_CACHE = None _DB_CACHE = None
@ -154,7 +154,7 @@ class ServiceFixture(fixtures.Fixture):
name = name name = name
host = host and host or uuid.uuid4().hex host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host) kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'nova-%s' % name) kwargs.setdefault('binary', 'ironic-%s' % name)
self.kwargs = kwargs self.kwargs = kwargs
def setUp(self): def setUp(self):

38
ironic/tests/__init__.py Normal file

@ -0,0 +1,38 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`ironic.tests` -- Ironic unit tests
=====================================================
.. automodule:: ironic.tests
:platform: Unix
"""
# TODO(mikal): move eventlet imports to ironic.__init__ once we move to PBR
import os
import sys
import eventlet
eventlet.monkey_patch(os=False)
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks
import __builtin__
setattr(__builtin__, '_', lambda x: x)
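
The __builtin__ hook at the end of this new file is what the linked nose issue is about: modules that wrap strings in _() at import time would otherwise hit a NameError under the test runner before gettext has installed a real translation function. A tiny illustration of the effect (the message text is made up):

import __builtin__

# Install an identity translation, exactly as the test package does above.
setattr(__builtin__, '_', lambda x: x)

# Modules imported afterwards can safely call _() at import time under nose.
message = _('fake translatable message')
print(message)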


@ -19,23 +19,13 @@
import fixtures import fixtures
from oslo.config import cfg from oslo.config import cfg
from nova import config from ironic import config
from nova import ipv6 from ironic import paths
from nova import paths from ironic.tests import utils
from nova.tests import utils
CONF = cfg.CONF CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf') CONF.import_opt('use_ipv6', 'ironic.netconf')
CONF.import_opt('host', 'nova.netconf') CONF.import_opt('host', 'ironic.netconf')
CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.manager')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('policy_file', 'nova.policy')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('api_paste_config', 'nova.wsgi')
class ConfFixture(fixtures.Fixture): class ConfFixture(fixtures.Fixture):
@ -48,28 +38,16 @@ class ConfFixture(fixtures.Fixture):
super(ConfFixture, self).setUp() super(ConfFixture, self).setUp()
self.conf.set_default('api_paste_config', self.conf.set_default('api_paste_config',
paths.state_path_def('etc/nova/api-paste.ini')) paths.state_path_def('etc/ironic/api-paste.ini'))
self.conf.set_default('host', 'fake-mini') self.conf.set_default('host', 'fake-mini')
self.conf.set_default('compute_driver', 'nova.virt.fake.FakeDriver')
self.conf.set_default('fake_network', True)
self.conf.set_default('fake_rabbit', True)
self.conf.set_default('flat_network_bridge', 'br100')
self.conf.set_default('floating_ip_dns_manager',
'nova.tests.utils.dns_manager')
self.conf.set_default('instance_dns_manager',
'nova.tests.utils.dns_manager')
self.conf.set_default('lock_path', None)
self.conf.set_default('network_size', 8)
self.conf.set_default('num_networks', 2)
self.conf.set_default('rpc_backend', self.conf.set_default('rpc_backend',
'nova.openstack.common.rpc.impl_fake') 'ironic.openstack.common.rpc.impl_fake')
self.conf.set_default('rpc_cast_timeout', 5) self.conf.set_default('rpc_cast_timeout', 5)
self.conf.set_default('rpc_response_timeout', 5) self.conf.set_default('rpc_response_timeout', 5)
self.conf.set_default('sql_connection', "sqlite://") self.conf.set_default('sql_connection', "sqlite://")
self.conf.set_default('sqlite_synchronous', False) self.conf.set_default('sqlite_synchronous', False)
self.conf.set_default('use_ipv6', True) self.conf.set_default('use_ipv6', True)
self.conf.set_default('verbose', True) self.conf.set_default('verbose', True)
self.conf.set_default('vlan_interface', 'eth0')
config.parse_args([], default_config_files=[]) config.parse_args([], default_config_files=[])
self.addCleanup(self.conf.reset) self.addCleanup(self.conf.reset)
self.addCleanup(utils.cleanup_dns_managers) self.addCleanup(utils.cleanup_dns_managers)
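
ConfFixture is what the project's test cases are expected to pull in so every test starts from these trimmed-down defaults. A minimal usage sketch, assuming the fixture is constructed with the CONF object it adjusts (the self.conf references above suggest that) and that host and use_ipv6 keep the defaults set in setUp():

import testtools
from oslo.config import cfg

from ironic.tests import conf_fixture


class ExampleConfTestCase(testtools.TestCase):
    def setUp(self):
        super(ExampleConfTestCase, self).setUp()
        # Apply the trimmed-down Ironic defaults and their automatic cleanup.
        self.useFixture(conf_fixture.ConfFixture(cfg.CONF))

    def test_defaults(self):
        self.assertEqual('fake-mini', cfg.CONF.host)
        self.assertTrue(cfg.CONF.use_ipv6)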


@ -13,4 +13,4 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from nova.tests.baremetal.db import * from ironic.tests.db import *


@ -17,17 +17,16 @@
from oslo.config import cfg from oslo.config import cfg
from nova import context as nova_context from ironic import context as ironic_context
from nova import test from ironic import test
from nova.virt.baremetal.db import migration as bm_migration from ironic.db import migration as bm_migration
from nova.virt.baremetal.db.sqlalchemy import session as bm_session from ironic.db.sqlalchemy import session as bm_session
_DB_CACHE = None _DB_CACHE = None
CONF = cfg.CONF CONF = cfg.CONF
CONF.import_opt('sql_connection', CONF.import_opt('sql_connection',
'nova.virt.baremetal.db.sqlalchemy.session', 'ironic.db.sqlalchemy.session')
group='baremetal')
class Database(test.Database): class Database(test.Database):
@ -40,11 +39,11 @@ class BMDBTestCase(test.TestCase):
def setUp(self): def setUp(self):
super(BMDBTestCase, self).setUp() super(BMDBTestCase, self).setUp()
self.flags(sql_connection='sqlite://', group='baremetal') self.flags(sql_connection='sqlite://')
global _DB_CACHE global _DB_CACHE
if not _DB_CACHE: if not _DB_CACHE:
_DB_CACHE = Database(bm_session, bm_migration, _DB_CACHE = Database(bm_session, bm_migration,
sql_connection=CONF.baremetal.sql_connection, sql_connection=CONF.sql_connection,
sqlite_db=None, sqlite_db=None,
sqlite_clean_db=None) sqlite_clean_db=None)
self.useFixture(_DB_CACHE) self.useFixture(_DB_CACHE)


@ -18,7 +18,7 @@ Bare-metal DB testcase for BareMetalInterface
""" """
from nova import exception from nova import exception
from nova.openstack.common.db import exception as db_exc from ironic.openstack.common.db import exception as db_exc
from nova.tests.baremetal.db import base from nova.tests.baremetal.db import base
from nova.virt.baremetal import db from nova.virt.baremetal import db


@ -18,7 +18,7 @@ Bare-metal DB testcase for BareMetalPxeIp
""" """
from nova import exception from nova import exception
from nova.openstack.common.db import exception as db_exc from ironic.openstack.common.db import exception as db_exc
from nova.tests.baremetal.db import base from nova.tests.baremetal.db import base
from nova.tests.baremetal.db import utils from nova.tests.baremetal.db import utils
from nova.virt.baremetal import db from nova.virt.baremetal import db


@ -15,8 +15,8 @@
"""Bare-metal test utils.""" """Bare-metal test utils."""
from nova import test from ironic import test
from nova.virt.baremetal.db.sqlalchemy import models as bm_models from ironic.db.sqlalchemy import models as bm_models
def new_bm_node(**kwargs): def new_bm_node(**kwargs):


@ -18,6 +18,8 @@
policy_data = """ policy_data = """
{ {
"admin_api": "role:admin", "admin_api": "role:admin",
"admin_or_owner": "is_admin:True or project_id:%(project_id)s",
"context_is_admin": "role:admin or role:administrator", "context_is_admin": "role:admin or role:administrator",
"default": "rule:admin_or_owner"
} }
""" """


@ -12,5 +12,4 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from ironic.tests.manager import *
from nova.virt.baremetal.db.api import *


@ -26,37 +26,36 @@ import mox
from oslo.config import cfg from oslo.config import cfg
from testtools import matchers from testtools import matchers
from nova import exception from ironic import exception
from nova.openstack.common.db import exception as db_exc from ironic.openstack.common.db import exception as db_exc
from nova.tests.baremetal.db import base as bm_db_base from ironic.tests.db import base as db_base
from nova.tests.baremetal.db import utils as bm_db_utils from ironic.tests.db import utils as db_utils
from nova.tests.image import fake as fake_image from ironic.tests.image import fake as fake_image
from nova.tests import utils from ironic.tests import utils
from nova.virt.baremetal import baremetal_states from ironic import states
from nova.virt.baremetal import db from ironic import db
from nova.virt.baremetal import pxe from ironic import pxe
from nova.virt.baremetal import utils as bm_utils from ironic import utils as bm_utils
from nova.virt.disk import api as disk_api from ironic.virt.disk import api as disk_api
from nova.virt import fake as fake_virt from ironic.virt import fake as fake_virt
CONF = cfg.CONF CONF = cfg.CONF
COMMON_FLAGS = dict( COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver', firewall_driver='ironic.fake.FakeFirewallDriver',
host='test_host', host='test_host',
) )
BAREMETAL_FLAGS = dict( BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.pxe.PXE', driver='ironic.pxe.PXE',
instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'], instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager='nova.virt.baremetal.fake.FakePowerManager', power_manager='ironic.fake.FakePowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver', vif_driver='ironic.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver', volume_driver='ironic.fake.FakeVolumeDriver',
group='baremetal',
) )
class BareMetalPXETestCase(bm_db_base.BMDBTestCase): class BareMetalPXETestCase(db_base.BMDBTestCase):
def setUp(self): def setUp(self):
super(BareMetalPXETestCase, self).setUp() super(BareMetalPXETestCase, self).setUp()
@ -70,7 +69,7 @@ class BareMetalPXETestCase(bm_db_base.BMDBTestCase):
self.test_block_device_info = None, self.test_block_device_info = None,
self.instance = utils.get_test_instance() self.instance = utils.get_test_instance()
self.test_network_info = utils.get_test_network_info(), self.test_network_info = utils.get_test_network_info(),
self.node_info = bm_db_utils.new_bm_node( self.node_info = db_utils.new_bm_node(
service_host='test_host', service_host='test_host',
cpus=4, cpus=4,
memory_mb=2048, memory_mb=2048,
@ -154,7 +153,7 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
def test_build_network_config_dhcp(self): def test_build_network_config_dhcp(self):
self.flags( self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/' net_config_template='$pybasedir/ironic/'
'net-dhcp.ubuntu.template', 'net-dhcp.ubuntu.template',
group='baremetal', group='baremetal',
) )
@ -166,7 +165,7 @@ class PXEClassMethodsTestCase(BareMetalPXETestCase):
def test_build_network_config_static(self): def test_build_network_config_static(self):
self.flags( self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/' net_config_template='$pybasedir/ironic/'
'net-static.ubuntu.template', 'net-static.ubuntu.template',
group='baremetal', group='baremetal',
) )
@ -560,7 +559,7 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
self.flags(pxe_deploy_timeout=1, group='baremetal') self.flags(pxe_deploy_timeout=1, group='baremetal')
db.bm_node_update(self.context, 1, db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYING, {'task_state': states.DEPLOYING,
'instance_uuid': 'fake-uuid'}) 'instance_uuid': 'fake-uuid'})
# test timeout # test timeout
@ -570,12 +569,12 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
# test DEPLOYDONE # test DEPLOYDONE
db.bm_node_update(self.context, 1, db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYDONE}) {'task_state': states.DEPLOYDONE})
self.driver.activate_node(self.context, self.node, self.instance) self.driver.activate_node(self.context, self.node, self.instance)
# test no deploy -- state is just ACTIVE # test no deploy -- state is just ACTIVE
db.bm_node_update(self.context, 1, db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.ACTIVE}) {'task_state': states.ACTIVE})
self.driver.activate_node(self.context, self.node, self.instance) self.driver.activate_node(self.context, self.node, self.instance)
# test node gone # test node gone
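
The baremetal_states constants referenced here move to a top-level states module. Based only on the names this test file uses, its assumed shape is roughly the following; the string values are illustrative and not taken from the commit:

# Assumed sketch of ironic/states.py, limited to the constants used above.
ACTIVE = 'active'          # node is provisioned; activate_node() is a no-op path
DEPLOYING = 'deploying'    # deployment in progress, bounded by pxe_deploy_timeout
DEPLOYDONE = 'deploydone'  # deployment finished; activate_node() may proceed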


@ -23,7 +23,7 @@ import os
from oslo.config import cfg from oslo.config import cfg
from nova import exception from nova import exception
from nova.openstack.common.db import exception as db_exc from ironic.openstack.common.db import exception as db_exc
from nova.tests.baremetal.db import base as bm_db_base from nova.tests.baremetal.db import base as bm_db_base
from nova.tests.baremetal.db import utils as bm_db_utils from nova.tests.baremetal.db import utils as bm_db_utils
from nova.tests.image import fake as fake_image from nova.tests.image import fake as fake_image


@ -17,9 +17,9 @@ import os
import fixtures import fixtures
from oslo.config import cfg from oslo.config import cfg
from nova.openstack.common import policy as common_policy from ironic.openstack.common import policy as common_policy
import nova.policy import ironic.policy
from nova.tests import fake_policy from ironic.tests import fake_policy
CONF = cfg.CONF CONF = cfg.CONF
@ -34,9 +34,9 @@ class PolicyFixture(fixtures.Fixture):
with open(self.policy_file_name, 'w') as policy_file: with open(self.policy_file_name, 'w') as policy_file:
policy_file.write(fake_policy.policy_data) policy_file.write(fake_policy.policy_data)
CONF.set_override('policy_file', self.policy_file_name) CONF.set_override('policy_file', self.policy_file_name)
nova.policy.reset() ironic.policy.reset()
nova.policy.init() ironic.policy.init()
self.addCleanup(nova.policy.reset) self.addCleanup(ironic.policy.reset)
def set_rules(self, rules): def set_rules(self, rules):
common_policy.set_rules(common_policy.Rules( common_policy.set_rules(common_policy.Rules(


@ -23,7 +23,7 @@ import time
import mox import mox
from nova.cmd import baremetal_deploy_helper as bmdh from nova.cmd import baremetal_deploy_helper as bmdh
from nova.openstack.common import log as logging from ironic.openstack.common import log as logging
from nova import test from nova import test
from nova.tests.baremetal.db import base as bm_db_base from nova.tests.baremetal.db import base as bm_db_base
from nova.virt.baremetal import db as bm_db from nova.virt.baremetal import db as bm_db


@ -44,14 +44,14 @@ import netaddr
from oslo.config import cfg from oslo.config import cfg
from nova import exception from ironic import exception
from nova.openstack.common import excutils from ironic.openstack.common import excutils
from nova.openstack.common import importutils from ironic.openstack.common import importutils
from nova.openstack.common import log as logging from ironic.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common from ironic.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils from ironic.openstack.common import timeutils
notify_decorator = 'nova.openstack.common.notifier.api.notify_decorator' notify_decorator = 'ironic.openstack.common.notifier.api.notify_decorator'
monkey_patch_opts = [ monkey_patch_opts = [
cfg.BoolOpt('monkey_patch', cfg.BoolOpt('monkey_patch',


74
tools/install_venv.py Normal file

@ -0,0 +1,74 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import install_venv_common as install_venv
def print_help(venv, root):
help = """
Ironic development environment setup is complete.
Ironic development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Ironic virtualenv for the extent of your current shell
session you can run:
$ source %s/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ %s/tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print help % (venv, root)
def main(argv):
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if os.environ.get('tools_path'):
root = os.environ['tools_path']
venv = os.path.join(root, '.venv')
if os.environ.get('venv'):
venv = os.environ['venv']
pip_requires = os.path.join(root, 'tools', 'pip-requires')
test_requires = os.path.join(root, 'tools', 'test-requires')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Ironic'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
py_version, project)
options = install.parse_args(argv)
install.check_python_version()
install.check_dependencies()
install.create_virtualenv(no_site_packages=options.no_site_packages)
install.install_dependencies()
install.post_process()
print_help(venv, root)
if __name__ == '__main__':
main(sys.argv)


@ -24,8 +24,6 @@ environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common Synced in from openstack-common
""" """
from __future__ import print_function
import optparse import optparse
import os import os
import subprocess import subprocess
@ -44,7 +42,7 @@ class InstallVenv(object):
self.project = project self.project = project
def die(self, message, *args): def die(self, message, *args):
print(message % args, file=sys.stderr) print >> sys.stderr, message % args
sys.exit(1) sys.exit(1)
def check_python_version(self): def check_python_version(self):
@ -91,20 +89,20 @@ class InstallVenv(object):
virtual environment. virtual environment.
""" """
if not os.path.isdir(self.venv): if not os.path.isdir(self.venv):
print('Creating venv...', end=' ') print 'Creating venv...',
if no_site_packages: if no_site_packages:
self.run_command(['virtualenv', '-q', '--no-site-packages', self.run_command(['virtualenv', '-q', '--no-site-packages',
self.venv]) self.venv])
else: else:
self.run_command(['virtualenv', '-q', self.venv]) self.run_command(['virtualenv', '-q', self.venv])
print('done.') print 'done.'
print('Installing pip in venv...', end=' ') print 'Installing pip in venv...',
if not self.run_command(['tools/with_venv.sh', 'easy_install', if not self.run_command(['tools/with_venv.sh', 'easy_install',
'pip>1.0']).strip(): 'pip>1.0']).strip():
self.die("Failed to install pip.") self.die("Failed to install pip.")
print('done.') print 'done.'
else: else:
print("venv already exists...") print "venv already exists..."
pass pass
def pip_install(self, *args): def pip_install(self, *args):
@ -113,7 +111,7 @@ class InstallVenv(object):
redirect_output=False) redirect_output=False)
def install_dependencies(self): def install_dependencies(self):
print('Installing dependencies with pip (this can take a while)...') print 'Installing dependencies with pip (this can take a while)...'
# First things first, make sure our venv has the latest pip and # First things first, make sure our venv has the latest pip and
# distribute. # distribute.
@ -155,12 +153,12 @@ class Distro(InstallVenv):
return return
if self.check_cmd('easy_install'): if self.check_cmd('easy_install'):
print('Installing virtualenv via easy_install...', end=' ') print 'Installing virtualenv via easy_install...',
if self.run_command(['easy_install', 'virtualenv']): if self.run_command(['easy_install', 'virtualenv']):
print('Succeeded') print 'Succeeded'
return return
else: else:
print('Failed') print 'Failed'
self.die('ERROR: virtualenv not found.\n\n%s development' self.die('ERROR: virtualenv not found.\n\n%s development'
' requires virtualenv, please install it using your' ' requires virtualenv, please install it using your'


@ -17,7 +17,7 @@
import os import os
import sys import sys
import tools.install_venv_common as install_venv import install_venv_common as install_venv
def main(argv): def main(argv):
@ -25,8 +25,8 @@ def main(argv):
venv = os.environ['VIRTUAL_ENV'] venv = os.environ['VIRTUAL_ENV']
pip_requires = os.path.join(root, 'requirements.txt') pip_requires = os.path.join(root, 'tools', 'pip-requires')
test_requires = os.path.join(root, 'test-requirements.txt') test_requires = os.path.join(root, 'tools', 'test-requires')
py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
project = 'Nova' project = 'Nova'
install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,

7
tools/with_venv.sh Executable file

@ -0,0 +1,7 @@
#!/bin/bash
tools_path=${tools_path:-$(dirname $0)}
venv_path=${venv_path:-${tools_path}}
venv_dir=${venv_name:-/../.venv}
TOOLS=${tools_path}
VENV=${venv:-${venv_path}/${venv_dir}}
source ${VENV}/bin/activate && "$@"