Remove baremetal virt driver

This patch removes the baremetal virt driver, which was deprecated in
Juno, from nova. It does not aim to perform any of the refactoring we
need to do now that the driver is gone, but is intended as a clean
removal of all the baremetal code itself.

Of specific note are the api_samples tests. We didn't have samples to
test the ironic proxy code, so I removed the baremetal ones for show
and index, resetting that extension back to "untested" status as if it
were new.

Tests that stub out the ironic interactions and re-introduce that
coverage should be added after this patch.
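
As a rough sketch of what such a follow-up test could look like (not
part of this patch): FakeClient, get_test_node and the mock.patch
pattern below come from the existing ironic proxy tests; the
FakeRequest stand-in, the _get_ironic_client patch, the controller
wiring and the assertions are assumptions about how the test might be
put together.

    import mock

    from nova.api.openstack.compute.contrib import baremetal_nodes
    from nova import context
    from nova import test
    from nova.tests.virt.ironic import utils as ironic_utils

    FAKE_IRONIC_CLIENT = ironic_utils.FakeClient()


    class FakeRequest(object):
        # Minimal stand-in for the request object the controller reads
        # the nova context from.
        def __init__(self, ctxt):
            self.environ = {'nova.context': ctxt}


    class BareMetalNodesProxyTest(test.NoDBTestCase):
        def setUp(self):
            super(BareMetalNodesProxyTest, self).setUp()
            self.context = context.get_admin_context()
            self.request = FakeRequest(self.context)
            # Assumed to still accept an extension manager argument;
            # a mock is enough for the ironic proxy paths.
            self.controller = baremetal_nodes.BareMetalNodeController(
                mock.Mock())

        @mock.patch.object(baremetal_nodes, '_get_ironic_client',
                           return_value=FAKE_IRONIC_CLIENT)
        @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
        def test_index_proxies_to_ironic(self, mock_list, mock_get_cli):
            # index() should render whatever the Ironic client returns.
            properties = {'cpus': 2, 'memory_mb': 1024, 'local_gb': 20}
            mock_list.return_value = [
                ironic_utils.get_test_node(properties=properties)]
            nodes = self.controller.index(self.request)['nodes']
            self.assertEqual(1, len(nodes))
            self.assertEqual(2, nodes[0]['cpus'])
            mock_list.assert_called_once_with(detail=True)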

Change-Id: Ia76e41a8a3b7230701872ae7a1975edc3d9ea847
Authored by Dan Smith on 2014-10-07 07:08:45 -07:00; committed by Russell Bryant
parent 4bdfb21ccf
commit 46ed619b9a
105 changed files with 35 additions and 10076 deletions

View File

@ -30,8 +30,6 @@ Reference
nova-api-metadata
nova-api-os-compute
nova-api
nova-baremetal-deploy-helper
nova-baremetal-manage
nova-cert
nova-compute
nova-conductor

View File

@ -1,52 +0,0 @@
============================
nova-baremetal-deploy-helper
============================
------------------------------------------------------------------
Writes images to a bare-metal node and switches it to instance mode
------------------------------------------------------------------
:Author: openstack@lists.openstack.org
:Date: 2012-10-17
:Copyright: OpenStack Foundation
:Version: 2013.1
:Manual section: 1
:Manual group: cloud computing
SYNOPSIS
========
nova-baremetal-deploy-helper
DESCRIPTION
===========
This is a service which should run on the nova-compute host when using the
baremetal driver. During a baremetal node's first boot,
nova-baremetal-deploy-helper works in conjunction with diskimage-builder's
"deploy" ramdisk to write an image from glance onto the baremetal node's disks
using iSCSI. After that is complete, nova-baremetal-deploy-helper switches the
PXE config to reference the kernel and ramdisk which correspond to the running
image.
OPTIONS
=======
**General options**
FILES
========
* /etc/nova/nova.conf
* /etc/nova/rootwrap.conf
* /etc/nova/rootwrap.d/
SEE ALSO
========
* `OpenStack Nova <http://nova.openstack.org>`__
BUGS
====
* Nova bugs are managed at Launchpad `Bugs : Nova <https://bugs.launchpad.net/nova>`__

View File

@ -1,67 +0,0 @@
=====================
nova-baremetal-manage
=====================
------------------------------------------------------
Manage bare-metal DB in OpenStack Nova
------------------------------------------------------
:Author: openstack@lists.openstack.org
:Date: 2012-10-17
:Copyright: OpenStack Foundation
:Version: 2013.1
:Manual section: 1
:Manual group: cloud computing
SYNOPSIS
========
nova-baremetal-manage <category> <action> [<args>]
DESCRIPTION
===========
nova-baremetal-manage manages bare-metal DB schema.
OPTIONS
=======
The standard pattern for executing a nova-baremetal-manage command is:
``nova-baremetal-manage <category> <command> [<args>]``
Run without arguments to see a list of available command categories:
``nova-baremetal-manage``
The only available category is db; a detailed description is below.
You can also run with a category argument such as "db" to see a list of all commands in that category:
``nova-baremetal-manage db``
These sections describe the available categories and arguments for nova-baremetal-manage.
Bare-Metal DB
~~~~~~~~~~~~~
``nova-baremetal-manage db version``
Print the current database version.
``nova-baremetal-manage db sync``
Sync the database up to the most recent version. This is the standard way to create the db as well.
FILES
========
/etc/nova/nova.conf: get location of bare-metal DB
SEE ALSO
========
* `OpenStack Nova <http://nova.openstack.org>`__
BUGS
====
* Nova bugs are managed at Launchpad `Bugs : Nova <https://bugs.launchpad.net/nova>`__

View File

@ -15,7 +15,6 @@
"""The bare-metal admin extension with Ironic Proxy."""
import netaddr
from oslo.config import cfg
from oslo.utils import importutils
import webob
@ -23,10 +22,8 @@ import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.baremetal import db
ironic_client = importutils.try_import('ironicclient.client')
@ -80,14 +77,6 @@ def _make_interface_elem(elem):
elem.set(f)
def _use_ironic():
# TODO(lucasagomes): This switch this should also be deleted as
# part of the Nova Baremetal removal effort. At that point, any
# code that checks it should assume True, the False case should be
# removed, and this API will only/always proxy to Ironic.
return 'ironic' in CONF.compute_driver
def _get_ironic_client():
"""return an Ironic client."""
# TODO(NobodyCam): Fix insecure setting
@ -110,20 +99,6 @@ def _no_ironic_proxy(cmd):
"action.") % {'cmd': cmd})
def is_valid_mac(address):
"""Verify the format of a MAC address."""
class mac_dialect(netaddr.mac_eui48):
word_fmt = '%.02x'
word_sep = ':'
try:
na = netaddr.EUI(address, dialect=mac_dialect)
except Exception:
return False
return str(na) == address.lower()
class NodeTemplate(xmlutil.TemplateBuilder):
def construct(self):
node_elem = xmlutil.TemplateElement('node', selector='node')
@ -149,13 +124,6 @@ class NodesTemplate(xmlutil.TemplateBuilder):
return xmlutil.MasterTemplate(root, 1)
class InterfaceTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('interface', selector='interface')
_make_interface_elem(root)
return xmlutil.MasterTemplate(root, 1)
class BareMetalNodeController(wsgi.Controller):
"""The Bare-Metal Node API controller for the OpenStack API.
@ -182,161 +150,54 @@ class BareMetalNodeController(wsgi.Controller):
context = req.environ['nova.context']
authorize(context)
nodes = []
if _use_ironic():
# proxy command to Ironic
icli = _get_ironic_client()
ironic_nodes = icli.node.list(detail=True)
for inode in ironic_nodes:
node = {'id': inode.uuid,
'interfaces': [],
'host': 'IRONIC MANAGED',
'task_state': inode.provision_state,
'cpus': inode.properties['cpus'],
'memory_mb': inode.properties['memory_mb'],
'disk_gb': inode.properties['local_gb']}
nodes.append(node)
else:
# use nova baremetal
nodes_from_db = db.bm_node_get_all(context)
for node_from_db in nodes_from_db:
try:
ifs = db.bm_interface_get_all_by_bm_node_id(
context, node_from_db['id'])
except exception.NodeNotFound:
ifs = []
node = self._node_dict(node_from_db)
node['interfaces'] = [_interface_dict(i) for i in ifs]
nodes.append(node)
return {'nodes': nodes}
@wsgi.serializers(xml=NodeTemplate)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
if _use_ironic():
# proxy command to Ironic
icli = _get_ironic_client()
inode = icli.node.get(id)
iports = icli.node.list_ports(id)
# proxy command to Ironic
icli = _get_ironic_client()
ironic_nodes = icli.node.list(detail=True)
for inode in ironic_nodes:
node = {'id': inode.uuid,
'interfaces': [],
'host': 'IRONIC MANAGED',
'task_state': inode.provision_state,
'cpus': inode.properties['cpus'],
'memory_mb': inode.properties['memory_mb'],
'disk_gb': inode.properties['local_gb'],
'instance_uuid': inode.instance_uuid}
for port in iports:
node['interfaces'].append({'address': port.address})
else:
# use nova baremetal
try:
node = db.bm_node_get(context, id)
except exception.NodeNotFound:
raise webob.exc.HTTPNotFound()
try:
ifs = db.bm_interface_get_all_by_bm_node_id(context, id)
except exception.NodeNotFound:
ifs = []
node = self._node_dict(node)
node['interfaces'] = [_interface_dict(i) for i in ifs]
'disk_gb': inode.properties['local_gb']}
nodes.append(node)
return {'nodes': nodes}
@wsgi.serializers(xml=NodeTemplate)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
# proxy command to Ironic
icli = _get_ironic_client()
inode = icli.node.get(id)
iports = icli.node.list_ports(id)
node = {'id': inode.uuid,
'interfaces': [],
'host': 'IRONIC MANAGED',
'task_state': inode.provision_state,
'cpus': inode.properties['cpus'],
'memory_mb': inode.properties['memory_mb'],
'disk_gb': inode.properties['local_gb'],
'instance_uuid': inode.instance_uuid}
for port in iports:
node['interfaces'].append({'address': port.address})
return {'node': node}
@wsgi.serializers(xml=NodeTemplate)
def create(self, req, body):
if _use_ironic():
_no_ironic_proxy("node-create")
context = req.environ['nova.context']
authorize(context)
values = body['node'].copy()
prov_mac_address = values.pop('prov_mac_address', None)
if (prov_mac_address is not None
and not is_valid_mac(prov_mac_address)):
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify address "
"in the form of xx:xx:xx:xx:xx:xx"))
node = db.bm_node_create(context, values)
node = self._node_dict(node)
if prov_mac_address:
if_id = db.bm_interface_create(context,
bm_node_id=node['id'],
address=prov_mac_address,
datapath_id=None,
port_no=None)
if_ref = db.bm_interface_get(context, if_id)
node['interfaces'] = [_interface_dict(if_ref)]
else:
node['interfaces'] = []
return {'node': node}
_no_ironic_proxy("port-create")
def delete(self, req, id):
if _use_ironic():
_no_ironic_proxy("node-delete")
_no_ironic_proxy("port-create")
context = req.environ['nova.context']
authorize(context)
try:
db.bm_node_destroy(context, id)
except exception.NodeNotFound:
raise webob.exc.HTTPNotFound()
return webob.Response(status_int=202)
def _check_node_exists(self, context, node_id):
try:
db.bm_node_get(context, node_id)
except exception.NodeNotFound:
raise webob.exc.HTTPNotFound()
@wsgi.serializers(xml=InterfaceTemplate)
@wsgi.action('add_interface')
def _add_interface(self, req, id, body):
if _use_ironic():
_no_ironic_proxy("port-create")
_no_ironic_proxy("port-create")
context = req.environ['nova.context']
authorize(context)
self._check_node_exists(context, id)
body = body['add_interface']
address = body['address']
datapath_id = body.get('datapath_id')
port_no = body.get('port_no')
if not is_valid_mac(address):
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify address "
"in the form of xx:xx:xx:xx:xx:xx"))
if_id = db.bm_interface_create(context,
bm_node_id=id,
address=address,
datapath_id=datapath_id,
port_no=port_no)
if_ref = db.bm_interface_get(context, if_id)
return {'interface': _interface_dict(if_ref)}
@wsgi.response(202)
@wsgi.action('remove_interface')
def _remove_interface(self, req, id, body):
if _use_ironic():
_no_ironic_proxy("port-delete")
context = req.environ['nova.context']
authorize(context)
self._check_node_exists(context, id)
body = body['remove_interface']
if_id = body.get('id')
address = body.get('address')
if not if_id and not address:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify id or address"))
ifs = db.bm_interface_get_all_by_bm_node_id(context, id)
for i in ifs:
if if_id and if_id != i['id']:
continue
if address and address != i['address']:
continue
db.bm_interface_destroy(context, i['id'])
return webob.Response(status_int=202)
raise webob.exc.HTTPNotFound()
_no_ironic_proxy("port-delete")
class Baremetal_nodes(extensions.ExtensionDescriptor):

View File

@ -1,376 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Bare-Metal Deployment Service."""
import cgi
import os
import Queue
import re
import socket
import stat
import sys
import threading
import time
from wsgiref import simple_server
from oslo.utils import excutils
from oslo.utils import units
from nova import config
from nova import context as nova_context
from nova.i18n import _
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.disk import api as disk
QUEUE = Queue.Queue()
LOG = logging.getLogger(__name__)
class BareMetalDeployException(Exception):
pass
# All functions are called from deploy() directly or indirectly.
# They are split for stub-out.
def discovery(portal_address, portal_port):
"""Do iSCSI discovery on portal."""
utils.execute('iscsiadm',
'-m', 'discovery',
'-t', 'st',
'-p', '%s:%s' % (portal_address, portal_port),
run_as_root=True,
check_exit_code=[0])
def login_iscsi(portal_address, portal_port, target_iqn):
"""Login to an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
'-T', target_iqn,
'--login',
run_as_root=True,
check_exit_code=[0])
# Ensure the login complete
time.sleep(10)
def logout_iscsi(portal_address, portal_port, target_iqn):
"""Logout from an iSCSI target."""
utils.execute('iscsiadm',
'-m', 'node',
'-p', '%s:%s' % (portal_address, portal_port),
'-T', target_iqn,
'--logout',
run_as_root=True,
check_exit_code=[0])
def make_partitions(dev, root_mb, swap_mb, ephemeral_mb):
"""Create partitions for root, ephemeral and swap on a disk device."""
# Lead in with 1MB to allow room for the partition table itself, otherwise
# the way sfdisk adjusts doesn't shift the partition up to compensate, and
# we lose the space.
# http://bazaar.launchpad.net/~ubuntu-branches/ubuntu/raring/util-linux/
# raring/view/head:/fdisk/sfdisk.c#L1940
if ephemeral_mb:
stdin_command = ('1,%d,83;\n,%d,82;\n,%d,83;\n0,0;\n' %
(ephemeral_mb, swap_mb, root_mb))
else:
stdin_command = ('1,%d,83;\n,%d,82;\n0,0;\n0,0;\n' %
(root_mb, swap_mb))
utils.execute('sfdisk', '-uM', dev, process_input=stdin_command,
run_as_root=True,
attempts=3,
check_exit_code=[0])
# avoid "device is busy"
time.sleep(10)
def is_block_device(dev):
"""Check whether a device is block or not."""
s = os.stat(dev)
return stat.S_ISBLK(s.st_mode)
def dd(src, dst):
"""Execute dd from src to dst."""
utils.execute('dd',
'if=%s' % src,
'of=%s' % dst,
'bs=1M',
'oflag=direct',
run_as_root=True,
check_exit_code=[0])
def mkswap(dev, label='swap1'):
"""Execute mkswap on a device."""
utils.execute('mkswap',
'-L', label,
dev,
run_as_root=True,
check_exit_code=[0])
def mkfs_ephemeral(dev, label="ephemeral0"):
# TODO(jogo) support non-default mkfs options as well
disk.mkfs("default", label, dev)
def block_uuid(dev):
"""Get UUID of a block device."""
out, _ = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
run_as_root=True,
check_exit_code=[0])
return out.strip()
def switch_pxe_config(path, root_uuid):
"""Switch a pxe config from deployment mode to service mode."""
with open(path) as f:
lines = f.readlines()
root = 'UUID=%s' % root_uuid
rre = re.compile(r'\$\{ROOT\}')
dre = re.compile('^default .*$')
with open(path, 'w') as f:
for line in lines:
line = rre.sub(root, line)
line = dre.sub('default boot', line)
f.write(line)
def notify(address, port):
"""Notify a node that it becomes ready to reboot."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((address, port))
s.send('done')
finally:
s.close()
def get_dev(address, port, iqn, lun):
"""Returns a device path for given parameters."""
dev = "/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s" \
% (address, port, iqn, lun)
return dev
def get_image_mb(image_path):
"""Get size of an image in Megabyte."""
mb = units.Mi
image_byte = os.path.getsize(image_path)
# round up size to MB
image_mb = int((image_byte + mb - 1) / mb)
return image_mb
def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, image_path,
preserve_ephemeral):
"""Creates partitions and write an image to the root partition.
:param preserve_ephemeral: If True, no filesystem is written to the
ephemeral block device, preserving whatever content it had (if the
partition table has not changed).
"""
def raise_exception(msg):
LOG.error(msg)
raise BareMetalDeployException(msg)
if ephemeral_mb:
ephemeral_part = "%s-part1" % dev
swap_part = "%s-part2" % dev
root_part = "%s-part3" % dev
else:
root_part = "%s-part1" % dev
swap_part = "%s-part2" % dev
if not is_block_device(dev):
raise_exception(_("parent device '%s' not found") % dev)
make_partitions(dev, root_mb, swap_mb, ephemeral_mb)
if not is_block_device(root_part):
raise_exception(_("root device '%s' not found") % root_part)
if not is_block_device(swap_part):
raise_exception(_("swap device '%s' not found") % swap_part)
if ephemeral_mb and not is_block_device(ephemeral_part):
raise_exception(_("ephemeral device '%s' not found") % ephemeral_part)
dd(image_path, root_part)
mkswap(swap_part)
if ephemeral_mb and not preserve_ephemeral:
mkfs_ephemeral(ephemeral_part)
try:
root_uuid = block_uuid(root_part)
except processutils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_("Failed to detect root device UUID."))
return root_uuid
def deploy(address, port, iqn, lun, image_path, pxe_config_path,
root_mb, swap_mb, ephemeral_mb, preserve_ephemeral=False):
"""All-in-one function to deploy a node.
:param preserve_ephemeral: If True, no filesystem is written to the
ephemeral block device, preserving whatever content it had (if the
partition table has not changed).
"""
dev = get_dev(address, port, iqn, lun)
image_mb = get_image_mb(image_path)
if image_mb > root_mb:
root_mb = image_mb
discovery(address, port)
login_iscsi(address, port, iqn)
try:
root_uuid = work_on_disk(dev, root_mb, swap_mb, ephemeral_mb,
image_path, preserve_ephemeral)
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
# Log output if there was a error
LOG.error(_("Cmd : %s"), err.cmd)
LOG.error(_("StdOut : %r"), err.stdout)
LOG.error(_("StdErr : %r"), err.stderr)
finally:
logout_iscsi(address, port, iqn)
switch_pxe_config(pxe_config_path, root_uuid)
# Ensure the node started netcat on the port after POST the request.
time.sleep(3)
notify(address, 10000)
class Worker(threading.Thread):
"""Thread that handles requests in queue."""
def __init__(self):
super(Worker, self).__init__()
self.setDaemon(True)
self.stop = False
self.queue_timeout = 1
def run(self):
while not self.stop:
try:
# Set timeout to check self.stop periodically
(node_id, params) = QUEUE.get(block=True,
timeout=self.queue_timeout)
except Queue.Empty:
pass
else:
# Requests comes here from BareMetalDeploy.post()
LOG.info(_('start deployment for node %(node_id)s, '
'params %(params)s'),
{'node_id': node_id, 'params': params})
context = nova_context.get_admin_context()
try:
db.bm_node_update(context, node_id,
{'task_state': baremetal_states.DEPLOYING})
deploy(**params)
except Exception:
LOG.exception(_('deployment to node %s failed'), node_id)
db.bm_node_update(context, node_id,
{'task_state': baremetal_states.DEPLOYFAIL})
else:
LOG.info(_('deployment to node %s done'), node_id)
db.bm_node_update(context, node_id,
{'task_state': baremetal_states.DEPLOYDONE})
class BareMetalDeploy(object):
"""WSGI server for bare-metal deployment."""
def __init__(self):
self.worker = Worker()
self.worker.start()
def __call__(self, environ, start_response):
method = environ['REQUEST_METHOD']
if method == 'POST':
return self.post(environ, start_response)
else:
start_response('501 Not Implemented',
[('Content-type', 'text/plain')])
return 'Not Implemented'
def post(self, environ, start_response):
LOG.info(_("post: environ=%s"), environ)
inpt = environ['wsgi.input']
length = int(environ.get('CONTENT_LENGTH', 0))
x = inpt.read(length)
q = dict(cgi.parse_qsl(x))
try:
node_id = q['i']
deploy_key = q['k']
address = q['a']
port = q.get('p', '3260')
iqn = q['n']
lun = q.get('l', '1')
err_msg = q.get('e')
except KeyError as e:
start_response('400 Bad Request', [('Content-type', 'text/plain')])
return "parameter '%s' is not defined" % e
if err_msg:
LOG.error(_('Deploy agent error message: %s'), err_msg)
context = nova_context.get_admin_context()
d = db.bm_node_get(context, node_id)
if d['deploy_key'] != deploy_key:
start_response('400 Bad Request', [('Content-type', 'text/plain')])
return 'key is not match'
params = {'address': address,
'port': port,
'iqn': iqn,
'lun': lun,
'image_path': d['image_path'],
'pxe_config_path': d['pxe_config_path'],
'root_mb': int(d['root_mb']),
'swap_mb': int(d['swap_mb']),
'ephemeral_mb': int(d['ephemeral_mb']),
'preserve_ephemeral': d['preserve_ephemeral'],
}
# Restart worker, if needed
if not self.worker.isAlive():
self.worker = Worker()
self.worker.start()
LOG.info(_("request is queued: node %(node_id)s, params %(params)s"),
{'node_id': node_id, 'params': params})
QUEUE.put((node_id, params))
# Requests go to Worker.run()
start_response('200 OK', [('Content-type', 'text/plain')])
return ''
def main():
config.parse_args(sys.argv)
logging.setup("nova")
global LOG
LOG = logging.getLogger('nova.virt.baremetal.deploy_helper')
objects.register_all()
app = BareMetalDeploy()
srv = simple_server.make_server('', 10000, app)
srv.serve_forever()
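
As an illustration of the callback protocol implemented by
BareMetalDeploy.post() above: the deploy ramdisk reports in with a
form-encoded POST on port 10000. The parameter keys and defaults below
come from post() and main(); the host name, node id, deploy key and
IQN are invented placeholder values.

    # Hypothetical client-side sketch of the deploy ramdisk's callback.
    import urllib
    import urllib2

    params = urllib.urlencode({
        'i': '1',                                   # bm_nodes row id
        'k': 'FAKE-DEPLOY-KEY',                     # must match d['deploy_key']
        'a': '192.0.2.10',                          # iSCSI portal address
        'p': '3260',                                # iSCSI portal port (default)
        'n': 'iqn.2010-10.org.openstack:deploy-1',  # target IQN
        'l': '1',                                   # LUN (default)
    })
    # A plain POST; the helper parses the body with cgi.parse_qsl().
    urllib2.urlopen('http://compute-host:10000/', params).read()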

View File

@ -1,208 +0,0 @@
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for nova bare-metal management.
"""
import os
import sys
from oslo.config import cfg
import six
from nova import config
from nova.i18n import _
from nova import objects
from nova.openstack.common import cliutils
from nova.openstack.common import log as logging
from nova import version
from nova.virt.baremetal.db import migration as bmdb_migration
CONF = cfg.CONF
# Decorators for actions
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
return func
return _decorator
class BareMetalDbCommands(object):
"""Class for managing the bare-metal database."""
def __init__(self):
pass
@args('--version', dest='version', metavar='<version>',
help='Bare-metal Database version')
def sync(self, version=None):
"""Sync the database up to the most recent version."""
bmdb_migration.db_sync(version)
def version(self):
"""Print the current database version."""
v = bmdb_migration.db_version()
print(v)
# return for unittest
return v
CATEGORIES = {
'db': BareMetalDbCommands,
}
def methods_of(obj):
"""Get all callable methods of an object that don't start with underscore.
returns a list of tuples of the form (method_name, method)
"""
result = []
for i in dir(obj):
if callable(getattr(obj, i)) and not i.startswith('_'):
result.append((i, getattr(obj, i)))
return result
def add_command_parsers(subparsers):
parser = subparsers.add_parser('bash-completion')
parser.add_argument('query_category', nargs='?')
for category in CATEGORIES:
command_object = CATEGORIES[category]()
parser = subparsers.add_parser(category)
parser.set_defaults(command_object=command_object)
category_subparsers = parser.add_subparsers(dest='action')
for (action, action_fn) in methods_of(command_object):
parser = category_subparsers.add_parser(action)
action_kwargs = []
for args, kwargs in getattr(action_fn, 'args', []):
action_kwargs.append(kwargs['dest'])
kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
parser.add_argument(*args, **kwargs)
parser.set_defaults(action_fn=action_fn)
parser.set_defaults(action_kwargs=action_kwargs)
parser.add_argument('action_args', nargs='*')
category_opt = cfg.SubCommandOpt('category',
title='Command categories',
help='Available categories',
handler=add_command_parsers)
def main():
"""Parse options and call the appropriate class/method."""
CONF.register_cli_opt(category_opt)
try:
config.parse_args(sys.argv)
logging.setup("nova")
except cfg.ConfigFilesNotFoundError:
cfgfile = CONF.config_file[-1] if CONF.config_file else None
if cfgfile and not os.access(cfgfile, os.R_OK):
st = os.stat(cfgfile)
print(_("Could not read %s. Re-running with sudo") % cfgfile)
try:
os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
except Exception:
print(_('sudo failed, continuing as if nothing happened'))
print(_('Please re-run nova-manage as root.'))
return(2)
objects.register_all()
if CONF.category.name == "version":
print(version.version_string_with_package())
return(0)
if CONF.category.name == "bash-completion":
if not CONF.category.query_category:
print(" ".join(CATEGORIES.keys()))
elif CONF.category.query_category in CATEGORIES:
fn = CATEGORIES[CONF.category.query_category]
command_object = fn()
actions = methods_of(command_object)
print(" ".join([k for (k, v) in actions]))
return(0)
fn = CONF.category.action_fn
fn_args = [arg.decode('utf-8') for arg in CONF.category.action_args]
fn_kwargs = {}
for k in CONF.category.action_kwargs:
v = getattr(CONF.category, 'action_kwarg_' + k)
if v is None:
continue
if isinstance(v, six.string_types):
v = v.decode('utf-8')
fn_kwargs[k] = v
# call the action with the remaining arguments
# check arguments
try:
cliutils.validate_args(fn, *fn_args, **fn_kwargs)
except cliutils.MissingArgs as e:
print(fn.__doc__)
print(e)
return(1)
try:
fn(*fn_args, **fn_kwargs)
return(0)
except Exception:
print(_("Command failed, please check log for more info"))
raise

View File

@ -20,10 +20,8 @@ from webob import exc
from nova.api.openstack.compute.contrib import baremetal_nodes
from nova.api.openstack import extensions
from nova import context
from nova import exception
from nova import test
from nova.tests.virt.ironic import utils as ironic_utils
from nova.virt.baremetal import db
CONF = cfg.CONF
@ -63,17 +61,6 @@ def fake_node_ext_status(**updates):
return node
def fake_interface(**updates):
interface = {
'id': 1,
'address': '11:11:11:11:11:11',
'datapath_id': 2,
'port_no': 3,
}
if updates:
interface.update(updates)
return interface
FAKE_IRONIC_CLIENT = ironic_utils.FakeClient()
@ -89,302 +76,8 @@ class BareMetalNodesTest(test.NoDBTestCase):
self.controller = baremetal_nodes.BareMetalNodeController(self.ext_mgr)
self.request = FakeRequest(self.context)
def _test_create(self, node, ext_status=False):
response = node.copy()
del response['pm_password']
response['instance_uuid'] = None
self.mox.StubOutWithMock(db, 'bm_node_create')
db.bm_node_create(self.context, node).AndReturn(response)
self.ext_mgr.is_loaded('os-baremetal-ext-status').AndReturn(ext_status)
self.mox.ReplayAll()
res_dict = self.controller.create(self.request, {'node': node})
self.assertEqual({'node': response}, res_dict)
def _test_show(self, node, ext_status=False):
interfaces = [fake_interface(id=1, address='11:11:11:11:11:11'),
fake_interface(id=2, address='22:22:22:22:22:22'),
]
node.update(interfaces=interfaces)
response = node.copy()
del response['pm_password']
self.mox.StubOutWithMock(db, 'bm_node_get')
self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
db.bm_node_get(self.context, node['id']).AndReturn(node)
db.bm_interface_get_all_by_bm_node_id(self.context, node['id']).\
AndReturn(interfaces)
self.ext_mgr.is_loaded('os-baremetal-ext-status').AndReturn(ext_status)
self.mox.ReplayAll()
res_dict = self.controller.show(self.request, node['id'])
self.assertEqual({'node': response}, res_dict)
self.assertEqual(2, len(res_dict['node']['interfaces']))
def _test_show_no_interfaces(self, ext_status=False):
node_id = 1
node = {'id': node_id}
self.mox.StubOutWithMock(db, 'bm_node_get')
self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
db.bm_node_get(self.context, node_id).AndReturn(node)
db.bm_interface_get_all_by_bm_node_id(self.context, node_id).\
AndRaise(exception.NodeNotFound(node_id=node_id))
self.ext_mgr.is_loaded('os-baremetal-ext-status').AndReturn(ext_status)
self.mox.ReplayAll()
res_dict = self.controller.show(self.request, node_id)
self.assertEqual(node_id, res_dict['node']['id'])
self.assertEqual(0, len(res_dict['node']['interfaces']))
def _test_index(self, ext_status=False):
nodes = [{'id': 1},
{'id': 2},
]
interfaces = [{'id': 1, 'address': '11:11:11:11:11:11'},
{'id': 2, 'address': '22:22:22:22:22:22'},
]
self.mox.StubOutWithMock(db, 'bm_node_get_all')
self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
db.bm_node_get_all(self.context).AndReturn(nodes)
db.bm_interface_get_all_by_bm_node_id(self.context, 1).\
AndRaise(exception.NodeNotFound(node_id=1))
for n in nodes:
self.ext_mgr.is_loaded('os-baremetal-ext-status').\
AndReturn(ext_status)
db.bm_interface_get_all_by_bm_node_id(self.context, 2).\
AndReturn(interfaces)
self.mox.ReplayAll()
res_dict = self.controller.index(self.request)
self.assertEqual(2, len(res_dict['nodes']))
self.assertEqual([], res_dict['nodes'][0]['interfaces'])
self.assertEqual(2, len(res_dict['nodes'][1]['interfaces']))
def test_create(self):
node = fake_node(id=100)
self._test_create(node)
def test_create_ext_status(self):
node = fake_node_ext_status(id=100)
self._test_create(node, ext_status=True)
def test_create_with_prov_mac_address(self):
node = {
'service_host': "host",
'cpus': 8,
'memory_mb': 8192,
'local_gb': 128,
'pm_address': "10.1.2.3",
'pm_user': "pm_user",
'pm_password': "pm_pass",
'terminal_port': 8000,
'interfaces': [],
}
intf = {
'address': '1a:B2:3C:4d:e5:6f',
'datapath_id': None,
'id': None,
'port_no': None,
}
request = node.copy()
request['prov_mac_address'] = intf['address']
db_node = node.copy()
db_node['id'] = 100
response = node.copy()
response.update(id=db_node['id'],
instance_uuid=None,
interfaces=[intf])
del response['pm_password']
self.mox.StubOutWithMock(db, 'bm_node_create')
self.mox.StubOutWithMock(db, 'bm_interface_create')
self.mox.StubOutWithMock(db, 'bm_interface_get')
db.bm_node_create(self.context, node).AndReturn(db_node)
self.ext_mgr.is_loaded('os-baremetal-ext-status').AndReturn(False)
db.bm_interface_create(self.context,
bm_node_id=db_node['id'],
address=intf['address'],
datapath_id=intf['datapath_id'],
port_no=intf['port_no']).AndReturn(1000)
db.bm_interface_get(self.context, 1000).AndReturn(intf)
self.mox.ReplayAll()
res_dict = self.controller.create(self.request, {'node': request})
self.assertEqual({'node': response}, res_dict)
def test_create_with_invalid_prov_mac_address(self):
node = {
'service_host': "host",
'cpus': 8,
'memory_mb': 8192,
'local_gb': 128,
'pm_address': "10.1.2.3",
'pm_user': "pm_user",
'pm_password': "pm_pass",
'terminal_port': 8000,
'prov_mac_address': 'INVALID!!',
}
self.assertRaises(exc.HTTPBadRequest,
self.controller.create,
self.request, {'node': node})
def test_delete(self):
self.mox.StubOutWithMock(db, 'bm_node_destroy')
db.bm_node_destroy(self.context, 1)
self.mox.ReplayAll()
self.controller.delete(self.request, 1)
def test_delete_node_not_found(self):
self.mox.StubOutWithMock(db, 'bm_node_destroy')
db.bm_node_destroy(self.context, 1).\
AndRaise(exception.NodeNotFound(node_id=1))
self.mox.ReplayAll()
self.assertRaises(
exc.HTTPNotFound,
self.controller.delete,
self.request,
1)
def test_index(self):
self._test_index()
def test_index_ext_status(self):
self._test_index(ext_status=True)
def test_show(self):
node = fake_node(id=1)
self._test_show(node)
def test_show_ext_status(self):
node = fake_node_ext_status(id=1)
self._test_show(node, ext_status=True)
def test_show_no_interfaces(self):
self._test_show_no_interfaces()
def test_show_no_interfaces_ext_status(self):
self._test_show_no_interfaces(ext_status=True)
def test_add_interface(self):
node_id = 1
address = '11:22:33:ab:cd:ef'
body = {'add_interface': {'address': address}}
self.mox.StubOutWithMock(db, 'bm_node_get')
self.mox.StubOutWithMock(db, 'bm_interface_create')
self.mox.StubOutWithMock(db, 'bm_interface_get')
db.bm_node_get(self.context, node_id)
db.bm_interface_create(self.context,
bm_node_id=node_id,
address=address,
datapath_id=None,
port_no=None).\
AndReturn(12345)
db.bm_interface_get(self.context, 12345).\
AndReturn({'id': 12345, 'address': address})
self.mox.ReplayAll()
res_dict = self.controller._add_interface(self.request, node_id, body)
self.assertEqual(12345, res_dict['interface']['id'])
self.assertEqual(address, res_dict['interface']['address'])
def test_add_interface_invalid_address(self):
node_id = 1
body = {'add_interface': {'address': ''}}
self.mox.StubOutWithMock(db, 'bm_node_get')
db.bm_node_get(self.context, node_id)
self.mox.ReplayAll()
self.assertRaises(exc.HTTPBadRequest,
self.controller._add_interface,
self.request,
node_id,
body)
def test_remove_interface(self):
node_id = 1
interfaces = [{'id': 1},
{'id': 2},
{'id': 3},
]
body = {'remove_interface': {'id': 2}}
self.mox.StubOutWithMock(db, 'bm_node_get')
self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
self.mox.StubOutWithMock(db, 'bm_interface_destroy')
db.bm_node_get(self.context, node_id)
db.bm_interface_get_all_by_bm_node_id(self.context, node_id).\
AndReturn(interfaces)
db.bm_interface_destroy(self.context, 2)
self.mox.ReplayAll()
self.controller._remove_interface(self.request, node_id, body)
def test_remove_interface_by_address(self):
node_id = 1
interfaces = [{'id': 1, 'address': '11:11:11:11:11:11'},
{'id': 2, 'address': '22:22:22:22:22:22'},
{'id': 3, 'address': '33:33:33:33:33:33'},
]
self.mox.StubOutWithMock(db, 'bm_node_get')
self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
self.mox.StubOutWithMock(db, 'bm_interface_destroy')
db.bm_node_get(self.context, node_id)
db.bm_interface_get_all_by_bm_node_id(self.context, node_id).\
AndReturn(interfaces)
db.bm_interface_destroy(self.context, 2)
self.mox.ReplayAll()
body = {'remove_interface': {'address': '22:22:22:22:22:22'}}
self.controller._remove_interface(self.request, node_id, body)
def test_remove_interface_no_id_no_address(self):
node_id = 1
self.mox.StubOutWithMock(db, 'bm_node_get')
db.bm_node_get(self.context, node_id)
self.mox.ReplayAll()
body = {'remove_interface': {}}
self.assertRaises(exc.HTTPBadRequest,
self.controller._remove_interface,
self.request,
node_id,
body)
def test_add_interface_node_not_found(self):
node_id = 1
self.mox.StubOutWithMock(db, 'bm_node_get')
db.bm_node_get(self.context, node_id).\
AndRaise(exception.NodeNotFound(node_id=node_id))
self.mox.ReplayAll()
body = {'add_interface': {'address': '11:11:11:11:11:11'}}
self.assertRaises(exc.HTTPNotFound,
self.controller._add_interface,
self.request,
node_id,
body)
def test_remove_interface_node_not_found(self):
node_id = 1
self.mox.StubOutWithMock(db, 'bm_node_get')
db.bm_node_get(self.context, node_id).\
AndRaise(exception.NodeNotFound(node_id=node_id))
self.mox.ReplayAll()
body = {'remove_interface': {'address': '11:11:11:11:11:11'}}
self.assertRaises(exc.HTTPNotFound,
self.controller._remove_interface,
self.request,
node_id,
body)
def test_is_valid_mac(self):
self.assertFalse(baremetal_nodes.is_valid_mac(None))
self.assertTrue(baremetal_nodes.is_valid_mac("52:54:00:cf:2d:31"))
self.assertTrue(baremetal_nodes.is_valid_mac(u"52:54:00:cf:2d:31"))
self.assertFalse(baremetal_nodes.is_valid_mac("127.0.0.1"))
self.assertFalse(baremetal_nodes.is_valid_mac("not:a:mac:address"))
self.assertFalse(baremetal_nodes.is_valid_mac("52-54-00-cf-2d-31"))
self.assertFalse(baremetal_nodes.is_valid_mac("5254.00cf.2d31"))
self.assertFalse(baremetal_nodes.is_valid_mac("52:54:0:cf:2d:31"))
self.assertFalse(baremetal_nodes.is_valid_mac("aa bb cc dd ee ff"))
self.assertTrue(baremetal_nodes.is_valid_mac("AA:BB:CC:DD:EE:FF"))
self.assertFalse(baremetal_nodes.is_valid_mac("AA BB CC DD EE FF"))
self.assertFalse(baremetal_nodes.is_valid_mac("AA-BB-CC-DD-EE-FF"))
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
def test_index_ironic(self, mock_list):
CONF.set_override('compute_driver', 'nova.virt.ironic.driver')
properties = {'cpus': 2, 'memory_mb': 1024, 'local_gb': 20}
node = ironic_utils.get_test_node(properties=properties)
mock_list.return_value = [node]
@ -404,8 +97,6 @@ class BareMetalNodesTest(test.NoDBTestCase):
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
def test_show_ironic(self, mock_get, mock_list_ports):
CONF.set_override('compute_driver', 'nova.virt.ironic.driver')
properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
node = ironic_utils.get_test_node(properties=properties)
port = ironic_utils.get_test_port()
@ -429,8 +120,6 @@ class BareMetalNodesTest(test.NoDBTestCase):
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
def test_show_ironic_no_interfaces(self, mock_get, mock_list_ports):
CONF.set_override('compute_driver', 'nova.virt.ironic.driver')
properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
node = ironic_utils.get_test_node(properties=properties)
mock_get.return_value = node
@ -442,25 +131,21 @@ class BareMetalNodesTest(test.NoDBTestCase):
mock_list_ports.assert_called_once_with(node.uuid)
def test_create_ironic_not_supported(self):
CONF.set_override('compute_driver', 'nova.virt.ironic.driver')
self.assertRaises(exc.HTTPBadRequest,
self.controller.create,
self.request, {'node': object()})
def test_delete_ironic_not_supported(self):
CONF.set_override('compute_driver', 'nova.virt.ironic.driver')
self.assertRaises(exc.HTTPBadRequest,
self.controller.delete,
self.request, 'fake-id')
def test_add_interface_ironic_not_supported(self):
CONF.set_override('compute_driver', 'nova.virt.ironic.driver')
self.assertRaises(exc.HTTPBadRequest,
self.controller._add_interface,
self.request, 'fake-id', 'fake-body')
def test_remove_interface_ironic_not_supported(self):
CONF.set_override('compute_driver', 'nova.virt.ironic.driver')
self.assertRaises(exc.HTTPBadRequest,
self.controller._remove_interface,
self.request, 'fake-id', 'fake-body')

View File

@ -27,9 +27,8 @@ which allows testing against all 3 databases (sqlite in memory, mysql, pg) in
a properly configured unit test environment.
For the opportunistic testing you need to set up db's named 'openstack_citest'
and 'openstack_baremetal_citest' with user 'openstack_citest' and password
'openstack_citest' on localhost. The test will then use that db and u/p combo
to run the tests.
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands::
@ -37,8 +36,6 @@ For postgres on Ubuntu this can be done with the following commands::
| postgres=# create user openstack_citest with createdb login password
| 'openstack_citest';
| postgres=# create database openstack_citest with owner openstack_citest;
| postgres=# create database openstack_baremetal_citest with owner
| openstack_citest;
"""
@ -60,7 +57,6 @@ from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import test
from nova import utils
import nova.virt.baremetal.db.sqlalchemy.migrate_repo
LOG = logging.getLogger(__name__)
@ -111,8 +107,7 @@ def get_pgsql_connection_info(conn_pieces):
class CommonTestsMixIn(object):
"""These tests are shared between TestNovaMigrations and
TestBaremetalMigrations.
"""Base class for migration tests.
BaseMigrationTestCase is effectively an abstract class, meant to be derived
from and not directly tested against; that's why these `test_` methods need
@ -889,144 +884,6 @@ class TestNovaMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
['host']]))
class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""
USER = "openstack_citest"
PASSWD = "openstack_citest"
DATABASE = "openstack_baremetal_citest"
def __init__(self, *args, **kwargs):
super(TestBaremetalMigrations, self).__init__(*args, **kwargs)
self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'../virt/baremetal/test_baremetal_migrations.conf')
# Test machines can set the NOVA_TEST_MIGRATIONS_CONF variable
# to override the location of the config file for migration testing
self.CONFIG_FILE_PATH = os.environ.get(
'BAREMETAL_TEST_MIGRATIONS_CONF',
self.DEFAULT_CONFIG_FILE)
self.MIGRATE_FILE = \
nova.virt.baremetal.db.sqlalchemy.migrate_repo.__file__
self.REPOSITORY = repository.Repository(
os.path.abspath(os.path.dirname(self.MIGRATE_FILE)))
def setUp(self):
super(TestBaremetalMigrations, self).setUp()
if self.migration is None:
self.migration = __import__('nova.virt.baremetal.db.migration',
globals(), locals(), ['db_initial_version'], -1)
self.INIT_VERSION = self.migration.db_initial_version()
if self.migration_api is None:
temp = __import__('nova.virt.baremetal.db.sqlalchemy.migration',
globals(), locals(), ['versioning_api'], -1)
self.migration_api = temp.versioning_api
def _pre_upgrade_002(self, engine):
data = [{'id': 1, 'key': 'fake-key', 'image_path': '/dev/null',
'pxe_config_path': '/dev/null/', 'root_mb': 0, 'swap_mb': 0}]
table = oslodbutils.get_table(engine, 'bm_deployments')
engine.execute(table.insert(), data)
return data
def _check_002(self, engine, data):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, 'bm_deployments')
def _post_downgrade_004(self, engine):
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
self.assertNotIn(u'instance_name', [c.name for c in bm_nodes.columns])
def _check_005(self, engine, data):
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
columns = [c.name for c in bm_nodes.columns]
self.assertNotIn(u'prov_vlan_id', columns)
self.assertNotIn(u'registration_status', columns)
def _pre_upgrade_006(self, engine):
nodes = oslodbutils.get_table(engine, 'bm_nodes')
ifs = oslodbutils.get_table(engine, 'bm_interfaces')
# node 1 has two different addresses in bm_nodes and bm_interfaces
engine.execute(nodes.insert(),
[{'id': 1,
'prov_mac_address': 'aa:aa:aa:aa:aa:aa'}])
engine.execute(ifs.insert(),
[{'id': 101,
'bm_node_id': 1,
'address': 'bb:bb:bb:bb:bb:bb'}])
# node 2 has one same address both in bm_nodes and bm_interfaces
engine.execute(nodes.insert(),
[{'id': 2,
'prov_mac_address': 'cc:cc:cc:cc:cc:cc'}])
engine.execute(ifs.insert(),
[{'id': 201,
'bm_node_id': 2,
'address': 'cc:cc:cc:cc:cc:cc'}])
def _check_006(self, engine, data):
ifs = oslodbutils.get_table(engine, 'bm_interfaces')
rows = ifs.select().\
where(ifs.c.bm_node_id == 1).\
execute().\
fetchall()
self.assertEqual(len(rows), 2)
rows = ifs.select().\
where(ifs.c.bm_node_id == 2).\
execute().\
fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0]['address'], 'cc:cc:cc:cc:cc:cc')
def _post_downgrade_006(self, engine):
ifs = oslodbutils.get_table(engine, 'bm_interfaces')
rows = ifs.select().where(ifs.c.bm_node_id == 1).execute().fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0]['address'], 'bb:bb:bb:bb:bb:bb')
rows = ifs.select().where(ifs.c.bm_node_id == 2).execute().fetchall()
self.assertEqual(len(rows), 0)
def _check_007(self, engine, data):
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
columns = [c.name for c in bm_nodes.columns]
self.assertNotIn(u'prov_mac_address', columns)
def _check_008(self, engine, data):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, 'bm_pxe_ips')
def _post_downgrade_008(self, engine):
oslodbutils.get_table(engine, 'bm_pxe_ips')
def _pre_upgrade_010(self, engine):
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
data = [{'id': 10, 'prov_mac_address': 'cc:cc:cc:cc:cc:cc'}]
engine.execute(bm_nodes.insert(), data)
return data
def _check_010(self, engine, data):
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
self.assertIn('preserve_ephemeral', bm_nodes.columns)
default = engine.execute(
sqlalchemy.select([bm_nodes.c.preserve_ephemeral])
.where(bm_nodes.c.id == data[0]['id'])
).scalar()
self.assertEqual(default, False)
bm_nodes.delete().where(bm_nodes.c.id == data[0]['id']).execute()
def _post_downgrade_010(self, engine):
bm_nodes = oslodbutils.get_table(engine, 'bm_nodes')
self.assertNotIn('preserve_ephemeral', bm_nodes.columns)
def _skippable_migrations(self):
# NOTE(danms): This is deprecated code, soon to be removed, so don't
# obsess about tests here.
return range(1, 100)
class ProjectTestCase(test.NoDBTestCase):
def test_all_migrations_have_downgrade(self):

View File

@ -1,5 +0,0 @@
{
"add_interface": {
"address": "%(address)s"
}
}

View File

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<add_interface
address="%(address)s"
/>

View File

@ -1,8 +0,0 @@
{
"interface": {
"id": %(interface_id)s,
"address": "aa:aa:aa:aa:aa:aa",
"datapath_id": null,
"port_no": null
}
}

View File

@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<interface
id="%(interface_id)s"
address="aa:aa:aa:aa:aa:aa"
datapath_id="None"
port_no="None"
/>

View File

@ -1,12 +0,0 @@
{
"node": {
"service_host": "host",
"cpus": 8,
"memory_mb": 8192,
"local_gb": 128,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"pm_password": "pm_pass",
"terminal_port": 8000
}
}

View File

@ -1,10 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<node
service_host="host"
cpus="8"
memory_mb="8192"
local_gb="128"
pm_address="10.1.2.3"
pm_user="pm_user"
terminal_port="8000"
/>

View File

@ -1,18 +0,0 @@
{
"node": {
"cpus": 8,
"id": %(node_id)s,
"instance_uuid": null,
"interfaces": [],
"local_gb": 128,
"memory_mb": 8192,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"pxe_config_path": null,
"service_host": "host",
"task_state": null,
"terminal_port": 8000,
"updated_at": null,
"uuid": "%(node_uuid)s"
}
}

View File

@ -1,4 +0,0 @@
<?xml version='1.0' encoding='UTF-8'?>
<node instance_uuid="None" pm_address="10.1.2.3" task_state="None" uuid="%(node_uuid)s" pxe_config_path="None" cpus="8" updated_at="None" memory_mb="8192" service_host="host" local_gb="128" id="%(node_id)s" pm_user="pm_user" terminal_port="8000">
<interfaces/>
</node>

View File

@ -1,13 +0,0 @@
{
"node": {
"service_host": "host",
"cpus": 8,
"memory_mb": 8192,
"local_gb": 128,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"pm_password": "pm_pass",
"prov_mac_address": "%(address)s",
"terminal_port": 8000
}
}

View File

@ -1,11 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<node
service_host="host"
cpus="8"
memory_mb="8192"
local_gb="128"
pm_address="10.1.2.3"
pm_user="pm_user"
prov_mac_address="%(address)s"
terminal_port="8000"
/>

View File

@ -1,25 +0,0 @@
{
"node": {
"cpus": 8,
"id": %(node_id)s,
"instance_uuid": null,
"interfaces": [
{
"address": "%(address)s",
"datapath_id": null,
"id": %(interface_id)s,
"port_no": null
}
],
"local_gb": 128,
"memory_mb": 8192,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"pxe_config_path": null,
"service_host": "host",
"task_state": null,
"terminal_port": 8000,
"updated_at": null,
"uuid": "%(node_uuid)s"
}
}

View File

@ -1,6 +0,0 @@
<?xml version='1.0' encoding='UTF-8'?>
<node instance_uuid="None" pm_address="10.1.2.3" task_state="None" uuid="%(node_uuid)s" pxe_config_path="None" cpus="8" updated_at="None" memory_mb="8192" service_host="host" local_gb="128" id="%(node_id)s" pm_user="pm_user" terminal_port="8000">
<interfaces>
<interface datapath_id="None" id="%(interface_id)s" port_no="None" address="%(address)s"/>
</interfaces>
</node>

View File

@ -1,27 +0,0 @@
{
"nodes": [
{
"cpus": 8,
"id": %(node_id)s,
"instance_uuid": null,
"interfaces": [
{
"address": "%(address)s",
"datapath_id": null,
"id": %(interface_id)s,
"port_no": null
}
],
"local_gb": 128,
"memory_mb": 8192,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"pxe_config_path": null,
"service_host": "host",
"task_state": null,
"terminal_port": 8000,
"updated_at": null,
"uuid": "%(node_uuid)s"
}
]
}

View File

@ -1,8 +0,0 @@
<?xml version='1.0' encoding='UTF-8'?>
<nodes>
<node instance_uuid="None" pm_address="10.1.2.3" task_state="None" uuid="%(node_uuid)s" pxe_config_path="None" cpus="8" updated_at="None" memory_mb="8192" service_host="host" local_gb="128" id="%(node_id)s" pm_user="pm_user" terminal_port="8000">
<interfaces>
<interface datapath_id="None" id="%(interface_id)s" port_no="None" address="%(address)s"/>
</interfaces>
</node>
</nodes>

View File

@ -1,5 +0,0 @@
{
"remove_interface": {
"address": "%(address)s"
}
}

View File

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<remove_interface
address="%(address)s"
/>

View File

@ -1,25 +0,0 @@
{
"node": {
"cpus": 8,
"id": %(node_id)s,
"instance_uuid": null,
"interfaces": [
{
"address": "%(address)s",
"datapath_id": null,
"id": %(interface_id)s,
"port_no": null
}
],
"local_gb": 128,
"memory_mb": 8192,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"pxe_config_path": null,
"service_host": "host",
"task_state": null,
"terminal_port": 8000,
"updated_at": null,
"uuid": "%(node_uuid)s"
}
}

View File

@ -1,6 +0,0 @@
<?xml version='1.0' encoding='UTF-8'?>
<node instance_uuid="None" pm_address="10.1.2.3" task_state="None" uuid="%(node_uuid)s" pxe_config_path="None" cpus="8" updated_at="None" memory_mb="8192" service_host="host" local_gb="128" id="%(node_id)s" pm_user="pm_user" terminal_port="8000">
<interfaces>
<interface datapath_id="None" id="%(interface_id)s" port_no="None" address="%(address)s"/>
</interfaces>
</node>

View File

@ -1,5 +0,0 @@
{
"add_interface": {
"address": "%(address)s"
}
}

View File

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<add_interface
address="%(address)s"
/>

View File

@ -1,8 +0,0 @@
{
"interface": {
"id": %(interface_id)s,
"address": "aa:aa:aa:aa:aa:aa",
"datapath_id": null,
"port_no": null
}
}

View File

@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<interface
id="%(interface_id)s"
address="aa:aa:aa:aa:aa:aa"
datapath_id="None"
port_no="None"
/>

View File

@ -1,12 +0,0 @@
{
"node": {
"service_host": "host",
"cpus": 8,
"memory_mb": 8192,
"local_gb": 128,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"pm_password": "pm_pass",
"terminal_port": 8000
}
}

View File

@ -1,10 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<node
service_host="host"
cpus="8"
memory_mb="8192"
local_gb="128"
pm_address="10.1.2.3"
pm_user="pm_user"
terminal_port="8000"
/>

View File

@ -1,14 +0,0 @@
{
"node": {
"service_host": "host",
"cpus": 8,
"memory_mb": 8192,
"local_gb": 128,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"terminal_port": 8000,
"instance_uuid": null,
"id": %(node_id)s,
"interfaces": []
}
}

View File

@ -1,13 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<node
service_host="host"
cpus="8"
memory_mb="8192"
local_gb="128"
pm_address="10.1.2.3"
pm_user="pm_user"
terminal_port="8000"
instance_uuid="None"
id="%(node_id)s">
<interfaces/>
</node>

View File

@ -1,13 +0,0 @@
{
"node": {
"service_host": "host",
"cpus": 8,
"memory_mb": 8192,
"local_gb": 128,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"pm_password": "pm_pass",
"prov_mac_address": "%(address)s",
"terminal_port": 8000
}
}

View File

@ -1,11 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<node
service_host="host"
cpus="8"
memory_mb="8192"
local_gb="128"
pm_address="10.1.2.3"
pm_user="pm_user"
prov_mac_address="%(address)s"
terminal_port="8000"
/>

View File

@ -1,19 +0,0 @@
{
"node": {
"service_host": "host",
"cpus": 8,
"memory_mb": 8192,
"local_gb": 128,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"terminal_port": 8000,
"instance_uuid": null,
"id": %(node_id)s,
"interfaces": [{
"id": %(interface_id)s,
"address": "%(address)s",
"datapath_id": null,
"port_no": null
}]
}
}

View File

@ -1,19 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<node
service_host="host"
cpus="8"
memory_mb="8192"
local_gb="128"
pm_address="10.1.2.3"
pm_user="pm_user"
terminal_port="8000"
instance_uuid="None"
id="%(node_id)s">
<interfaces>
<interface
id="%(interface_id)s"
address="%(address)s"
datapath_id="None"
port_no="None"/>
</interfaces>
</node>

View File

@ -1,19 +0,0 @@
{
"nodes": [{
"service_host": "host",
"cpus": 8,
"memory_mb": 8192,
"local_gb": 128,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"terminal_port": 8000,
"instance_uuid": null,
"id": %(node_id)s,
"interfaces": [{
"id": %(interface_id)s,
"address": "%(address)s",
"datapath_id": null,
"port_no": null
}]
}]
}

View File

@ -1,17 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<nodes>
<node
service_host="host"
cpus="8"
memory_mb="8192"
local_gb="128"
pm_address="10.1.2.3"
pm_user="pm_user"
terminal_port="8000"
instance_uuid="None"
id="%(node_id)s">
<interfaces>
<interface id="%(interface_id)s" address="%(address)s" datapath_id="None" port_no="None"/>
</interfaces>
</node>
</nodes>

View File

@ -1,5 +0,0 @@
{
"remove_interface": {
"address": "%(address)s"
}
}

View File

@ -1,4 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<remove_interface
address="%(address)s"
/>

View File

@ -1,19 +0,0 @@
{
"node": {
"service_host": "host",
"cpus": 8,
"memory_mb": 8192,
"local_gb": 128,
"pm_address": "10.1.2.3",
"pm_user": "pm_user",
"terminal_port": 8000,
"instance_uuid": null,
"id": %(node_id)s,
"interfaces": [{
"id": %(interface_id)s,
"address": "%(address)s",
"datapath_id": null,
"port_no": null
}]
}
}

View File

@ -1,15 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<node
service_host="host"
cpus="8"
memory_mb="8192"
local_gb="128"
pm_address="10.1.2.3"
pm_user="pm_user"
terminal_port="8000"
instance_uuid="None"
id="%(node_id)s">
<interfaces>
<interface id="%(interface_id)s" address="%(address)s" datapath_id="None" port_no="None"/>
</interfaces>
</node>

View File

@ -66,7 +66,6 @@ from nova.tests.integrated import api_samples_test_base
from nova.tests.integrated import integrated_helpers
from nova.tests.objects import test_network
from nova.tests import utils as test_utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova import utils
from nova.volume import cinder
@ -138,6 +137,8 @@ class ApiSamplesTrap(ApiSampleTestBaseV2):
# removed) soon.
do_not_approve_additions = []
do_not_approve_additions.append('os-create-server-ext')
do_not_approve_additions.append('os-baremetal-ext-status')
do_not_approve_additions.append('os-baremetal-nodes')
tests = self._get_extensions_tested()
extensions = self._get_extensions()
@ -2952,120 +2953,6 @@ class CellsCapacitySampleXmlTest(CellsCapacitySampleJsonTest):
ctype = 'xml'
class BareMetalNodesJsonTest(ApiSampleTestBaseV2, bm_db_base.BMDBTestCase):
extension_name = ('nova.api.openstack.compute.contrib.baremetal_nodes.'
'Baremetal_nodes')
def _get_subs(self):
subs = {}
return subs
def _create_node(self):
response = self._do_post("os-baremetal-nodes",
"baremetal-node-create-req",
{})
subs = self._get_subs()
subs.update({'node_id': '(?P<id>\d+)'})
return self._verify_response("baremetal-node-create-resp", subs,
response, 200)
def _create_node_with_address(self):
address = '12:34:56:78:90:ab'
req_subs = {'address': address}
response = self._do_post("os-baremetal-nodes",
"baremetal-node-create-with-address-req",
req_subs)
subs = self._get_subs()
subs.update({'node_id': '(?P<id>\d+)',
'interface_id': '\d+',
'address': address,
})
self._verify_response("baremetal-node-create-with-address-resp",
subs, response, 200)
def test_create_node(self):
self._create_node()
def test_create_node_with_address(self):
self._create_node_with_address()
def test_list_nodes(self):
node_id = self._create_node()
interface_id = self._add_interface(node_id)
response = self._do_get('os-baremetal-nodes')
subs = self._get_subs()
subs.update({'node_id': node_id,
'interface_id': interface_id,
'address': 'aa:aa:aa:aa:aa:aa',
})
self._verify_response('baremetal-node-list-resp', subs,
response, 200)
def test_show_node(self):
node_id = self._create_node()
interface_id = self._add_interface(node_id)
response = self._do_get('os-baremetal-nodes/%s' % node_id)
subs = self._get_subs()
subs.update({'node_id': node_id,
'interface_id': interface_id,
'address': 'aa:aa:aa:aa:aa:aa',
})
self._verify_response('baremetal-node-show-resp', subs, response, 200)
def test_delete_node(self):
node_id = self._create_node()
response = self._do_delete("os-baremetal-nodes/%s" % node_id)
self.assertEqual(response.status_code, 202)
def _add_interface(self, node_id):
response = self._do_post("os-baremetal-nodes/%s/action" % node_id,
"baremetal-node-add-interface-req",
{'address': 'aa:aa:aa:aa:aa:aa'})
subs = {'interface_id': r'(?P<id>\d+)'}
return self._verify_response("baremetal-node-add-interface-resp", subs,
response, 200)
def test_add_interface(self):
node_id = self._create_node()
self._add_interface(node_id)
def test_remove_interface(self):
node_id = self._create_node()
self._add_interface(node_id)
response = self._do_post("os-baremetal-nodes/%s/action" % node_id,
"baremetal-node-remove-interface-req",
{'address': 'aa:aa:aa:aa:aa:aa'})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
class BareMetalNodesXmlTest(BareMetalNodesJsonTest):
ctype = 'xml'
class BareMetalExtStatusJsonTest(BareMetalNodesJsonTest):
extension_name = ('nova.api.openstack.compute.contrib.'
'baremetal_ext_status.Baremetal_ext_status')
def _get_flags(self):
f = super(BareMetalExtStatusJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# BareMetalExtStatus extension also needs BareMetalNodes to be loaded.
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.baremetal_nodes.'
'Baremetal_nodes')
return f
def _get_subs(self):
vanilla_regexes = self._get_regexes()
subs = {'node_uuid': vanilla_regexes['uuid']}
return subs
class BareMetalExtStatusXmlTest(BareMetalExtStatusJsonTest):
ctype = 'xml'
class BlockDeviceMappingV2BootJsonTest(ServersSampleBase):
extension_name = ('nova.api.openstack.compute.contrib.'
'block_device_mapping_v2_boot.'

View File

@ -1,51 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Bare-metal DB test base class."""
from oslo.config import cfg
from nova import context as nova_context
from nova import test
from nova.virt.baremetal.db import migration as bm_migration
from nova.virt.baremetal.db.sqlalchemy import session as bm_session
_DB_CACHE = None
CONF = cfg.CONF
CONF.import_opt('sql_connection',
'nova.virt.baremetal.db.sqlalchemy.session',
group='baremetal')
class Database(test.Database):
def post_migrations(self):
pass
class BMDBTestCase(test.TestCase):
def setUp(self):
super(BMDBTestCase, self).setUp()
self.flags(sql_connection='sqlite://', group='baremetal')
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(bm_session, bm_migration,
sql_connection=CONF.baremetal.sql_connection,
sqlite_db=None,
sqlite_clean_db=None)
self.useFixture(_DB_CACHE)
self.context = nova_context.get_admin_context()

View File

@ -1,56 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Bare-metal DB testcase for BareMetalInterface
"""
from oslo.db import exception as db_exc
from nova import exception
from nova.tests.virt.baremetal.db import base
from nova.virt.baremetal import db
class BareMetalInterfaceTestCase(base.BMDBTestCase):
def test_unique_address(self):
pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
self.assertRaises(db_exc.DBError,
db.bm_interface_create,
self.context, 2, '11:11:11:11:11:11', '0x2', 2)
# succeed after delete pif1
db.bm_interface_destroy(self.context, pif1_id)
pif2_id = db.bm_interface_create(self.context, 2, '11:11:11:11:11:11',
'0x2', 2)
self.assertIsNotNone(pif2_id)
def test_unique_vif_uuid(self):
pif1_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
pif2_id = db.bm_interface_create(self.context, 2, '22:22:22:22:22:22',
'0x2', 2)
db.bm_interface_set_vif_uuid(self.context, pif1_id, 'AAAA')
self.assertRaises(exception.NovaException,
db.bm_interface_set_vif_uuid,
self.context, pif2_id, 'AAAA')
def test_vif_not_found(self):
pif_id = db.bm_interface_create(self.context, 1, '11:11:11:11:11:11',
'0x1', 1)
self.assertRaises(exception.NovaException,
db.bm_interface_set_vif_uuid,
self.context, pif_id + 1, 'AAAA')

View File

@ -1,191 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Bare-Metal DB testcase for BareMetalNode
"""
from nova import exception
from nova.tests.virt.baremetal.db import base
from nova.tests.virt.baremetal.db import utils
from nova.virt.baremetal import db
class BareMetalNodesTestCase(base.BMDBTestCase):
def _create_nodes(self):
nodes = [
utils.new_bm_node(pm_address='0', service_host="host1",
memory_mb=100000, cpus=100, local_gb=10000),
utils.new_bm_node(pm_address='1', service_host="host2",
instance_uuid='A',
memory_mb=100000, cpus=100, local_gb=10000),
utils.new_bm_node(pm_address='2', service_host="host2",
memory_mb=1000, cpus=1, local_gb=1000),
utils.new_bm_node(pm_address='3', service_host="host2",
memory_mb=1000, cpus=2, local_gb=1000),
utils.new_bm_node(pm_address='4', service_host="host2",
memory_mb=2000, cpus=1, local_gb=1000),
utils.new_bm_node(pm_address='5', service_host="host2",
memory_mb=2000, cpus=2, local_gb=1000),
]
self.ids = []
for n in nodes:
ref = db.bm_node_create(self.context, n)
self.ids.append(ref['id'])
def test_get_all(self):
r = db.bm_node_get_all(self.context)
self.assertEqual(r, [])
self._create_nodes()
r = db.bm_node_get_all(self.context)
self.assertEqual(len(r), 6)
def test_get(self):
self._create_nodes()
r = db.bm_node_get(self.context, self.ids[0])
self.assertEqual(r['pm_address'], '0')
r = db.bm_node_get(self.context, self.ids[1])
self.assertEqual(r['pm_address'], '1')
self.assertRaises(
exception.NodeNotFound,
db.bm_node_get,
self.context, -1)
def test_get_by_service_host(self):
self._create_nodes()
r = db.bm_node_get_all(self.context, service_host=None)
self.assertEqual(len(r), 6)
r = db.bm_node_get_all(self.context, service_host="host1")
self.assertEqual(len(r), 1)
self.assertEqual(r[0]['pm_address'], '0')
r = db.bm_node_get_all(self.context, service_host="host2")
self.assertEqual(len(r), 5)
pmaddrs = [x['pm_address'] for x in r]
self.assertIn('1', pmaddrs)
self.assertIn('2', pmaddrs)
self.assertIn('3', pmaddrs)
self.assertIn('4', pmaddrs)
self.assertIn('5', pmaddrs)
r = db.bm_node_get_all(self.context, service_host="host3")
self.assertEqual(r, [])
def test_get_associated(self):
self._create_nodes()
r = db.bm_node_get_associated(self.context, service_host=None)
self.assertEqual(len(r), 1)
self.assertEqual(r[0]['pm_address'], '1')
r = db.bm_node_get_unassociated(self.context, service_host=None)
self.assertEqual(len(r), 5)
pmaddrs = [x['pm_address'] for x in r]
self.assertIn('0', pmaddrs)
self.assertIn('2', pmaddrs)
self.assertIn('3', pmaddrs)
self.assertIn('4', pmaddrs)
self.assertIn('5', pmaddrs)
def test_destroy(self):
self._create_nodes()
db.bm_node_destroy(self.context, self.ids[0])
self.assertRaises(
exception.NodeNotFound,
db.bm_node_get,
self.context, self.ids[0])
r = db.bm_node_get_all(self.context)
self.assertEqual(len(r), 5)
def test_destroy_with_interfaces(self):
self._create_nodes()
if_a_id = db.bm_interface_create(self.context, self.ids[0],
'aa:aa:aa:aa:aa:aa', None, None)
if_b_id = db.bm_interface_create(self.context, self.ids[0],
'bb:bb:bb:bb:bb:bb', None, None)
if_x_id = db.bm_interface_create(self.context, self.ids[1],
'11:22:33:44:55:66', None, None)
db.bm_node_destroy(self.context, self.ids[0])
self.assertRaises(
exception.NovaException,
db.bm_interface_get,
self.context, if_a_id)
self.assertRaises(
exception.NovaException,
db.bm_interface_get,
self.context, if_b_id)
# Another node's interface is not affected
if_x = db.bm_interface_get(self.context, if_x_id)
self.assertEqual(self.ids[1], if_x['bm_node_id'])
self.assertRaises(
exception.NodeNotFound,
db.bm_node_get,
self.context, self.ids[0])
r = db.bm_node_get_all(self.context)
self.assertEqual(len(r), 5)
def test_find_free(self):
self._create_nodes()
fn = db.bm_node_find_free(self.context, 'host2')
self.assertEqual(fn['pm_address'], '2')
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=500, cpus=2, local_gb=100)
self.assertEqual(fn['pm_address'], '3')
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=1001, cpus=1, local_gb=1000)
self.assertEqual(fn['pm_address'], '4')
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=2000, cpus=1, local_gb=1000)
self.assertEqual(fn['pm_address'], '4')
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=2000, cpus=2, local_gb=1000)
self.assertEqual(fn['pm_address'], '5')
# check memory_mb
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=2001, cpus=2, local_gb=1000)
self.assertIsNone(fn)
# check cpus
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=2000, cpus=3, local_gb=1000)
self.assertIsNone(fn)
# check local_gb
fn = db.bm_node_find_free(self.context, 'host2',
memory_mb=2000, cpus=2, local_gb=1001)
self.assertIsNone(fn)

View File

@ -1,53 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Bare-metal test utils."""
from nova import test
from nova.virt.baremetal.db.sqlalchemy import models as bm_models
def new_bm_node(**kwargs):
h = bm_models.BareMetalNode()
h.id = kwargs.pop('id', None)
h.uuid = kwargs.pop('uuid', None)
h.service_host = kwargs.pop('service_host', None)
h.instance_uuid = kwargs.pop('instance_uuid', None)
h.cpus = kwargs.pop('cpus', 1)
h.memory_mb = kwargs.pop('memory_mb', 1024)
h.local_gb = kwargs.pop('local_gb', 64)
h.pm_address = kwargs.pop('pm_address', '192.168.1.1')
h.pm_user = kwargs.pop('pm_user', 'ipmi_user')
h.pm_password = kwargs.pop('pm_password', 'ipmi_password')
h.task_state = kwargs.pop('task_state', None)
h.terminal_port = kwargs.pop('terminal_port', 8000)
if len(kwargs) > 0:
raise test.TestingException("unknown field: %s"
% ','.join(kwargs.keys()))
return h
def new_bm_interface(**kwargs):
x = bm_models.BareMetalInterface()
x.id = kwargs.pop('id', None)
x.bm_node_id = kwargs.pop('bm_node_id', None)
x.address = kwargs.pop('address', None)
x.datapath_id = kwargs.pop('datapath_id', None)
x.port_no = kwargs.pop('port_no', None)
x.vif_uuid = kwargs.pop('vif_uuid', None)
if len(kwargs) > 0:
raise test.TestingException("unknown field: %s"
% ','.join(kwargs.keys()))
return x

View File

@ -1,24 +0,0 @@
[unit_tests]
# Set up any number of databases to test concurrently.
# The "name" used in the test is the config variable key.
sqlite=sqlite://
#sqlitefile=sqlite:///test_baremetal_migrations_utils.db
#mysql=mysql+mysqldb://user:pass@localhost/test_baremetal_migrations_utils
#postgresql=postgresql+psycopg2://user:pass@localhost/test_migrations_utils
[migration_dbs]
# Migration DB details are listed separately as they can't be connected to
# concurrently. These databases can't be the same as above.
# Note: sqlite:// is in-memory and unique each time it is spawned.
# However, file-based sqlite databases are not unique.
sqlite=sqlite://
#sqlitefile=sqlite:///test_baremetal_migrations.db
#mysql=mysql+mysqldb://user:pass@localhost/test_baremetal_migrations
#postgresql=postgresql+psycopg2://user:pass@localhost/test_baremetal_migrations
[walk_style]
snake_walk=yes
downgrade=yes
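
For context, the comments above explain how this file was consumed: every key under [unit_tests] names a database to exercise. A rough, hedged sketch of reading it with the standard library; ConfigParser stands in for whatever the removed migration-test harness actually used:

# Sketch only: read the connection strings the way the comments describe.
import ConfigParser  # Python 2, matching the era of this code


def read_test_databases(path='tests.conf'):
    parser = ConfigParser.SafeConfigParser()
    parser.read(path)
    # Each option name is the "name" used in the test; the value is the
    # SQLAlchemy connection string (e.g. sqlite://).
    return dict(parser.items('unit_tests'))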

View File

@ -1,541 +0,0 @@
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the base baremetal driver class."""
import mock
import mox
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova import db as main_db
from nova import exception
from nova import objects
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import driver as bm_driver
from nova.virt.baremetal import fake
from nova.virt import fake as fake_virt
CONF = cfg.CONF
COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.fake.FakeDriver',
flavor_extra_specs=['cpu_arch:x86_64', 'test_spec:test_value'],
power_manager='nova.virt.baremetal.fake.FakePowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
group='baremetal',
)
class BareMetalDriverNoDBTestCase(test.NoDBTestCase):
def setUp(self):
super(BareMetalDriverNoDBTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
self.driver = bm_driver.BareMetalDriver(None)
def test_validate_driver_loading(self):
self.assertIsInstance(self.driver.driver, fake.FakeDriver)
self.assertIsInstance(self.driver.vif_driver, fake.FakeVifDriver)
self.assertIsInstance(self.driver.volume_driver, fake.FakeVolumeDriver)
self.assertIsInstance(self.driver.firewall_driver,
fake.FakeFirewallDriver)
def test_driver_capabilities(self):
self.assertTrue(self.driver.capabilities['has_imagecache'])
self.assertFalse(self.driver.capabilities['supports_recreate'])
class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalDriverWithDBTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
fake_image.stub_out_image_service(self.stubs)
self.context = utils.get_test_admin_context()
self.driver = bm_driver.BareMetalDriver(fake_virt.FakeVirtAPI())
self.addCleanup(fake_image.FakeImageService_reset)
def _create_node(self, node_info=None, nic_info=None, ephemeral=True):
result = {}
if node_info is None:
node_info = bm_db_utils.new_bm_node(
id=123,
service_host='test_host',
cpus=2,
memory_mb=2048,
)
if nic_info is None:
nic_info = [
{'address': '01:23:45:67:89:01', 'datapath_id': '0x1',
'port_no': 1},
{'address': '01:23:45:67:89:02', 'datapath_id': '0x2',
'port_no': 2},
]
result['node_info'] = node_info
result['nic_info'] = nic_info
result['node'] = db.bm_node_create(self.context, node_info)
for nic in nic_info:
db.bm_interface_create(
self.context,
result['node']['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
if ephemeral:
result['instance'] = utils.get_test_instance()
else:
flavor = utils.get_test_flavor(options={'ephemeral_gb': 0})
result['instance'] = utils.get_test_instance(flavor=flavor)
result['instance']['node'] = result['node']['uuid']
result['spawn_params'] = dict(
admin_password='test_pass',
block_device_info=None,
context=self.context,
image_meta=utils.get_test_image_info(
None, result['instance']),
injected_files=[('/fake/path', 'hello world')],
instance=result['instance'],
network_info=utils.get_test_network_info(),
)
result['destroy_params'] = dict(
context=self.context,
instance=result['instance'],
network_info=result['spawn_params']['network_info'],
block_device_info=result['spawn_params']['block_device_info'],
)
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), result['instance'])
instance.node = result['node']['uuid']
result['rebuild_params'] = dict(
context=self.context,
instance=instance,
image_meta=utils.get_test_image_info(None, result['instance']),
injected_files=[('/fake/path', 'hello world')],
admin_password='test_pass',
bdms={},
detach_block_devices=self.mox.CreateMockAnything(),
attach_block_devices=self.mox.CreateMockAnything(),
network_info=result['spawn_params']['network_info'],
block_device_info=result['spawn_params']['block_device_info'],
)
return result
def test_spawn_ok(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
self.assertEqual(row['instance_name'], node['instance']['hostname'])
instance = main_db.instance_get_by_uuid(self.context,
node['instance']['uuid'])
self.assertEqual(instance['default_ephemeral_device'], '/dev/sda1')
def test_set_default_ephemeral_device(self):
instance = objects.Instance(context=self.context)
instance.system_metadata = flavors.save_flavor_info(
{}, flavors.get_default_flavor())
instance.system_metadata['instance_type_ephemeral_gb'] = 1
with mock.patch.object(instance, 'save') as mock_save:
self.driver._set_default_ephemeral_device(instance)
mock_save.assert_called_once_with()
self.assertEqual('/dev/sda1', instance.default_ephemeral_device)
def test_spawn_no_ephemeral_ok(self):
node = self._create_node(ephemeral=False)
self.driver.spawn(**node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
self.assertEqual(row['instance_name'], node['instance']['hostname'])
instance = main_db.instance_get_by_uuid(self.context,
node['instance']['uuid'])
self.assertIsNone(instance['default_ephemeral_device'])
def _test_rebuild(self, ephemeral):
node = self._create_node(ephemeral=ephemeral)
self.driver.spawn(**node['spawn_params'])
after_spawn = db.bm_node_get(self.context, node['node']['id'])
instance = node['rebuild_params']['instance']
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[None])
self.driver.rebuild(preserve_ephemeral=ephemeral,
**node['rebuild_params'])
after_rebuild = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(after_rebuild['task_state'], baremetal_states.ACTIVE)
self.assertEqual(after_rebuild['preserve_ephemeral'], ephemeral)
self.assertEqual(after_spawn['instance_uuid'],
after_rebuild['instance_uuid'])
def test_rebuild_ok(self):
self._test_rebuild(ephemeral=False)
def test_rebuild_preserve_ephemeral(self):
self._test_rebuild(ephemeral=True)
def test_macs_from_nic_for_instance(self):
node = self._create_node()
expected = set([nic['address'] for nic in node['nic_info']])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance_after_spawn(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
expected = set([nic['address'] for nic in node['nic_info']])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance(self):
node = self._create_node()
expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance_no_interfaces(self):
# Nodes cannot boot with no MACs, so we raise an error if that happens.
node = self._create_node(nic_info=[])
self.assertRaises(exception.NovaException,
self.driver.macs_for_instance, node['instance'])
def test_spawn_node_already_associated(self):
node = self._create_node()
db.bm_node_update(self.context, node['node']['id'],
{'instance_uuid': '1234-5678'})
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertIsNone(row['task_state'])
def test_spawn_node_in_use(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
def test_spawn_node_not_found(self):
node = self._create_node()
db.bm_node_update(self.context, node['node']['id'],
{'uuid': 'hide-this-node'})
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertIsNone(row['task_state'])
def test_spawn_fails(self):
node = self._create_node()
self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.DELETED)
def test_spawn_prepared(self):
node = self._create_node()
def update_2prepared(context, node, instance, state):
row = db.bm_node_get(context, node['id'])
self.assertEqual(row['task_state'], baremetal_states.BUILDING)
db.bm_node_update(
context, node['id'],
{'task_state': baremetal_states.PREPARED})
self.mox.StubOutWithMock(fake.FakeDriver, 'activate_node')
self.mox.StubOutWithMock(bm_driver, '_update_state')
bm_driver._update_state(
self.context,
mox.IsA(node['node']),
node['instance'],
baremetal_states.PREPARED).WithSideEffects(update_2prepared)
fake.FakeDriver.activate_node(
self.context,
mox.IsA(node['node']),
node['instance']).AndRaise(test.TestingException)
bm_driver._update_state(
self.context,
mox.IsA(node['node']),
node['instance'],
baremetal_states.ERROR).AndRaise(test.TestingException)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.PREPARED)
def test_spawn_fails_to_cleanup(self):
node = self._create_node()
self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
fake.FakePowerManager.deactivate_node().AndReturn(None)
fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ERROR)
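
test_spawn_fails and test_spawn_fails_to_cleanup above only pin the resulting task_state, not the driver code that produced it. A hedged control-flow sketch consistent with those two outcomes; activate, deactivate and set_state are placeholders, not the removed BareMetalDriver's real methods:

# Placeholder state constants for the sketch, not nova's real values.
DELETED, ERROR, ACTIVE = 'deleted', 'error', 'active'


def spawn(node, activate, deactivate, set_state):
    try:
        activate(node)
    except Exception:
        try:
            deactivate(node)
        except Exception:
            # Cleanup also failed: node ends up in ERROR
            # (test_spawn_fails_to_cleanup).
            set_state(node, ERROR)
            raise
        # Clean deactivation after a failed activation leaves the node
        # DELETED (test_spawn_fails), and the original error propagates.
        set_state(node, DELETED)
        raise
    set_state(node, ACTIVE)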
def test_spawn_destroy_images_on_deploy(self):
node = self._create_node()
self.driver.driver.destroy_images = mock.MagicMock()
self.driver.spawn(**node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
self.assertEqual(row['instance_name'], node['instance']['hostname'])
instance = main_db.instance_get_by_uuid(self.context,
node['instance']['uuid'])
self.assertIsNotNone(instance)
self.assertEqual(1, self.driver.driver.destroy_images.call_count)
def test_destroy_ok(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
self.driver.destroy(**node['destroy_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.DELETED)
self.assertIsNone(row['instance_uuid'])
self.assertIsNone(row['instance_name'])
def test_destroy_fails(self):
node = self._create_node()
self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
fake.FakePowerManager.deactivate_node().AndReturn(None)
fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.driver.spawn(**node['spawn_params'])
self.assertRaises(test.TestingException,
self.driver.destroy, **node['destroy_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ERROR)
self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
def test_get_available_resources(self):
node = self._create_node()
resources = self.driver.get_available_resource(node['node']['uuid'])
self.assertEqual(resources['memory_mb'],
node['node_info']['memory_mb'])
self.assertEqual(resources['memory_mb_used'], 0)
self.assertEqual(resources['supported_instances'],
'[["x86_64", "baremetal", "hvm"]]')
self.assertEqual(resources['stats'],
'{"cpu_arch": "x86_64", "baremetal_driver": '
'"nova.virt.baremetal.fake.FakeDriver", '
'"test_spec": "test_value"}')
self.driver.spawn(**node['spawn_params'])
resources = self.driver.get_available_resource(node['node']['uuid'])
self.assertEqual(resources['memory_mb_used'],
node['node_info']['memory_mb'])
self.driver.destroy(**node['destroy_params'])
resources = self.driver.get_available_resource(node['node']['uuid'])
self.assertEqual(resources['memory_mb_used'], 0)
stats = jsonutils.loads(resources['stats'])
self.assertEqual(stats['test_spec'], 'test_value')
def test_get_available_nodes(self):
self.assertEqual(0, len(self.driver.get_available_nodes()))
self.assertEqual(0, len(self.driver.get_available_nodes(refresh=True)))
node1 = self._create_node()
self.assertEqual(1, len(self.driver.get_available_nodes()))
node1['instance']['hostname'] = 'test-host-1'
self.driver.spawn(**node1['spawn_params'])
self.assertEqual(1, len(self.driver.get_available_nodes()))
self.assertEqual([node1['node']['uuid']],
self.driver.get_available_nodes())
def test_list_instances(self):
self.assertEqual([], self.driver.list_instances())
node1 = self._create_node()
self.assertEqual([], self.driver.list_instances())
node_info = bm_db_utils.new_bm_node(
id=456,
service_host='test_host',
cpus=2,
memory_mb=2048,
)
nic_info = [
{'address': 'cc:cc:cc', 'datapath_id': '0x1',
'port_no': 1},
{'address': 'dd:dd:dd', 'datapath_id': '0x2',
'port_no': 2},
]
node2 = self._create_node(node_info=node_info, nic_info=nic_info)
self.assertEqual([], self.driver.list_instances())
node1['instance']['hostname'] = 'test-host-1'
node2['instance']['hostname'] = 'test-host-2'
self.driver.spawn(**node1['spawn_params'])
self.assertEqual(['test-host-1'],
self.driver.list_instances())
self.driver.spawn(**node2['spawn_params'])
self.assertEqual(['test-host-1', 'test-host-2'],
self.driver.list_instances())
self.driver.destroy(**node1['destroy_params'])
self.assertEqual(['test-host-2'],
self.driver.list_instances())
self.driver.destroy(**node2['destroy_params'])
self.assertEqual([], self.driver.list_instances())
def test_get_info_no_such_node(self):
node = self._create_node()
self.assertRaises(exception.InstanceNotFound,
self.driver.get_info,
node['instance'])
def test_get_info_ok(self):
node = self._create_node()
db.bm_node_associate_and_update(self.context, node['node']['uuid'],
{'instance_uuid': node['instance']['uuid'],
'instance_name': node['instance']['hostname'],
'task_state': baremetal_states.ACTIVE})
res = self.driver.get_info(node['instance'])
self.assertEqual(res['state'], power_state.RUNNING)
def test_get_info_with_defunct_pm(self):
# test fix for bug 1178378
node = self._create_node()
db.bm_node_associate_and_update(self.context, node['node']['uuid'],
{'instance_uuid': node['instance']['uuid'],
'instance_name': node['instance']['hostname'],
'task_state': baremetal_states.ACTIVE})
# fake the power manager and don't get a power state
self.mox.StubOutWithMock(fake.FakePowerManager, 'is_power_on')
fake.FakePowerManager.is_power_on().AndReturn(None)
self.mox.ReplayAll()
res = self.driver.get_info(node['instance'])
# prior to the fix, returned power_state was SHUTDOWN
self.assertEqual(res['state'], power_state.NOSTATE)
self.mox.VerifyAll()
def test_attach_volume(self):
connection_info = {'_fake_connection_info': None}
instance = utils.get_test_instance()
mountpoint = '/dev/sdd'
self.mox.StubOutWithMock(self.driver.volume_driver, 'attach_volume')
self.driver.volume_driver.attach_volume(connection_info,
instance,
mountpoint)
self.mox.ReplayAll()
self.driver.attach_volume(None, connection_info, instance, mountpoint)
def test_detach_volume(self):
connection_info = {'_fake_connection_info': None}
instance = utils.get_test_instance()
mountpoint = '/dev/sdd'
self.mox.StubOutWithMock(self.driver.volume_driver, 'detach_volume')
self.driver.volume_driver.detach_volume(connection_info,
instance,
mountpoint)
self.mox.ReplayAll()
self.driver.detach_volume(connection_info, instance, mountpoint)
def test_attach_block_devices(self):
connection_info_1 = {'_fake_connection_info_1': None}
connection_info_2 = {'_fake_connection_info_2': None}
block_device_mapping = [{'connection_info': connection_info_1,
'mount_device': '/dev/sde'},
{'connection_info': connection_info_2,
'mount_device': '/dev/sdf'}]
block_device_info = {'block_device_mapping': block_device_mapping}
instance = utils.get_test_instance()
self.mox.StubOutWithMock(self.driver, 'attach_volume')
self.driver.attach_volume(None, connection_info_1, instance,
'/dev/sde')
self.driver.attach_volume(None, connection_info_2, instance,
'/dev/sdf')
self.mox.ReplayAll()
self.driver._attach_block_devices(instance, block_device_info)
def test_detach_block_devices(self):
connection_info_1 = {'_fake_connection_info_1': None}
connection_info_2 = {'_fake_connection_info_2': None}
block_device_mapping = [{'connection_info': connection_info_1,
'mount_device': '/dev/sde'},
{'connection_info': connection_info_2,
'mount_device': '/dev/sdf'}]
block_device_info = {'block_device_mapping': block_device_mapping}
instance = utils.get_test_instance()
self.mox.StubOutWithMock(self.driver, 'detach_volume')
self.driver.detach_volume(connection_info_1, instance, '/dev/sde')
self.driver.detach_volume(connection_info_2, instance, '/dev/sdf')
self.mox.ReplayAll()
self.driver._detach_block_devices(instance, block_device_info)
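
The two block-device tests above only assert a fan-out: one attach or detach call per entry in block_device_mapping. A minimal standalone sketch of that iteration (the real driver methods are removed by this patch; the callables are passed in here to keep the sketch self-contained):

def attach_block_devices(attach_volume, instance, block_device_info):
    # One attach call per mapping entry, as asserted above.
    for bdm in block_device_info.get('block_device_mapping', []):
        attach_volume(None, bdm['connection_info'], instance,
                      bdm['mount_device'])


def detach_block_devices(detach_volume, instance, block_device_info):
    # One detach call per mapping entry, as asserted above.
    for bdm in block_device_info.get('block_device_mapping', []):
        detach_volume(bdm['connection_info'], instance, bdm['mount_device'])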

View File

@ -1,120 +0,0 @@
# Copyright 2013 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for baremetal iBoot power manager."""
from nova import exception
from nova import test
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import iboot_pdu
class BareMetalIbootPDUTestCase(test.TestCase):
def setUp(self):
super(BareMetalIbootPDUTestCase, self).setUp()
self.node = bm_db_utils.new_bm_node(
id=123,
pm_address='192.168.1.254',
pm_user='foo',
pm_password='bar')
self.pm = iboot_pdu.IBootManager(node=self.node)
def test_construct(self):
self.assertEqual(self.pm.address, '192.168.1.254')
self.assertEqual(self.pm.port, 9100)
self.assertEqual(self.pm.relay_id, 1)
self.assertEqual(self.pm.user, 'foo')
self.assertEqual(self.pm.password, 'bar')
def test_construct_with_port_and_relay(self):
self.node = bm_db_utils.new_bm_node(
id=123,
pm_address='192.168.1.254:1234,8',
pm_user='foo',
pm_password='bar')
self.pm = iboot_pdu.IBootManager(node=self.node)
self.assertEqual(self.pm.address, '192.168.1.254')
self.assertEqual(self.pm.port, 1234)
self.assertEqual(self.pm.relay_id, 8)
self.assertEqual(self.pm.user, 'foo')
self.assertEqual(self.pm.password, 'bar')
def test_construct_with_invalid_port(self):
self.node = bm_db_utils.new_bm_node(
id=123,
pm_address='192.168.1.254:not_a_number',
pm_user='foo',
pm_password='bar')
self.assertRaises(exception.InvalidParameterValue,
iboot_pdu.IBootManager, node=self.node)
def test_construct_with_relay_id(self):
self.node = bm_db_utils.new_bm_node(
id=123,
pm_address='192.168.1.254:1234,not_a_number',
pm_user='foo',
pm_password='bar')
self.assertRaises(exception.InvalidParameterValue,
iboot_pdu.IBootManager, node=self.node)
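
The constructor tests above fully specify the pm_address grammar for the iBoot manager: host, optional ':port' (default 9100), optional ',relay' (default 1), with non-numeric values rejected. A standalone sketch of that parsing; ValueError stands in for the nova InvalidParameterValue the tests expect:

def parse_iboot_address(pm_address, default_port=9100, default_relay=1):
    host, _, rest = pm_address.partition(':')
    port, relay = default_port, default_relay
    if rest:
        port_str, _, relay_str = rest.partition(',')
        try:
            port = int(port_str)
            if relay_str:
                relay = int(relay_str)
        except ValueError:
            # The removed driver raised exception.InvalidParameterValue here.
            raise ValueError('invalid iBoot pm_address: %r' % pm_address)
    return host, port, relay


# parse_iboot_address('192.168.1.254')        -> ('192.168.1.254', 9100, 1)
# parse_iboot_address('192.168.1.254:1234,8') -> ('192.168.1.254', 1234, 8)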
def test_activate_node(self):
self.mox.StubOutWithMock(self.pm, '_create_connection')
self.mox.StubOutWithMock(self.pm, '_switch')
self.mox.StubOutWithMock(self.pm, 'is_power_on')
self.pm._create_connection().AndReturn(True)
self.pm._switch(1, True).AndReturn(True)
self.pm.is_power_on().AndReturn(True)
self.mox.ReplayAll()
self.pm.activate_node()
self.mox.VerifyAll()
def test_deactivate_node(self):
self.mox.StubOutWithMock(self.pm, '_create_connection')
self.mox.StubOutWithMock(self.pm, '_switch')
self.mox.StubOutWithMock(self.pm, 'is_power_on')
self.pm._create_connection().AndReturn(True)
self.pm.is_power_on().AndReturn(True)
self.pm._switch(1, False).AndReturn(True)
self.pm.is_power_on().AndReturn(False)
self.mox.ReplayAll()
self.pm.deactivate_node()
self.mox.VerifyAll()
def test_reboot_node(self):
self.mox.StubOutWithMock(self.pm, '_create_connection')
self.mox.StubOutWithMock(self.pm, '_switch')
self.mox.StubOutWithMock(self.pm, 'is_power_on')
self.pm._create_connection().AndReturn(True)
self.pm._switch(1, False).AndReturn(True)
self.pm._switch(1, True).AndReturn(True)
self.pm.is_power_on().AndReturn(True)
self.mox.ReplayAll()
self.pm.reboot_node()
self.mox.VerifyAll()
def test_is_power_on(self):
self.mox.StubOutWithMock(self.pm, '_create_connection')
self.mox.StubOutWithMock(self.pm, '_get_relay')
self.pm._create_connection().AndReturn(True)
self.pm._get_relay(1).AndReturn(True)
self.mox.ReplayAll()
self.pm.is_power_on()
self.mox.VerifyAll()

View File

@ -1,242 +0,0 @@
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for baremetal IPMI power manager."""
import os
import stat
import tempfile
from oslo.config import cfg
from nova import test
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import ipmi
from nova.virt.baremetal import utils as bm_utils
CONF = cfg.CONF
class BareMetalIPMITestCase(test.NoDBTestCase):
def setUp(self):
super(BareMetalIPMITestCase, self).setUp()
self.node = bm_db_utils.new_bm_node(
id=123,
pm_address='fake-address',
pm_user='fake-user',
pm_password='fake-password')
self.ipmi = ipmi.IPMI(self.node)
def test_construct(self):
self.assertEqual(self.ipmi.node_id, 123)
self.assertEqual(self.ipmi.address, 'fake-address')
self.assertEqual(self.ipmi.user, 'fake-user')
self.assertEqual(self.ipmi.password, 'fake-password')
def test_make_password_file(self):
pw_file = ipmi._make_password_file(self.node['pm_password'])
try:
self.assertTrue(os.path.isfile(pw_file))
self.assertEqual(os.stat(pw_file)[stat.ST_MODE] & 0o777, 0o600)
with open(pw_file, "r") as f:
pm_password = f.read()
self.assertEqual(pm_password, self.node['pm_password'])
finally:
os.unlink(pw_file)
def test_make_empty_password_file(self):
pw_file = ipmi._make_password_file('')
try:
self.assertTrue(os.path.isfile(pw_file))
self.assertEqual(os.stat(pw_file)[stat.ST_MODE] & 0o777, 0o600)
with open(pw_file, "rb") as f:
pm_password = f.read()
self.assertEqual(b"\0", pm_password)
finally:
os.unlink(pw_file)
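
The two password-file tests above are the whole contract: a temp file with 0600 permissions containing the password, or a single NUL byte when the password is empty. A sketch using only tempfile/os (the removed helper also relied on nova utilities for cleanup):

import os
import tempfile


def make_password_file(password):
    # Contract from the tests above: mode 0600, containing the password,
    # or a single NUL byte when the password is empty.
    fd, path = tempfile.mkstemp()
    os.fchmod(fd, 0o600)
    with os.fdopen(fd, 'w') as f:
        f.write(password or '\0')
    return path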
def test_exec_ipmitool(self):
pw_file = '/tmp/password_file'
self.mox.StubOutWithMock(ipmi, '_make_password_file')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
ipmi._make_password_file(self.ipmi.password).AndReturn(pw_file)
args = [
'ipmitool',
'-I', 'lanplus',
'-H', self.ipmi.address,
'-U', self.ipmi.user,
'-f', pw_file,
'A', 'B', 'C',
]
utils.execute(*args, attempts=3).AndReturn(('', ''))
bm_utils.unlink_without_raise(pw_file).AndReturn(None)
self.mox.ReplayAll()
self.ipmi._exec_ipmitool('A B C')
self.mox.VerifyAll()
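
test_exec_ipmitool spells out the exact command line. For reference, a sketch of assembling it; subprocess stands in for nova's utils.execute(*args, attempts=3) that the removed code used:

import subprocess


def exec_ipmitool(address, user, password_file, command):
    # Argument order as asserted by test_exec_ipmitool above.
    args = ['ipmitool',
            '-I', 'lanplus',
            '-H', address,
            '-U', user,
            '-f', password_file]
    args.extend(command.split())
    return subprocess.check_output(args)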
def test_is_power_on_ok(self):
self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is on\n"])
self.mox.ReplayAll()
res = self.ipmi.is_power_on()
self.assertEqual(res, True)
self.mox.VerifyAll()
def test_is_power_no_answer(self):
self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
self.ipmi._exec_ipmitool("power status").AndReturn(
["Fake reply\n"])
self.mox.ReplayAll()
res = self.ipmi.is_power_on()
self.assertIsNone(res)
self.mox.VerifyAll()
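
The two is_power_on tests define the output parsing: 'Chassis Power is on' maps to True and unrecognized output to None. A sketch that also covers the 'off' case the power-cycling tests below rely on:

def parse_power_status(lines):
    # Mirrors the assertions above: True for "on", False for "off",
    # None when the ipmitool output is not recognized.
    out = lines[0] if lines else ''
    if 'Chassis Power is on' in out:
        return True
    if 'Chassis Power is off' in out:
        return False
    return None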
def test_power_already_on(self):
self.flags(ipmi_power_retry=0, group='baremetal')
self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is on\n"])
self.mox.ReplayAll()
self.ipmi.state = baremetal_states.DELETED
self.ipmi._power_on()
self.mox.VerifyAll()
self.assertEqual(self.ipmi.state, baremetal_states.ACTIVE)
def test_power_on_ok(self):
self.flags(ipmi_power_retry=0, group='baremetal')
self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is off\n"])
self.ipmi._exec_ipmitool("power on").AndReturn([])
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is on\n"])
self.mox.ReplayAll()
self.ipmi.state = baremetal_states.DELETED
self.ipmi._power_on()
self.mox.VerifyAll()
self.assertEqual(self.ipmi.state, baremetal_states.ACTIVE)
def test_power_on_fail(self):
self.flags(ipmi_power_retry=0, group='baremetal')
self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is off\n"])
self.ipmi._exec_ipmitool("power on").AndReturn([])
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is off\n"])
self.mox.ReplayAll()
self.ipmi.state = baremetal_states.DELETED
self.ipmi._power_on()
self.mox.VerifyAll()
self.assertEqual(self.ipmi.state, baremetal_states.ERROR)
def test_power_on_max_retries(self):
self.flags(ipmi_power_retry=2, group='baremetal')
self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is off\n"])
self.ipmi._exec_ipmitool("power on").AndReturn([])
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is off\n"])
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is off\n"])
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is off\n"])
self.mox.ReplayAll()
self.ipmi.state = baremetal_states.DELETED
self.ipmi._power_on()
self.mox.VerifyAll()
self.assertEqual(self.ipmi.state, baremetal_states.ERROR)
self.assertEqual(self.ipmi.retries, 3)
def test_power_off_ok(self):
self.flags(ipmi_power_retry=0, group='baremetal')
self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is on\n"])
self.ipmi._exec_ipmitool("power off").AndReturn([])
self.ipmi._exec_ipmitool("power status").AndReturn(
["Chassis Power is off\n"])
self.mox.ReplayAll()
self.ipmi.state = baremetal_states.ACTIVE
self.ipmi._power_off()
self.mox.VerifyAll()
self.assertEqual(self.ipmi.state, baremetal_states.DELETED)
def test_get_console_pid_path(self):
self.flags(terminal_pid_dir='/tmp', group='baremetal')
path = ipmi._get_console_pid_path(self.ipmi.node_id)
self.assertEqual(path, '/tmp/%s.pid' % self.ipmi.node_id)
def test_console_pid(self):
fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write("12345\n")
self.mox.StubOutWithMock(ipmi, '_get_console_pid_path')
ipmi._get_console_pid_path(self.ipmi.node_id).AndReturn(path)
self.mox.ReplayAll()
pid = ipmi._get_console_pid(self.ipmi.node_id)
bm_utils.unlink_without_raise(path)
self.mox.VerifyAll()
self.assertEqual(pid, 12345)
def test_console_pid_nan(self):
fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write("hello world\n")
self.mox.StubOutWithMock(ipmi, '_get_console_pid_path')
ipmi._get_console_pid_path(self.ipmi.node_id).AndReturn(path)
self.mox.ReplayAll()
pid = ipmi._get_console_pid(self.ipmi.node_id)
bm_utils.unlink_without_raise(path)
self.mox.VerifyAll()
self.assertIsNone(pid)
def test_console_pid_file_not_found(self):
pid_path = ipmi._get_console_pid_path(self.ipmi.node_id)
self.mox.StubOutWithMock(os.path, 'exists')
os.path.exists(pid_path).AndReturn(False)
self.mox.ReplayAll()
pid = ipmi._get_console_pid(self.ipmi.node_id)
self.mox.VerifyAll()
self.assertIsNone(pid)

View File

@ -1,420 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import time
import mock
import mox
from oslo.utils import units
from nova.cmd import baremetal_deploy_helper as bmdh
from nova.openstack.common import log as logging
from nova import test
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.virt.baremetal import db as bm_db
bmdh.LOG = logging.getLogger('nova.virt.baremetal.deploy_helper')
_PXECONF_DEPLOY = """
default deploy
label deploy
kernel deploy_kernel
append initrd=deploy_ramdisk
ipappend 3
label boot
kernel kernel
append initrd=ramdisk root=${ROOT}
"""
_PXECONF_BOOT = """
default boot
label deploy
kernel deploy_kernel
append initrd=deploy_ramdisk
ipappend 3
label boot
kernel kernel
append initrd=ramdisk root=UUID=12345678-1234-1234-1234-1234567890abcdef
"""
class WorkerTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(WorkerTestCase, self).setUp()
self.worker = bmdh.Worker()
# Make tearDown() fast
self.worker.queue_timeout = 0.1
self.worker.start()
def tearDown(self):
if self.worker.isAlive():
self.worker.stop = True
self.worker.join(timeout=1)
super(WorkerTestCase, self).tearDown()
def wait_queue_empty(self, timeout):
for _ in xrange(int(timeout / 0.1)):
if bmdh.QUEUE.empty():
break
time.sleep(0.1)
def test_run_calls_deploy(self):
"""Check all queued requests are passed to deploy()."""
history = []
def fake_deploy(**params):
history.append(params)
self.stubs.Set(bmdh, 'deploy', fake_deploy)
self.mox.StubOutWithMock(bm_db, 'bm_node_update')
# update is called twice inside Worker.run
for i in range(6):
bm_db.bm_node_update(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
params_list = [{'fake1': ''}, {'fake2': ''}, {'fake3': ''}]
for (dep_id, params) in enumerate(params_list):
bmdh.QUEUE.put((dep_id, params))
self.wait_queue_empty(1)
self.assertEqual(params_list, history)
self.mox.VerifyAll()
def test_run_with_failing_deploy(self):
"""Check a worker keeps on running even if deploy() raises
an exception.
"""
history = []
def fake_deploy(**params):
history.append(params)
# always fail
raise Exception('test')
self.stubs.Set(bmdh, 'deploy', fake_deploy)
self.mox.StubOutWithMock(bm_db, 'bm_node_update')
# update is called twice inside Worker.run
for i in range(6):
bm_db.bm_node_update(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
params_list = [{'fake1': ''}, {'fake2': ''}, {'fake3': ''}]
for (dep_id, params) in enumerate(params_list):
bmdh.QUEUE.put((dep_id, params))
self.wait_queue_empty(1)
self.assertEqual(params_list, history)
self.mox.VerifyAll()
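
The two Worker tests above only assert the loop's externally visible behaviour: every queued request reaches deploy(), and a raising deploy() does not kill the worker. A hedged sketch of a loop with those properties; the bm_node state updates the tests also expect are only noted in a comment:

import Queue as queue  # Python 2, like the removed helper
import threading


class SketchWorker(threading.Thread):
    """Sketch of the queue-draining loop the two tests above exercise."""

    def __init__(self, work_queue, deploy_fn):
        super(SketchWorker, self).__init__()
        self.queue = work_queue
        self.deploy_fn = deploy_fn
        self.stop = False
        self.queue_timeout = 0.1

    def run(self):
        while not self.stop:
            try:
                deploy_id, params = self.queue.get(
                    block=True, timeout=self.queue_timeout)
            except queue.Empty:
                continue
            # The real worker updated the bm_node row before and after each
            # deploy (the two bm_node_update calls the tests expect per item).
            try:
                self.deploy_fn(**params)
            except Exception:
                # test_run_with_failing_deploy: keep running on failure.
                pass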
class PhysicalWorkTestCase(test.NoDBTestCase):
def setUp(self):
super(PhysicalWorkTestCase, self).setUp()
def noop(*args, **kwargs):
pass
self.stubs.Set(time, 'sleep', noop)
def _deploy_mox(self):
self.mox.StubOutWithMock(bmdh, 'get_dev')
self.mox.StubOutWithMock(bmdh, 'get_image_mb')
self.mox.StubOutWithMock(bmdh, 'discovery')
self.mox.StubOutWithMock(bmdh, 'login_iscsi')
self.mox.StubOutWithMock(bmdh, 'logout_iscsi')
self.mox.StubOutWithMock(bmdh, 'make_partitions')
self.mox.StubOutWithMock(bmdh, 'is_block_device')
self.mox.StubOutWithMock(bmdh, 'dd')
self.mox.StubOutWithMock(bmdh, 'mkswap')
self.mox.StubOutWithMock(bmdh, 'block_uuid')
self.mox.StubOutWithMock(bmdh, 'switch_pxe_config')
self.mox.StubOutWithMock(bmdh, 'notify')
def test_deploy_no_ephemeral(self):
address = '127.0.0.1'
port = 3306
iqn = 'iqn.xyz'
lun = 1
image_path = '/tmp/xyz/image'
pxe_config_path = '/tmp/abc/pxeconfig'
root_mb = 128
swap_mb = 64
ephemeral_mb = 0
dev = '/dev/fake'
root_part = '/dev/fake-part1'
swap_part = '/dev/fake-part2'
root_uuid = '12345678-1234-1234-12345678-12345678abcdef'
self._deploy_mox()
bmdh.get_dev(address, port, iqn, lun).AndReturn(dev)
bmdh.get_image_mb(image_path).AndReturn(1) # < root_mb
bmdh.discovery(address, port)
bmdh.login_iscsi(address, port, iqn)
bmdh.is_block_device(dev).AndReturn(True)
bmdh.make_partitions(dev, root_mb, swap_mb, ephemeral_mb)
bmdh.is_block_device(root_part).AndReturn(True)
bmdh.is_block_device(swap_part).AndReturn(True)
bmdh.dd(image_path, root_part)
bmdh.mkswap(swap_part)
bmdh.block_uuid(root_part).AndReturn(root_uuid)
bmdh.logout_iscsi(address, port, iqn)
bmdh.switch_pxe_config(pxe_config_path, root_uuid)
bmdh.notify(address, 10000)
self.mox.ReplayAll()
bmdh.deploy(address, port, iqn, lun, image_path, pxe_config_path,
root_mb, swap_mb, ephemeral_mb)
self.mox.VerifyAll()
def test_deploy_with_ephemeral(self):
address = '127.0.0.1'
port = 3306
iqn = 'iqn.xyz'
lun = 1
image_path = '/tmp/xyz/image'
pxe_config_path = '/tmp/abc/pxeconfig'
root_mb = 128
swap_mb = 64
ephemeral_mb = 256
dev = '/dev/fake'
ephemeral_part = '/dev/fake-part1'
swap_part = '/dev/fake-part2'
root_part = '/dev/fake-part3'
root_uuid = '12345678-1234-1234-12345678-12345678abcdef'
self._deploy_mox()
self.mox.StubOutWithMock(bmdh, 'mkfs_ephemeral')
bmdh.get_dev(address, port, iqn, lun).AndReturn(dev)
bmdh.get_image_mb(image_path).AndReturn(1) # < root_mb
bmdh.discovery(address, port)
bmdh.login_iscsi(address, port, iqn)
bmdh.is_block_device(dev).AndReturn(True)
bmdh.make_partitions(dev, root_mb, swap_mb, ephemeral_mb)
bmdh.is_block_device(root_part).AndReturn(True)
bmdh.is_block_device(swap_part).AndReturn(True)
bmdh.is_block_device(ephemeral_part).AndReturn(True)
bmdh.dd(image_path, root_part)
bmdh.mkswap(swap_part)
bmdh.mkfs_ephemeral(ephemeral_part)
bmdh.block_uuid(root_part).AndReturn(root_uuid)
bmdh.logout_iscsi(address, port, iqn)
bmdh.switch_pxe_config(pxe_config_path, root_uuid)
bmdh.notify(address, 10000)
self.mox.ReplayAll()
bmdh.deploy(address, port, iqn, lun, image_path, pxe_config_path,
root_mb, swap_mb, ephemeral_mb)
self.mox.VerifyAll()
def test_deploy_preserve_ephemeral(self):
address = '127.0.0.1'
port = 3306
iqn = 'iqn.xyz'
lun = 1
image_path = '/tmp/xyz/image'
pxe_config_path = '/tmp/abc/pxeconfig'
root_mb = 128
swap_mb = 64
ephemeral_mb = 128
dev = '/dev/fake'
ephemeral_part = '/dev/fake-part1'
swap_part = '/dev/fake-part2'
root_part = '/dev/fake-part3'
root_uuid = '12345678-1234-1234-12345678-12345678abcdef'
self._deploy_mox()
self.mox.StubOutWithMock(bmdh, 'mkfs_ephemeral')
bmdh.get_dev(address, port, iqn, lun).AndReturn(dev)
bmdh.get_image_mb(image_path).AndReturn(1) # < root_mb
bmdh.discovery(address, port)
bmdh.login_iscsi(address, port, iqn)
bmdh.is_block_device(dev).AndReturn(True)
bmdh.make_partitions(dev, root_mb, swap_mb, ephemeral_mb)
bmdh.is_block_device(root_part).AndReturn(True)
bmdh.is_block_device(swap_part).AndReturn(True)
bmdh.is_block_device(ephemeral_part).AndReturn(True)
bmdh.dd(image_path, root_part)
bmdh.mkswap(swap_part)
bmdh.block_uuid(root_part).AndReturn(root_uuid)
bmdh.logout_iscsi(address, port, iqn)
bmdh.switch_pxe_config(pxe_config_path, root_uuid)
bmdh.notify(address, 10000)
self.mox.ReplayAll()
bmdh.deploy(address, port, iqn, lun, image_path, pxe_config_path,
root_mb, swap_mb, ephemeral_mb, True)
self.mox.VerifyAll()
def test_always_logout_iscsi(self):
"""logout_iscsi() must be called once login_iscsi() is called."""
address = '127.0.0.1'
port = 3306
iqn = 'iqn.xyz'
lun = 1
image_path = '/tmp/xyz/image'
pxe_config_path = '/tmp/abc/pxeconfig'
root_mb = 128
swap_mb = 64
ephemeral_mb = 256
dev = '/dev/fake'
self.mox.StubOutWithMock(bmdh, 'get_dev')
self.mox.StubOutWithMock(bmdh, 'get_image_mb')
self.mox.StubOutWithMock(bmdh, 'discovery')
self.mox.StubOutWithMock(bmdh, 'login_iscsi')
self.mox.StubOutWithMock(bmdh, 'logout_iscsi')
self.mox.StubOutWithMock(bmdh, 'work_on_disk')
class TestException(Exception):
pass
bmdh.get_dev(address, port, iqn, lun).AndReturn(dev)
bmdh.get_image_mb(image_path).AndReturn(1) # < root_mb
bmdh.discovery(address, port)
bmdh.login_iscsi(address, port, iqn)
bmdh.work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, image_path,
False).AndRaise(TestException)
bmdh.logout_iscsi(address, port, iqn)
self.mox.ReplayAll()
self.assertRaises(TestException,
bmdh.deploy,
address, port, iqn, lun, image_path,
pxe_config_path, root_mb, swap_mb, ephemeral_mb)
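
test_always_logout_iscsi captures the key invariant of deploy(): once login_iscsi() succeeds, logout_iscsi() runs even if the disk work fails. A compact sketch of that ordering, with placeholder callables standing in for the module-level helpers the tests stub out:

def deploy(address, port, iqn, lun, image_path, pxe_config_path, helpers):
    """Sketch of the call ordering asserted by PhysicalWorkTestCase above.

    `helpers` is a dict of callables standing in for the stubbed functions
    (get_dev, discovery, login_iscsi, work_on_disk, ...).
    """
    dev = helpers['get_dev'](address, port, iqn, lun)
    helpers['get_image_mb'](image_path)   # checked against root_mb upstream
    helpers['discovery'](address, port)
    helpers['login_iscsi'](address, port, iqn)
    try:
        # partitioning, dd of the image, mkswap, block_uuid, ...
        root_uuid = helpers['work_on_disk'](dev, image_path)
    finally:
        # test_always_logout_iscsi: logout happens even when the disk
        # work raises.
        helpers['logout_iscsi'](address, port, iqn)
    helpers['switch_pxe_config'](pxe_config_path, root_uuid)
    helpers['notify'](address, 10000)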
class WorkOnDiskTestCase(test.NoDBTestCase):
def setUp(self):
super(WorkOnDiskTestCase, self).setUp()
self.image_path = '/tmp/xyz/image'
self.root_mb = 128
self.swap_mb = 64
self.ephemeral_mb = 256
self.dev = '/dev/fake'
self.ephemeral_part = '/dev/fake-part1'
self.swap_part = '/dev/fake-part2'
self.root_part = '/dev/fake-part3'
self.m_ibd = mock.Mock()
self.m_mp = mock.Mock()
self.stubs.Set(bmdh, 'is_block_device', self.m_ibd)
self.stubs.Set(bmdh, 'make_partitions', self.m_mp)
def test_no_parent_device(self):
self.m_ibd.return_value = False
self.assertRaises(bmdh.BareMetalDeployException,
bmdh.work_on_disk,
self.dev, self.root_mb, self.swap_mb,
self.ephemeral_mb, self.image_path, False)
self.m_ibd.assert_called_once_with(self.dev)
self.assertFalse(self.m_mp.called)
def test_no_root_partition(self):
self.m_ibd.side_effect = [True, False]
calls = [mock.call(self.dev),
mock.call(self.root_part)]
self.assertRaises(bmdh.BareMetalDeployException,
bmdh.work_on_disk,
self.dev, self.root_mb, self.swap_mb,
self.ephemeral_mb, self.image_path, False)
self.assertEqual(self.m_ibd.call_args_list, calls)
self.m_mp.assert_called_once_with(self.dev, self.root_mb, self.swap_mb,
self.ephemeral_mb)
def test_no_swap_partition(self):
self.m_ibd.side_effect = [True, True, False]
calls = [mock.call(self.dev),
mock.call(self.root_part),
mock.call(self.swap_part)]
self.assertRaises(bmdh.BareMetalDeployException,
bmdh.work_on_disk,
self.dev, self.root_mb, self.swap_mb,
self.ephemeral_mb, self.image_path, False)
self.assertEqual(self.m_ibd.call_args_list, calls)
self.m_mp.assert_called_once_with(self.dev, self.root_mb, self.swap_mb,
self.ephemeral_mb)
def test_no_ephemeral_partition(self):
self.m_ibd.side_effect = [True, True, True, False]
calls = [mock.call(self.dev),
mock.call(self.root_part),
mock.call(self.swap_part),
mock.call(self.ephemeral_part)]
self.assertRaises(bmdh.BareMetalDeployException,
bmdh.work_on_disk,
self.dev, self.root_mb, self.swap_mb,
self.ephemeral_mb, self.image_path, False)
self.assertEqual(self.m_ibd.call_args_list, calls)
self.m_mp.assert_called_once_with(self.dev, self.root_mb, self.swap_mb,
self.ephemeral_mb)
class SwitchPxeConfigTestCase(test.NoDBTestCase):
def setUp(self):
super(SwitchPxeConfigTestCase, self).setUp()
(fd, self.fname) = tempfile.mkstemp()
os.write(fd, _PXECONF_DEPLOY)
os.close(fd)
def tearDown(self):
os.unlink(self.fname)
super(SwitchPxeConfigTestCase, self).tearDown()
def test_switch_pxe_config(self):
bmdh.switch_pxe_config(self.fname,
'12345678-1234-1234-1234-1234567890abcdef')
with open(self.fname, 'r') as f:
pxeconf = f.read()
self.assertEqual(pxeconf, _PXECONF_BOOT)
class OtherFunctionTestCase(test.NoDBTestCase):
def test_get_dev(self):
expected = '/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9'
actual = bmdh.get_dev('1.2.3.4', 5678, 'iqn.fake', 9)
self.assertEqual(expected, actual)
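
test_get_dev gives the exact by-path device format. A trivial sketch:

def get_dev(ip, port, iqn, lun):
    # Path format asserted by test_get_dev above.
    return '/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s' % (ip, port, iqn, lun)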
def test_get_image_mb(self):
size = None
def fake_getsize(path):
return size
self.stubs.Set(os.path, 'getsize', fake_getsize)
size = 0
self.assertEqual(bmdh.get_image_mb('x'), 0)
size = 1
self.assertEqual(bmdh.get_image_mb('x'), 1)
size = units.Mi
self.assertEqual(bmdh.get_image_mb('x'), 1)
size = units.Mi + 1
self.assertEqual(bmdh.get_image_mb('x'), 2)
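
test_get_image_mb pins the rounding: the file size in bytes divided by 1 MiB, rounded up. A sketch:

import os


def get_image_mb(image_path):
    # Rounding behaviour asserted above: 0 -> 0, 1 byte -> 1 MB,
    # 1 MiB -> 1 MB, 1 MiB + 1 byte -> 2 MB.
    mib = 1024 * 1024
    size = os.path.getsize(image_path)
    return (size + mib - 1) // mib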

View File

@ -1,29 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.cmd import baremetal_manage as bm_man
from nova.tests.virt.baremetal.db import base as bm_db_base
class BareMetalDbCommandsTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalDbCommandsTestCase, self).setUp()
self.commands = bm_man.BareMetalDbCommands()
def test_sync_and_version(self):
self.commands.sync()
v = self.commands.version()
self.assertTrue(v > 0)

View File

@ -1,645 +0,0 @@
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal pxe driver."""
import os
import mox
from oslo.config import cfg
from oslo.db import exception as db_exc
from testtools import matchers
from nova import exception
from nova import objects
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import pxe
from nova.virt.baremetal import utils as bm_utils
from nova.virt.disk import api as disk_api
from nova.virt import fake as fake_virt
CONF = cfg.CONF
COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.pxe.PXE',
flavor_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager='nova.virt.baremetal.fake.FakePowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
group='baremetal',
)
class BareMetalPXETestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalPXETestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
self.driver = pxe.PXE(fake_virt.FakeVirtAPI())
fake_image.stub_out_image_service(self.stubs)
self.addCleanup(fake_image.FakeImageService_reset)
self.context = utils.get_test_admin_context()
self.test_block_device_info = None,
self.instance = utils.get_test_instance()
self.test_network_info = utils.get_test_network_info()
self.node_info = bm_db_utils.new_bm_node(
service_host='test_host',
cpus=4,
memory_mb=2048,
)
self.nic_info = [
{'address': '22:22:22:22:22:22', 'datapath_id': '0x1',
'port_no': 1},
{'address': '33:33:33:33:33:33', 'datapath_id': '0x2',
'port_no': 2},
]
def _create_node(self):
# File injection is off by default, but we should continue to test it
# until it is removed.
CONF.set_override('use_file_injection', True, 'baremetal')
self.node = db.bm_node_create(self.context, self.node_info)
for nic in self.nic_info:
db.bm_interface_create(
self.context,
self.node['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
self.instance['node'] = self.node['id']
self.spawn_params = dict(
admin_password='test_pass',
block_device_info=self.test_block_device_info,
context=self.context,
image_meta=utils.get_test_image_info(None,
self.instance),
injected_files=[('/fake/path', 'hello world')],
instance=self.instance,
network_info=self.test_network_info,
)
class PXEClassMethodsTestCase(BareMetalPXETestCase):
def test_build_pxe_config(self):
args = {
'deployment_id': 'aaa',
'deployment_key': 'bbb',
'deployment_iscsi_iqn': 'ccc',
'deployment_aki_path': 'ddd',
'deployment_ari_path': 'eee',
'aki_path': 'fff',
'ari_path': 'ggg',
'network_info': self.test_network_info,
}
config = pxe.build_pxe_config(**args)
self.assertThat(config, matchers.StartsWith('default deploy'))
# deploy bits are in the deploy section
start = config.index('label deploy')
end = config.index('label boot')
self.assertThat(config[start:end], matchers.MatchesAll(
matchers.Contains('kernel ddd'),
matchers.Contains('initrd=eee'),
matchers.Contains('deployment_id=aaa'),
matchers.Contains('deployment_key=bbb'),
matchers.Contains('iscsi_target_iqn=ccc'),
matchers.Not(matchers.Contains('kernel fff')),
))
# boot bits are in the boot section
start = config.index('label boot')
self.assertThat(config[start:], matchers.MatchesAll(
matchers.Contains('kernel fff'),
matchers.Contains('initrd=ggg'),
matchers.Not(matchers.Contains('kernel ddd')),
))
def test_build_pxe_network_config(self):
self.flags(
pxe_network_config=True,
group='baremetal',
)
net = utils.get_test_network_info(1)
config = pxe.build_pxe_network_config(net)
self.assertIn('eth0:off', config)
self.assertNotIn('eth1', config)
net = utils.get_test_network_info(2)
config = pxe.build_pxe_network_config(net)
self.assertIn('eth0:off', config)
self.assertIn('eth1:off', config)
def test_build_network_config(self):
net = utils.get_test_network_info(1)
config = pxe.build_network_config(net)
self.assertIn('eth0', config)
self.assertNotIn('eth1', config)
net = utils.get_test_network_info(2)
config = pxe.build_network_config(net)
self.assertIn('eth0', config)
self.assertIn('eth1', config)
def test_build_network_config_dhcp(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-dhcp.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0]['network']['subnets'][0]['ips'][0]['address'] = '1.2.3.4'
config = pxe.build_network_config(net)
self.assertIn('iface eth0 inet dhcp', config)
self.assertNotIn('address 1.2.3.4', config)
def test_build_network_config_static(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-static.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0]['network']['subnets'][0]['ips'][0]['address'] = '1.2.3.4'
config = pxe.build_network_config(net)
self.assertIn('iface eth0 inet static', config)
self.assertIn('address 1.2.3.4', config)
def test_build_network_config_static_parameters(self):
self.flags(use_ipv6=True)
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-static.ubuntu.template',
group='baremetal'
)
net = utils.get_test_network_info()
net[0]['network']['subnets'][0]['cidr'] = '10.1.1.0/24'
net[0]['network']['subnets'][0]['gateway']['address'] = '10.1.1.1'
net[0]['network']['subnets'][0]['dns'][0]['address'] = '10.1.1.2'
net[0]['network']['subnets'][0]['dns'][1]['address'] = '10.1.1.3'
net[0]['network']['subnets'][1]['cidr'] = 'fc00::/7'
net[0]['network']['subnets'][1]['ips'][0]['address'] = 'fc00::1'
net[0]['network']['subnets'][1]['gateway']['address'] = 'fc00::2'
config = pxe.build_network_config(net)
self.assertIn('iface eth0 inet static', config)
self.assertIn('gateway 10.1.1.1', config)
self.assertIn('dns-nameservers 10.1.1.2 10.1.1.3', config)
self.assertIn('iface eth0 inet6 static', config)
self.assertIn('address fc00::1', config)
self.assertIn('netmask 7', config)
self.assertIn('gateway fc00::2', config)
def test_image_dir_path(self):
self.assertEqual(
os.path.join(CONF.instances_path, 'instance-00000001'),
pxe.get_image_dir_path(self.instance))
def test_image_file_path(self):
self.assertEqual(
os.path.join(
CONF.instances_path, 'instance-00000001', 'disk'),
pxe.get_image_file_path(self.instance))
def test_pxe_config_file_path(self):
self.instance['uuid'] = 'aaaa-bbbb-cccc'
self.assertEqual(
os.path.join(CONF.baremetal.tftp_root,
'aaaa-bbbb-cccc', 'config'),
pxe.get_pxe_config_file_path(self.instance))
def test_pxe_mac_path(self):
self.assertEqual(
os.path.join(CONF.baremetal.tftp_root,
'pxelinux.cfg', '01-23-45-67-89-ab'),
pxe.get_pxe_mac_path('23:45:67:89:AB'))
def test_get_instance_deploy_ids(self):
self.instance['extra_specs'] = {
'baremetal:deploy_kernel_id': 'aaaa',
'baremetal:deploy_ramdisk_id': 'bbbb',
}
self.flags(deploy_kernel="fail", group='baremetal')
self.flags(deploy_ramdisk="fail", group='baremetal')
self.assertEqual('aaaa', pxe.get_deploy_aki_id(self.instance))
self.assertEqual('bbbb', pxe.get_deploy_ari_id(self.instance))
def test_get_default_deploy_ids(self):
self.instance['extra_specs'] = {}
self.flags(deploy_kernel="aaaa", group='baremetal')
self.flags(deploy_ramdisk="bbbb", group='baremetal')
self.assertEqual('aaaa', pxe.get_deploy_aki_id(self.instance))
self.assertEqual('bbbb', pxe.get_deploy_ari_id(self.instance))
def test_get_partition_sizes(self):
# default "kinda.big" instance
sizes = pxe.get_partition_sizes(self.instance)
self.assertEqual(40960, sizes[0])
self.assertEqual(1024, sizes[1])
def test_swap_not_zero(self):
# override swap to 0
flavor = utils.get_test_flavor(self.context)
flavor['swap'] = 0
self.instance = utils.get_test_instance(self.context, flavor)
sizes = pxe.get_partition_sizes(self.instance)
self.assertEqual(40960, sizes[0])
self.assertEqual(1, sizes[1])
def test_get_tftp_image_info(self):
flavor = utils.get_test_flavor()
# Raises an exception when options are neither specified
# on the instance nor in configuration file
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, flavor)
# Test that other non-true values also raise an exception
self.flags(deploy_kernel='', deploy_ramdisk='', group='baremetal')
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, flavor)
# Even if the instance includes kernel_id and ramdisk_id,
# we still need deploy_kernel_id and deploy_ramdisk_id.
# If those aren't present in instance[], and not specified in
# config file, then we raise an exception.
self.instance['kernel_id'] = 'aaaa'
self.instance['ramdisk_id'] = 'bbbb'
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, flavor)
# If an instance doesn't specify deploy_kernel_id or deploy_ramdisk_id,
# but defaults are set in the config file, we should use those.
# Here, we confirm both that all four values were set
# and that the proper paths are getting set for all of them
self.flags(deploy_kernel='cccc', deploy_ramdisk='dddd',
group='baremetal')
base = os.path.join(CONF.baremetal.tftp_root, self.instance['uuid'])
res = pxe.get_tftp_image_info(self.instance, flavor)
expected = {
'kernel': ['aaaa', os.path.join(base, 'kernel')],
'ramdisk': ['bbbb', os.path.join(base, 'ramdisk')],
'deploy_kernel': ['cccc', os.path.join(base, 'deploy_kernel')],
'deploy_ramdisk': ['dddd',
os.path.join(base, 'deploy_ramdisk')],
}
self.assertEqual(expected, res)
# If deploy_kernel_id and deploy_ramdisk_id are specified on
# image extra_specs, this should override any default configuration.
# Note that it is passed on the 'instance' object, despite being
# inherited from the flavor_extra_specs table.
extra_specs = {
'baremetal:deploy_kernel_id': 'eeee',
'baremetal:deploy_ramdisk_id': 'ffff',
}
flavor['extra_specs'] = extra_specs
res = pxe.get_tftp_image_info(self.instance, flavor)
self.assertEqual('eeee', res['deploy_kernel'][0])
self.assertEqual('ffff', res['deploy_ramdisk'][0])
# However, if invalid values are passed on the image extra_specs,
# this should still raise an exception.
extra_specs = {
'baremetal:deploy_kernel_id': '',
'baremetal:deploy_ramdisk_id': '',
}
flavor['extra_specs'] = extra_specs
self.assertRaises(exception.NovaException,
pxe.get_tftp_image_info,
self.instance, flavor)
class PXEPrivateMethodsTestCase(BareMetalPXETestCase):
def test_collect_mac_addresses(self):
self._create_node()
address_list = [nic['address'] for nic in self.nic_info]
address_list.sort()
macs = self.driver._collect_mac_addresses(self.context, self.node)
self.assertEqual(address_list, macs)
def test_cache_tftp_images(self):
self.instance['kernel_id'] = 'aaaa'
self.instance['ramdisk_id'] = 'bbbb'
flavor = utils.get_test_flavor()
extra_specs = {
'baremetal:deploy_kernel_id': 'cccc',
'baremetal:deploy_ramdisk_id': 'dddd',
}
flavor['extra_specs'] = extra_specs
image_info = pxe.get_tftp_image_info(self.instance, flavor)
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(os.path.join(CONF.baremetal.tftp_root,
self.instance['uuid'])).AndReturn(True)
for uuid, path in [image_info[label] for label in image_info]:
os.path.exists(path).AndReturn(True)
self.mox.ReplayAll()
self.driver._cache_tftp_images(
self.context, self.instance, image_info)
self.mox.VerifyAll()
def test_cache_image(self):
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os, 'unlink')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(pxe.get_image_dir_path(self.instance)).AndReturn(True)
disk_path = os.path.join(
pxe.get_image_dir_path(self.instance), 'disk')
os.unlink(disk_path).AndReturn(None)
os.path.exists(disk_path).AndReturn(True)
os.path.exists(pxe.get_image_file_path(self.instance)).\
AndReturn(True)
self.mox.ReplayAll()
image_meta = utils.get_test_image_info(
self.context, self.instance)
self.driver._cache_image(
self.context, self.instance, image_meta)
self.mox.VerifyAll()
def test_inject_into_image(self):
# NOTE(deva): we could also test this method by stubbing
# nova.virt.disk.api._inject_*_into_fs
self._create_node()
files = []
self.instance['hostname'] = 'fake hostname'
files.append(('/etc/hostname', 'fake hostname'))
self.instance['key_data'] = 'fake ssh key'
net_info = utils.get_test_network_info(1)
net = pxe.build_network_config(net_info)
admin_password = 'fake password'
self.mox.StubOutWithMock(os.path, 'exists')
os.path.exists(mox.IgnoreArg()).AndReturn(True)
self.mox.StubOutWithMock(disk_api, 'inject_data')
disk_api.inject_data(
admin_password=admin_password,
image=pxe.get_image_file_path(self.instance),
key='fake ssh key',
metadata=None,
partition=None,
net=net,
files=files, # this is what we're really testing
).AndReturn(True)
self.mox.ReplayAll()
self.driver._inject_into_image(
self.context, self.node, self.instance,
network_info=net_info,
admin_password=admin_password,
injected_files=None)
self.mox.VerifyAll()
class PXEPublicMethodsTestCase(BareMetalPXETestCase):
def test_cache_images(self):
self._create_node()
self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
self.mox.StubOutWithMock(pxe, "get_tftp_image_info")
self.mox.StubOutWithMock(self.driver, "_cache_tftp_images")
self.mox.StubOutWithMock(self.driver, "_cache_image")
self.mox.StubOutWithMock(self.driver, "_inject_into_image")
objects.Flavor.get_by_id(self.context,
self.instance['instance_type_id']
).AndReturn({})
pxe.get_tftp_image_info(self.instance, {}).AndReturn([])
self.driver._cache_tftp_images(self.context, self.instance, [])
self.driver._cache_image(self.context, self.instance, [])
self.driver._inject_into_image(self.context, self.node, self.instance,
self.test_network_info, None, '')
self.mox.ReplayAll()
self.driver.cache_images(
self.context, self.node, self.instance,
admin_password='',
image_meta=[],
injected_files=None,
network_info=self.test_network_info,
)
self.mox.VerifyAll()
def test_destroy_images(self):
self._create_node()
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
bm_utils.unlink_without_raise(pxe.get_image_file_path(self.instance))
bm_utils.rmtree_without_raise(pxe.get_image_dir_path(self.instance))
self.mox.ReplayAll()
self.driver.destroy_images(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_dhcp_options_for_instance(self):
self._create_node()
self.mox.ReplayAll()
expected = [{'opt_name': 'bootfile-name',
'opt_value': CONF.baremetal.pxe_bootfile_name},
{'opt_name': 'server-ip-address', 'opt_value': CONF.my_ip},
{'opt_name': 'tftp-server', 'opt_value': CONF.my_ip}]
res = self.driver.dhcp_options_for_instance(self.instance)
self.assertEqual(expected.sort(), res.sort())
self.mox.VerifyAll()
def test_activate_bootloader_passes_details(self):
self._create_node()
macs = [nic['address'] for nic in self.nic_info]
macs.sort()
image_info = {
'deploy_kernel': [None, 'aaaa'],
'deploy_ramdisk': [None, 'bbbb'],
'kernel': [None, 'cccc'],
'ramdisk': [None, 'dddd'],
}
self.instance['uuid'] = 'fake-uuid'
iqn = "iqn-%s" % self.instance['uuid']
pxe_config = 'this is a fake pxe config'
pxe_path = pxe.get_pxe_config_file_path(self.instance)
pxe.get_image_file_path(self.instance)
self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
self.mox.StubOutWithMock(pxe, 'get_partition_sizes')
self.mox.StubOutWithMock(bm_utils, 'random_alnum')
self.mox.StubOutWithMock(pxe, 'build_pxe_config')
self.mox.StubOutWithMock(bm_utils, 'write_to_file')
self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
objects.Flavor.get_by_id(self.context,
self.instance['instance_type_id']
).AndReturn({})
pxe.get_tftp_image_info(self.instance, {}).AndReturn(image_info)
pxe.get_partition_sizes(self.instance).AndReturn((0, 0, 0))
bm_utils.random_alnum(32).AndReturn('alnum')
pxe.build_pxe_config(
self.node['id'], 'alnum', iqn,
'aaaa', 'bbbb', 'cccc', 'dddd',
self.test_network_info).AndReturn(pxe_config)
bm_utils.write_to_file(pxe_path, pxe_config)
for mac in macs:
bm_utils.create_link_without_raise(
pxe_path, pxe.get_pxe_mac_path(mac))
self.mox.ReplayAll()
self.driver.activate_bootloader(self.context, self.node, self.instance,
network_info=self.test_network_info)
self.mox.VerifyAll()
def test_activate_and_deactivate_bootloader(self):
self._create_node()
flavor = objects.Flavor(
context=self.context,
extra_specs={
'baremetal:deploy_kernel_id': 'eeee',
'baremetal:deploy_ramdisk_id': 'ffff',
})
self.instance['uuid'] = 'fake-uuid'
self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
self.mox.StubOutWithMock(bm_utils, 'write_to_file')
self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
objects.Flavor.get_by_id(
self.context, self.instance['instance_type_id']).AndReturn(
flavor)
# create the config file
bm_utils.write_to_file(mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root))
# unlink and link the 2 interfaces
for i in range(2):
bm_utils.unlink_without_raise(mox.Or(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root)))
bm_utils.create_link_without_raise(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root))
# unlink all 2 interfaces, 4 images, and the config file
for i in range(7):
bm_utils.unlink_without_raise(mox.Or(
mox.StrContains('fake-uuid'),
mox.StrContains(CONF.baremetal.tftp_root)))
bm_utils.rmtree_without_raise(mox.StrContains('fake-uuid'))
self.mox.ReplayAll()
# activate and deactivate the bootloader
# and check the deployment task_state in the database
row = db.bm_node_get(self.context, 1)
self.assertIsNone(row['deploy_key'])
self.driver.activate_bootloader(self.context, self.node, self.instance,
network_info=self.test_network_info)
row = db.bm_node_get(self.context, 1)
self.assertIsNotNone(row['deploy_key'])
self.driver.deactivate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertIsNone(row['deploy_key'])
self.mox.VerifyAll()
def test_deactivate_bootloader_for_nonexistent_instance(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
pxe_path = pxe.get_pxe_config_file_path(self.instance)
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
extra_specs = dict(extra_specs={
'baremetal:deploy_ramdisk_id': 'ignore',
'baremetal:deploy_kernel_id': 'ignore'})
pxe.get_tftp_image_info(self.instance, extra_specs).\
AndRaise(exception.NovaException)
bm_utils.unlink_without_raise(pxe_path)
self.driver._collect_mac_addresses(self.context, self.node).\
AndRaise(db_exc.DBError)
bm_utils.rmtree_without_raise(
os.path.join(CONF.baremetal.tftp_root, 'fake-uuid'))
self.mox.ReplayAll()
self.driver.deactivate_bootloader(
self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_node(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
self.flags(pxe_deploy_timeout=1, group='baremetal')
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYING,
'instance_uuid': 'fake-uuid'})
# test timeout
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)
# test DEPLOYDONE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYDONE})
self.driver.activate_node(self.context, self.node, self.instance)
# test no deploy -- state is just ACTIVE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.ACTIVE})
self.driver.activate_node(self.context, self.node, self.instance)
# test node gone
db.bm_node_destroy(self.context, 1)
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)
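
Two of the smaller behaviours exercised above are worth restating on their own: get_pxe_mac_path derives the per-host pxelinux config name from a MAC address (the 01- prefix is pxelinux's Ethernet ARP-type prefix), and get_partition_sizes hands back the root size in MB while never letting swap drop below 1 MB. The sketch below is illustrative only and uses simplified signatures; the removed pxe module read these values off the instance and its flavor (the test flavor has a 40 GB root disk and 1024 MB of swap):

import os

def get_pxe_mac_path(tftp_root, mac):
    # pxelinux looks up per-host config as 01-<mac, dashed, lower case>
    # under pxelinux.cfg/, e.g. '23:45:67:89:AB' -> '01-23-45-67-89-ab'
    return os.path.join(tftp_root, 'pxelinux.cfg',
                        '01-' + mac.replace(':', '-').lower())

def get_partition_sizes(root_gb, swap_mb):
    # root is expressed in MB; a swap of 0 is bumped to 1 MB so the
    # partitioner always has something to create (40 GB, 0 -> 40960, 1)
    return root_gb * 1024, max(swap_mb, 1)

assert get_pxe_mac_path('/tftpboot', '23:45:67:89:AB').endswith(
    'pxelinux.cfg/01-23-45-67-89-ab')
assert get_partition_sizes(40, 1024) == (40960, 1024)
assert get_partition_sizes(40, 0) == (40960, 1)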


@ -1,398 +0,0 @@
# coding=utf-8
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal tilera driver."""
import os
import mox
from oslo.config import cfg
from oslo.db import exception as db_exc
from nova import exception
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import tilera
from nova.virt.baremetal import utils as bm_utils
from nova.virt.disk import api as disk_api
from nova.virt import fake as fake_virt
CONF = cfg.CONF
COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.tilera.Tilera',
flavor_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager='nova.virt.baremetal.fake.FakePowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
group='baremetal',
)
class BareMetalTileraTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalTileraTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
self.driver = tilera.Tilera(fake_virt.FakeVirtAPI())
fake_image.stub_out_image_service(self.stubs)
self.addCleanup(fake_image.FakeImageService_reset)
self.context = utils.get_test_admin_context()
self.test_block_device_info = None,
self.instance = utils.get_test_instance()
self.test_network_info = utils.get_test_network_info()
self.node_info = bm_db_utils.new_bm_node(
service_host='test_host',
cpus=4,
memory_mb=2048,
)
self.nic_info = [
{'address': '22:22:22:22:22:22', 'datapath_id': '0x1',
'port_no': 1},
{'address': '33:33:33:33:33:33', 'datapath_id': '0x2',
'port_no': 2},
]
def _create_node(self):
self.node = db.bm_node_create(self.context, self.node_info)
for nic in self.nic_info:
db.bm_interface_create(
self.context,
self.node['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
self.instance['node'] = self.node['id']
self.spawn_params = dict(
admin_password='test_pass',
block_device_info=self.test_block_device_info,
context=self.context,
image_meta=utils.get_test_image_info(None,
self.instance),
injected_files=[('/fake/path', 'hello world')],
instance=self.instance,
network_info=self.test_network_info,
)
class TileraClassMethodsTestCase(BareMetalTileraTestCase):
def test_build_network_config(self):
net = utils.get_test_network_info(1)
config = tilera.build_network_config(net)
self.assertIn('eth0', config)
self.assertNotIn('eth1', config)
net = utils.get_test_network_info(2)
config = tilera.build_network_config(net)
self.assertIn('eth0', config)
self.assertIn('eth1', config)
def test_build_network_config_dhcp(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-dhcp.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0]['network']['subnets'][0]['ips'][0]['address'] = '1.2.3.4'
config = tilera.build_network_config(net)
self.assertIn('iface eth0 inet dhcp', config)
self.assertNotIn('address 1.2.3.4', config)
def test_build_network_config_static(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-static.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0]['network']['subnets'][0]['ips'][0]['address'] = '1.2.3.4'
config = tilera.build_network_config(net)
self.assertIn('iface eth0 inet static', config)
self.assertIn('address 1.2.3.4', config)
def test_image_dir_path(self):
self.assertEqual(
tilera.get_image_dir_path(self.instance),
os.path.join(CONF.instances_path, 'instance-00000001'))
def test_image_file_path(self):
self.assertEqual(
tilera.get_image_file_path(self.instance),
os.path.join(
CONF.instances_path, 'instance-00000001', 'disk'))
def test_tilera_nfs_path(self):
self._create_node()
self.node['id'] = '123'
tilera_nfs_dir = "fs_" + self.node['id']
self.assertEqual(
tilera.get_tilera_nfs_path(self.node['id']),
os.path.join(CONF.baremetal.tftp_root,
tilera_nfs_dir))
def test_get_partition_sizes(self):
# default "kinda.big" instance
sizes = tilera.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1024)
def test_swap_not_zero(self):
# override swap to 0
flavor = utils.get_test_flavor(self.context)
flavor['swap'] = 0
self.instance = utils.get_test_instance(self.context, flavor)
sizes = tilera.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1)
def test_get_tftp_image_info(self):
# Tilera case needs only kernel_id.
self.instance['kernel_id'] = 'aaaa'
self.instance['uuid'] = 'fake-uuid'
# Here, we confirm both that kernel_id was set
# and that the proper paths are getting set for all of them
base = os.path.join(CONF.baremetal.tftp_root, self.instance['uuid'])
res = tilera.get_tftp_image_info(self.instance)
expected = {
'kernel': ['aaaa', os.path.join(base, 'kernel')],
}
self.assertEqual(res, expected)
class TileraPrivateMethodsTestCase(BareMetalTileraTestCase):
def test_collect_mac_addresses(self):
self._create_node()
address_list = [nic['address'] for nic in self.nic_info]
address_list.sort()
macs = self.driver._collect_mac_addresses(self.context, self.node)
self.assertEqual(macs, address_list)
def test_cache_tftp_images(self):
self.instance['kernel_id'] = 'aaaa'
image_info = tilera.get_tftp_image_info(self.instance)
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(os.path.join(CONF.baremetal.tftp_root,
self.instance['uuid'])).AndReturn(True)
for uuid, path in [image_info[label] for label in image_info]:
os.path.exists(path).AndReturn(True)
self.mox.ReplayAll()
self.driver._cache_tftp_images(
self.context, self.instance, image_info)
self.mox.VerifyAll()
def test_cache_image(self):
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os, 'unlink')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(tilera.get_image_dir_path(self.instance)).AndReturn(True)
disk_path = os.path.join(
tilera.get_image_dir_path(self.instance), 'disk')
os.path.exists(disk_path).AndReturn(True)
os.unlink(disk_path).AndReturn(None)
os.path.exists(tilera.get_image_file_path(self.instance)).\
AndReturn(True)
self.mox.ReplayAll()
image_meta = utils.get_test_image_info(
self.context, self.instance)
self.driver._cache_image(
self.context, self.instance, image_meta)
self.mox.VerifyAll()
def test_inject_into_image(self):
self._create_node()
files = []
self.instance['hostname'] = 'fake hostname'
files.append(('/etc/hostname', 'fake hostname'))
self.instance['key_data'] = 'fake ssh key'
net_info = utils.get_test_network_info(1)
net = tilera.build_network_config(net_info)
admin_password = 'fake password'
self.mox.StubOutWithMock(os.path, 'exists')
os.path.exists(mox.IgnoreArg()).AndReturn(True)
self.mox.StubOutWithMock(disk_api, 'inject_data')
disk_api.inject_data(
admin_password=admin_password,
image=tilera.get_image_file_path(self.instance),
key='fake ssh key',
metadata=None,
partition=None,
net=net,
files=files,
).AndReturn(True)
self.mox.ReplayAll()
self.driver._inject_into_image(
self.context, self.node, self.instance,
network_info=net_info,
admin_password=admin_password,
injected_files=None)
self.mox.VerifyAll()
class TileraPublicMethodsTestCase(BareMetalTileraTestCase):
def test_cache_images(self):
self._create_node()
self.mox.StubOutWithMock(tilera, "get_tftp_image_info")
self.mox.StubOutWithMock(self.driver, "_cache_tftp_images")
self.mox.StubOutWithMock(self.driver, "_cache_image")
self.mox.StubOutWithMock(self.driver, "_inject_into_image")
tilera.get_tftp_image_info(self.instance).AndReturn([])
self.driver._cache_tftp_images(self.context, self.instance, [])
self.driver._cache_image(self.context, self.instance, [])
self.driver._inject_into_image(self.context, self.node, self.instance,
self.test_network_info, None, '')
self.mox.ReplayAll()
self.driver.cache_images(
self.context, self.node, self.instance,
admin_password='',
image_meta=[],
injected_files=None,
network_info=self.test_network_info,
)
self.mox.VerifyAll()
def test_destroy_images(self):
self._create_node()
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
bm_utils.unlink_without_raise(tilera.get_image_file_path(
self.instance))
bm_utils.rmtree_without_raise(tilera.get_image_dir_path(self.instance))
self.mox.ReplayAll()
self.driver.destroy_images(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_bootloader_passes_details(self):
self._create_node()
image_info = {
'kernel': [None, 'cccc'],
}
self.instance['uuid'] = 'fake-uuid'
tilera.get_tilera_nfs_path(self.instance)
tilera.get_image_file_path(self.instance)
self.mox.StubOutWithMock(tilera, 'get_tftp_image_info')
self.mox.StubOutWithMock(tilera, 'get_partition_sizes')
tilera.get_tftp_image_info(self.instance).AndReturn(image_info)
tilera.get_partition_sizes(self.instance).AndReturn((0, 0))
self.mox.ReplayAll()
self.driver.activate_bootloader(self.context, self.node, self.instance,
network_info=self.test_network_info)
self.mox.VerifyAll()
def test_activate_and_deactivate_bootloader(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
tilera.get_tilera_nfs_path(self.instance)
tilera.get_image_file_path(self.instance)
self.mox.ReplayAll()
# activate and deactivate the bootloader
# and check the deployment task_state in the database
row = db.bm_node_get(self.context, 1)
self.assertIsNone(row['deploy_key'])
self.driver.activate_bootloader(self.context, self.node, self.instance,
network_info=self.test_network_info)
row = db.bm_node_get(self.context, 1)
self.assertIsNotNone(row['deploy_key'])
self.driver.deactivate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertIsNone(row['deploy_key'])
self.mox.VerifyAll()
def test_deactivate_bootloader_for_nonexistent_instance(self):
self._create_node()
self.node['id'] = 'fake-node-id'
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
self.mox.StubOutWithMock(tilera, 'get_tftp_image_info')
self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
tilera.get_tilera_nfs_path(self.node['id'])
tilera.get_tftp_image_info(self.instance).\
AndRaise(exception.NovaException)
self.driver._collect_mac_addresses(self.context, self.node).\
AndRaise(db_exc.DBError)
self.mox.ReplayAll()
self.driver.deactivate_bootloader(
self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_node(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYING,
'instance_uuid': 'fake-uuid'})
# test DEPLOYDONE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYDONE})
self.driver.activate_node(self.context, self.node, self.instance)
# test no deploy -- state is just ACTIVE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.ACTIVE})
self.driver.activate_node(self.context, self.node, self.instance)
# test node gone
db.bm_node_destroy(self.context, 1)
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)
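
Relative to the PXE tests, the Tilera tests pin down two driver-specific paths: a per-node NFS root named fs_<node id> under the TFTP root, and a TFTP image map that carries only a kernel. A small illustrative sketch with simplified signatures (the removed helpers took the node and instance objects instead):

import os

def get_tilera_nfs_path(tftp_root, node_id):
    # each bare-metal node gets an NFS root directory named fs_<node id>
    return os.path.join(tftp_root, 'fs_%s' % node_id)

def get_tftp_image_info(tftp_root, instance_uuid, kernel_id):
    # unlike PXE, Tilera boots with only a kernel; no ramdisk, no deploy images
    base = os.path.join(tftp_root, instance_uuid)
    return {'kernel': [kernel_id, os.path.join(base, 'kernel')]}

assert get_tilera_nfs_path('/tftpboot', '123') == '/tftpboot/fs_123'
assert get_tftp_image_info('/tftpboot', 'fake-uuid', 'aaaa') == {
    'kernel': ['aaaa', '/tftpboot/fake-uuid/kernel']}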


@ -1,141 +0,0 @@
# coding=utf-8
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for baremetal PDU power manager."""
from oslo.config import cfg
from nova import test
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import tilera_pdu
from nova.virt.baremetal import utils as bm_utils
CONF = cfg.CONF
class BareMetalPduTestCase(test.NoDBTestCase):
def setUp(self):
super(BareMetalPduTestCase, self).setUp()
self.flags(tile_power_wait=0, group='baremetal')
self.node = bm_db_utils.new_bm_node(
id=123,
pm_address='fake-address',
pm_user='fake-user',
pm_password='fake-password')
self.tilera_pdu = tilera_pdu.Pdu(self.node)
self.tile_pdu_on = 1
self.tile_pdu_off = 2
self.tile_pdu_status = 9
def test_construct(self):
self.assertEqual(self.tilera_pdu.node_id, 123)
self.assertEqual(self.tilera_pdu.address, 'fake-address')
self.assertEqual(self.tilera_pdu.user, 'fake-user')
self.assertEqual(self.tilera_pdu.password, 'fake-password')
def test_exec_pdutool(self):
self.flags(tile_pdu_mgr='fake-pdu-mgr', group='baremetal')
self.flags(tile_pdu_ip='fake-address', group='baremetal')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
args = [
'fake-pdu-mgr',
'fake-address',
self.tile_pdu_on,
]
utils.execute(*args).AndReturn('')
self.mox.ReplayAll()
self.tilera_pdu._exec_pdutool(self.tile_pdu_on)
self.mox.VerifyAll()
def test_is_power(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_on)
self.mox.ReplayAll()
self.tilera_pdu._is_power(self.tile_pdu_on)
self.mox.VerifyAll()
def test_power_already_on(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_on).AndReturn(None)
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_on)
self.mox.ReplayAll()
self.tilera_pdu.state = baremetal_states.DELETED
self.tilera_pdu._power_on()
self.mox.VerifyAll()
self.assertEqual(self.tilera_pdu.state, baremetal_states.ACTIVE)
def test_power_on_ok(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_on).AndReturn(None)
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_on)
self.mox.ReplayAll()
self.tilera_pdu.state = baremetal_states.DELETED
self.tilera_pdu._power_on()
self.mox.VerifyAll()
self.assertEqual(self.tilera_pdu.state, baremetal_states.ACTIVE)
def test_power_on_fail(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_on).AndReturn(None)
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_off)
self.mox.ReplayAll()
self.tilera_pdu.state = baremetal_states.DELETED
self.tilera_pdu._power_on()
self.mox.VerifyAll()
self.assertEqual(self.tilera_pdu.state, baremetal_states.ERROR)
def test_power_on_max_retries(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_on).AndReturn(None)
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_off)
self.mox.ReplayAll()
self.tilera_pdu.state = baremetal_states.DELETED
self.tilera_pdu._power_on()
self.mox.VerifyAll()
self.assertEqual(self.tilera_pdu.state, baremetal_states.ERROR)
def test_power_off_ok(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_off).AndReturn(None)
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_off)
self.mox.ReplayAll()
self.tilera_pdu.state = baremetal_states.ACTIVE
self.tilera_pdu._power_off()
self.mox.VerifyAll()
self.assertEqual(self.tilera_pdu.state, baremetal_states.DELETED)


@ -1,77 +0,0 @@
# coding=utf-8
# Copyright 2012,2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal utils."""
import errno
import os
import tempfile
from nova import test
from nova.virt.baremetal import utils
from nova.virt import images
class BareMetalUtilsTestCase(test.NoDBTestCase):
def test_random_alnum(self):
s = utils.random_alnum(10)
self.assertEqual(len(s), 10)
s = utils.random_alnum(100)
self.assertEqual(len(s), 100)
def test_unlink(self):
self.mox.StubOutWithMock(os, "unlink")
os.unlink("/fake/path")
self.mox.ReplayAll()
utils.unlink_without_raise("/fake/path")
self.mox.VerifyAll()
def test_unlink_ENOENT(self):
self.mox.StubOutWithMock(os, "unlink")
os.unlink("/fake/path").AndRaise(OSError(errno.ENOENT))
self.mox.ReplayAll()
utils.unlink_without_raise("/fake/path")
self.mox.VerifyAll()
def test_create_link(self):
self.mox.StubOutWithMock(os, "symlink")
os.symlink("/fake/source", "/fake/link")
self.mox.ReplayAll()
utils.create_link_without_raise("/fake/source", "/fake/link")
self.mox.VerifyAll()
def test_create_link_EEXIST(self):
self.mox.StubOutWithMock(os, "symlink")
os.symlink("/fake/source", "/fake/link").AndRaise(
OSError(errno.EEXIST))
self.mox.ReplayAll()
utils.create_link_without_raise("/fake/source", "/fake/link")
self.mox.VerifyAll()
def test_cache_image_with_clean(self):
self.mox.StubOutWithMock(images, "fetch_to_raw")
temp_f, temp_file = tempfile.mkstemp()
images.fetch_to_raw(None, None, temp_file, None, None)
self.mox.ReplayAll()
utils.cache_image(None, temp_file, None, None, None, clean=True)
self.mox.VerifyAll()
self.assertFalse(os.path.exists(temp_file))
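
The unlink/symlink tests above capture the contract of the *_without_raise helpers: removing a file that is already gone (ENOENT) or creating a link that already exists (EEXIST) is not an error. The sketch below covers only those two cases, which are the ones asserted above; the removed nova.virt.baremetal.utils helpers may well have swallowed and logged other OSErrors as well:

import errno
import os

def unlink_without_raise(path):
    # a file that is already missing is not treated as an error
    try:
        os.unlink(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

def create_link_without_raise(source, link):
    # likewise, a symlink that already exists is not treated as an error
    try:
        os.symlink(source, link)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise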


@ -1,392 +0,0 @@
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal virtual power driver."""
import mox
from oslo.config import cfg
from nova import exception
from nova.openstack.common import processutils
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import common as connection
from nova.virt.baremetal import db
from nova.virt.baremetal import virtual_power_driver
CONF = cfg.CONF
COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.pxe.PXE',
flavor_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager=
'nova.virt.baremetal.virtual_power_driver.VirtualPowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
virtual_power_ssh_host=None,
virtual_power_type='vbox',
virtual_power_host_user=None,
virtual_power_host_pass=None,
virtual_power_host_key=None,
group='baremetal',
)
class BareMetalVPDTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalVPDTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
fake_image.stub_out_image_service(self.stubs)
self.context = utils.get_test_admin_context()
self.test_block_device_info = None,
self.instance = utils.get_test_instance()
self.test_network_info = utils.get_test_network_info(),
self.node_info = bm_db_utils.new_bm_node(
id=123,
service_host='test_host',
cpus=2,
memory_mb=2048,
)
self.nic_info = [
{'address': '11:11:11:11:11:11', 'datapath_id': '0x1',
'port_no': 1},
{'address': '22:22:22:22:22:22', 'datapath_id': '0x2',
'port_no': 2},
]
self.addCleanup(fake_image.FakeImageService_reset)
def _create_node(self):
self.node = db.bm_node_create(self.context, self.node_info)
for nic in self.nic_info:
db.bm_interface_create(
self.context,
self.node['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
self.instance['node'] = self.node['id']
def _create_pm(self):
self.pm = virtual_power_driver.VirtualPowerManager(
node=self.node,
instance=self.instance)
return self.pm
class VPDMissingOptionsTestCase(BareMetalVPDTestCase):
def test_get_conn_missing_options(self):
self.flags(virtual_power_ssh_host=None, group="baremetal")
self.flags(virtual_power_host_user=None, group="baremetal")
self.flags(virtual_power_host_pass=None, group="baremetal")
self._create_node()
self._create_pm()
self._conn = None
self.assertRaises(exception.NovaException,
self.pm._get_conn)
self._conn = None
self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
self.assertRaises(exception.NovaException,
self.pm._get_conn)
self._conn = None
self.flags(virtual_power_host_user='user', group="baremetal")
self.assertRaises(exception.NovaException,
self.pm._get_conn)
class VPDClassMethodsTestCase(BareMetalVPDTestCase):
def setUp(self):
super(VPDClassMethodsTestCase, self).setUp()
self.flags(virtual_power_ssh_host='127.0.0.1', group="baremetal")
self.flags(virtual_power_host_user='user', group="baremetal")
self.flags(virtual_power_host_pass='password', group="baremetal")
def test_get_conn_success_pass(self):
self._create_node()
self._create_pm()
self._conn = self.pm._get_conn()
self.mox.StubOutWithMock(connection, 'ssh_connect')
connection.ssh_connect(mox.IsA(self._conn)).AndReturn(True)
self.mox.ReplayAll()
self.pm._set_connection()
self.assertEqual(self.pm.connection_data.host, '127.0.0.1')
self.assertEqual(self.pm.connection_data.username, 'user')
self.assertEqual(self.pm.connection_data.password, 'password')
self.assertIsNone(self.pm.connection_data.keyfile)
self.mox.VerifyAll()
def test_get_conn_success_key(self):
self.flags(virtual_power_host_pass='', group="baremetal")
self.flags(virtual_power_host_key='/id_rsa_file.txt',
group="baremetal")
self._create_node()
self._create_pm()
self._conn = self.pm._get_conn()
self.mox.StubOutWithMock(connection, 'ssh_connect')
connection.ssh_connect(mox.IsA(self._conn)).AndReturn(True)
self.mox.ReplayAll()
self.pm._set_connection()
self.assertEqual(self.pm.connection_data.host, '127.0.0.1')
self.assertEqual(self.pm.connection_data.username, 'user')
self.assertEqual(self.pm.connection_data.password, '')
self.assertEqual(self.pm.connection_data.keyfile, '/id_rsa_file.txt')
self.mox.VerifyAll()
def test_get_full_node_list(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_run_command')
cmd = self.pm._vp_cmd.list_cmd
self.pm._run_command(cmd).AndReturn("testNode")
self.mox.ReplayAll()
name = self.pm._get_full_node_list()
self.assertEqual(name, 'testNode')
self.mox.VerifyAll()
def test_check_for_node(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_get_full_node_list')
self.pm._get_full_node_list().\
AndReturn(["testNode"])
self.mox.StubOutWithMock(self.pm, '_run_command')
cmd = self.pm._vp_cmd.get_node_macs.replace('{_NodeName_}', 'testNode')
self.pm._run_command(cmd).\
AndReturn(["111111111111", "ffeeddccbbaa"])
self.mox.ReplayAll()
name = self.pm._check_for_node()
self.assertEqual(name, '"testNode"')
self.mox.VerifyAll()
def test_check_for_node_not_found(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_get_full_node_list')
self.pm._get_full_node_list().AndReturn(["testNode"])
self.mox.StubOutWithMock(self.pm, '_run_command')
cmd = self.pm._vp_cmd.get_node_macs.replace('{_NodeName_}', 'testNode')
self.pm._run_command(cmd).AndReturn(["ffeeddccbbaa"])
self.mox.ReplayAll()
name = self.pm._check_for_node()
self.assertEqual(name, '')
self.mox.VerifyAll()
def test_activate_node(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_check_for_node')
self.mox.StubOutWithMock(self.pm, '_run_command')
self.mox.StubOutWithMock(self.pm, 'is_power_on')
self.pm._check_for_node().AndReturn('"testNode"')
self.pm._run_command(self.pm._vp_cmd.start_cmd).AndReturn("Started")
self.pm.is_power_on().AndReturn(True)
self.mox.ReplayAll()
state = self.pm.activate_node()
self.assertEqual(state, 'active')
self.mox.VerifyAll()
def test_activate_node_fail(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_check_for_node')
self.mox.StubOutWithMock(self.pm, '_run_command')
self.mox.StubOutWithMock(self.pm, 'is_power_on')
self.pm._check_for_node().AndReturn('"testNode"')
self.pm._run_command(self.pm._vp_cmd.start_cmd).AndReturn("Started")
self.pm.is_power_on().AndReturn(False)
self.mox.ReplayAll()
state = self.pm.activate_node()
self.assertEqual(state, 'error')
self.mox.VerifyAll()
def test_deactivate_node(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_check_for_node')
self.mox.StubOutWithMock(self.pm, '_run_command')
self.mox.StubOutWithMock(self.pm, 'is_power_on')
self.pm._check_for_node().AndReturn('"testNode"')
self.pm.is_power_on().AndReturn(True)
self.pm._run_command(self.pm._vp_cmd.stop_cmd).AndReturn("Stopped")
self.pm.is_power_on().AndReturn(False)
self.mox.ReplayAll()
state = self.pm.deactivate_node()
self.assertEqual(state, 'deleted')
self.mox.VerifyAll()
def test_deactivate_node_fail(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_check_for_node')
self.mox.StubOutWithMock(self.pm, '_run_command')
self.mox.StubOutWithMock(self.pm, 'is_power_on')
self.pm._check_for_node().AndReturn('"testNode"')
self.pm.is_power_on().AndReturn(True)
self.pm._run_command(self.pm._vp_cmd.stop_cmd).AndReturn("Stopped")
self.pm.is_power_on().AndReturn(True)
self.mox.ReplayAll()
state = self.pm.deactivate_node()
self.assertEqual(state, 'error')
self.mox.VerifyAll()
def test_reboot_node(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_check_for_node')
self.mox.StubOutWithMock(self.pm, '_run_command')
self.mox.StubOutWithMock(self.pm, 'is_power_on')
self.pm._check_for_node().AndReturn(['"testNode"'])
self.pm._run_command(self.pm._vp_cmd.reboot_cmd).AndReturn("Restarted")
self.pm.is_power_on().AndReturn(True)
self.mox.ReplayAll()
state = self.pm.reboot_node()
self.assertEqual(state, 'active')
self.mox.VerifyAll()
def test_reboot_node_fail(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_check_for_node')
self.mox.StubOutWithMock(self.pm, '_run_command')
self.mox.StubOutWithMock(self.pm, 'is_power_on')
self.pm._check_for_node().AndReturn(['"testNode"'])
self.pm._run_command(self.pm._vp_cmd.reboot_cmd).AndReturn("Restarted")
self.pm.is_power_on().AndReturn(False)
self.mox.ReplayAll()
state = self.pm.reboot_node()
self.assertEqual(state, 'error')
self.mox.VerifyAll()
def test_is_power_on(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_check_for_node')
self.mox.StubOutWithMock(self.pm, '_run_command')
self.pm._check_for_node().AndReturn(['"testNode"'])
self.pm._run_command(self.pm._vp_cmd.list_running_cmd).\
AndReturn(['"testNode"'])
self.pm._matched_name = 'testNode'
self.mox.ReplayAll()
state = self.pm.is_power_on()
self.assertEqual(state, True)
self.mox.VerifyAll()
def test_is_power_on_fail(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_check_for_node')
self.pm._check_for_node().AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.NodeNotFound, self.pm.is_power_on)
self.mox.VerifyAll()
def test_is_power_on_match_subname(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_check_for_node')
self.mox.StubOutWithMock(self.pm, '_run_command')
self.pm._check_for_node().AndReturn(['"testNode"'])
self.pm._run_command(self.pm._vp_cmd.list_running_cmd).\
AndReturn(['"testNode01"'])
self.pm._matched_name = '"testNode"'
self.mox.ReplayAll()
state = self.pm.is_power_on()
self.assertEqual(state, False)
self.mox.VerifyAll()
def test_run_command(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_set_connection')
self.mox.StubOutWithMock(processutils, 'ssh_execute')
self.pm._set_connection().AndReturn(True)
processutils.ssh_execute(None, '/usr/bin/VBoxManage test return',
check_exit_code=True).AndReturn(("test\nreturn", ""))
self.pm._matched_name = 'testNode'
self.mox.ReplayAll()
result = self.pm._run_command("test return")
self.assertEqual(result, ['test', 'return'])
self.mox.VerifyAll()
def test_run_command_raises_exception(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_set_connection')
self.mox.StubOutWithMock(processutils, 'ssh_execute')
self.pm._set_connection().AndReturn(True)
processutils.ssh_execute(None, '/usr/bin/VBoxManage test return',
check_exit_code=True).\
AndRaise(processutils.ProcessExecutionError)
self.mox.ReplayAll()
result = self.pm._run_command("test return")
self.assertEqual(result, [])
self.mox.VerifyAll()
def test_activate_node_with_exception(self):
self._create_node()
self._create_pm()
self.mox.StubOutWithMock(self.pm, '_check_for_node')
self.mox.StubOutWithMock(processutils, 'ssh_execute')
self.pm._check_for_node().AndReturn(['"testNode"'])
self.pm._check_for_node().AndReturn(['"testNode"'])
processutils.ssh_execute('test', '/usr/bin/VBoxManage startvm ',
check_exit_code=True).\
AndRaise(processutils.ProcessExecutionError)
processutils.ssh_execute('test', '/usr/bin/VBoxManage list runningvms',
check_exit_code=True).\
AndRaise(processutils.ProcessExecutionError)
self.mox.ReplayAll()
self.pm._connection = 'test'
state = self.pm.activate_node()
self.assertEqual(state, 'error')
self.mox.VerifyAll()
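
The last two is_power_on tests encode a subtle matching rule: VBoxManage reports VM names wrapped in double quotes, so a plain substring check against the quoted running-VM lines behaves like an exact-name match ("testNode" can never match "testNode01"). The snippet below is a short illustration of that rule, consistent with both assertions above but not taken from the removed driver:

def node_is_running(matched_name, running_vms):
    # the quotes in VBoxManage output make the substring test exact:
    # '"testNode"' is not a substring of '"testNode01"'
    return any(matched_name in vm for vm in running_vms)

assert node_is_running('"testNode"', ['"testNode"'])
assert not node_is_running('"testNode"', ['"testNode01"'])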


@ -1,294 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal volume driver."""
from oslo.config import cfg
from nova import exception
from nova import test
from nova.virt.baremetal import volume_driver
from nova.virt import fake
from nova.virt.libvirt import volume as libvirt_volume
CONF = cfg.CONF
SHOW_OUTPUT = """Target 1: iqn.2010-10.org.openstack:volume-00000001
System information:
Driver: iscsi
State: ready
I_T nexus information:
I_T nexus: 8
Initiator: iqn.1993-08.org.debian:01:7780c6a16b4
Connection: 0
IP Address: 172.17.12.10
LUN information:
LUN: 0
Type: controller
SCSI ID: IET 00010000
SCSI SN: beaf10
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET 00010001
SCSI SN: beaf11
Size: 1074 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/nova-volumes/volume-00000001
Backing store flags:
Account information:
ACL information:
ALL
Target 2: iqn.2010-10.org.openstack:volume-00000002
System information:
Driver: iscsi
State: ready
I_T nexus information:
LUN information:
LUN: 0
Type: controller
SCSI ID: IET 00020000
SCSI SN: beaf20
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET 00020001
SCSI SN: beaf21
Size: 2147 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/nova-volumes/volume-00000002
Backing store flags:
Account information:
ACL information:
ALL
Target 1000001: iqn.2010-10.org.openstack.baremetal:1000001-dev.vdc
System information:
Driver: iscsi
State: ready
I_T nexus information:
LUN information:
LUN: 0
Type: controller
SCSI ID: IET f42410000
SCSI SN: beaf10000010
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET f42410001
SCSI SN: beaf10000011
Size: 1074 MB, Block size: 512
Online: Yes
Removable media: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/disk/by-path/ip-172.17.12.10:3260-iscsi-\
iqn.2010-10.org.openstack:volume-00000001-lun-1
Backing store flags:
Account information:
ACL information:
ALL
"""
def fake_show_tgtadm():
return SHOW_OUTPUT
class BareMetalVolumeTestCase(test.NoDBTestCase):
def setUp(self):
super(BareMetalVolumeTestCase, self).setUp()
self.stubs.Set(volume_driver, '_show_tgtadm', fake_show_tgtadm)
def test_list_backingstore_path(self):
l = volume_driver._list_backingstore_path()
self.assertEqual(len(l), 3)
self.assertIn('/dev/nova-volumes/volume-00000001', l)
self.assertIn('/dev/nova-volumes/volume-00000002', l)
self.assertIn('/dev/disk/by-path/ip-172.17.12.10:3260-iscsi-'
'iqn.2010-10.org.openstack:volume-00000001-lun-1', l)
def test_get_next_tid(self):
tid = volume_driver._get_next_tid()
self.assertEqual(1000002, tid)
def test_find_tid_found(self):
tid = volume_driver._find_tid(
'iqn.2010-10.org.openstack.baremetal:1000001-dev.vdc')
self.assertEqual(1000001, tid)
def test_find_tid_not_found(self):
tid = volume_driver._find_tid(
'iqn.2010-10.org.openstack.baremetal:1000002-dev.vdc')
self.assertIsNone(tid)
def test_get_iqn(self):
self.flags(iscsi_iqn_prefix='iqn.2012-12.a.b', group='baremetal')
iqn = volume_driver._get_iqn('instname', '/dev/vdx')
self.assertEqual('iqn.2012-12.a.b:instname-dev-vdx', iqn)
class FakeConf(object):
def __init__(self, source_path):
self.source_path = source_path
class BareMetalLibVirtVolumeDriverTestCase(test.TestCase):
def setUp(self):
super(BareMetalLibVirtVolumeDriverTestCase, self).setUp()
self.flags(volume_drivers=[
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'fake2=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
], group='libvirt')
self.driver = volume_driver.LibvirtVolumeDriver(fake.FakeVirtAPI())
self.disk_info = {
'dev': 'vdc',
'bus': 'baremetal',
'type': 'baremetal',
}
self.connection_info = {'driver_volume_type': 'fake'}
self.mount_point = '/dev/vdc'
self.mount_device = 'vdc'
self.source_path = '/dev/sdx'
self.instance = {'uuid': '12345678-1234-1234-1234-123467890123456',
'name': 'instance-00000001'}
self.fixed_ips = [{'address': '10.2.3.4'},
{'address': '172.16.17.18'},
]
self.iqn = 'iqn.fake:instance-00000001-dev-vdc'
self.tid = 100
def test_init_loads_volume_drivers(self):
self.assertIsInstance(self.driver.volume_drivers['fake'],
libvirt_volume.LibvirtFakeVolumeDriver)
self.assertIsInstance(self.driver.volume_drivers['fake2'],
libvirt_volume.LibvirtFakeVolumeDriver)
self.assertEqual(len(self.driver.volume_drivers), 2)
def test_fake_connect_volume(self):
"""Check connect_volume returns without exceptions."""
self.driver._connect_volume(self.connection_info,
self.disk_info)
def test_volume_driver_method_ok(self):
fake_driver = self.driver.volume_drivers['fake']
self.mox.StubOutWithMock(fake_driver, 'connect_volume')
fake_driver.connect_volume(self.connection_info, self.disk_info)
self.mox.ReplayAll()
self.driver._connect_volume(self.connection_info,
self.disk_info)
def test_volume_driver_method_driver_type_not_found(self):
self.connection_info['driver_volume_type'] = 'qwerty'
self.assertRaises(exception.VolumeDriverNotFound,
self.driver._connect_volume,
self.connection_info,
self.disk_info)
def test_publish_iscsi(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_get_next_tid')
self.mox.StubOutWithMock(volume_driver, '_create_iscsi_export_tgtadm')
self.mox.StubOutWithMock(volume_driver, '_allow_iscsi_tgtadm')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._get_next_tid().AndReturn(self.tid)
volume_driver._create_iscsi_export_tgtadm(self.source_path,
self.tid,
self.iqn)
volume_driver._allow_iscsi_tgtadm(self.tid,
self.fixed_ips[0]['address'])
volume_driver._allow_iscsi_tgtadm(self.tid,
self.fixed_ips[1]['address'])
self.mox.ReplayAll()
self.driver._publish_iscsi(self.instance,
self.mount_point,
self.fixed_ips,
self.source_path)
def test_depublish_iscsi_ok(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_find_tid')
self.mox.StubOutWithMock(volume_driver, '_delete_iscsi_export_tgtadm')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._find_tid(self.iqn).AndReturn(self.tid)
volume_driver._delete_iscsi_export_tgtadm(self.tid)
self.mox.ReplayAll()
self.driver._depublish_iscsi(self.instance, self.mount_point)
def test_depublish_iscsi_do_nothing_if_tid_is_not_found(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_find_tid')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._find_tid(self.iqn).AndReturn(None)
self.mox.ReplayAll()
self.driver._depublish_iscsi(self.instance, self.mount_point)
def test_attach_volume(self):
self.mox.StubOutWithMock(volume_driver, '_get_fixed_ips')
self.mox.StubOutWithMock(self.driver, '_connect_volume')
self.mox.StubOutWithMock(self.driver, '_publish_iscsi')
volume_driver._get_fixed_ips(self.instance).AndReturn(self.fixed_ips)
self.driver._connect_volume(self.connection_info, self.disk_info).\
AndReturn(FakeConf(self.source_path))
self.driver._publish_iscsi(self.instance, self.mount_point,
self.fixed_ips, self.source_path)
self.mox.ReplayAll()
self.driver.attach_volume(self.connection_info,
self.instance,
self.mount_point)
def test_detach_volume(self):
self.mox.StubOutWithMock(volume_driver, '_get_iqn')
self.mox.StubOutWithMock(volume_driver, '_find_tid')
self.mox.StubOutWithMock(volume_driver, '_delete_iscsi_export_tgtadm')
self.mox.StubOutWithMock(self.driver, '_disconnect_volume')
volume_driver._get_iqn(self.instance['name'], self.mount_point).\
AndReturn(self.iqn)
volume_driver._find_tid(self.iqn).AndReturn(self.tid)
volume_driver._delete_iscsi_export_tgtadm(self.tid)
self.driver._disconnect_volume(self.connection_info,
self.mount_device)
self.mox.ReplayAll()
self.driver.detach_volume(self.connection_info,
self.instance,
self.mount_point)


@ -1,17 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.baremetal import driver
BareMetalDriver = driver.BareMetalDriver


@ -1,34 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Possible baremetal node states for instances.
Compute instance baremetal states represent the state of an instance as it
pertains to a user or administrator. When combined with task states
(task_states.py), a better picture can be formed regarding the instance's
health.
"""
ACTIVE = 'active'
BUILDING = 'building'
DEPLOYING = 'deploying'
DEPLOYFAIL = 'deploy failed'
DEPLOYDONE = 'deploy complete'
DELETED = 'deleted'
ERROR = 'error'
PREPARED = 'prepared'


@ -1,84 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.baremetal import baremetal_states
class NodeDriver(object):
def __init__(self, virtapi):
self.virtapi = virtapi
def cache_images(self, context, node, instance, **kwargs):
raise NotImplementedError()
def destroy_images(self, context, node, instance):
raise NotImplementedError()
def activate_bootloader(self, context, node, instance, **kwargs):
raise NotImplementedError()
def deactivate_bootloader(self, context, node, instance):
raise NotImplementedError()
def activate_node(self, context, node, instance):
"""For operations after power on."""
raise NotImplementedError()
def deactivate_node(self, context, node, instance):
"""For operations before power off."""
raise NotImplementedError()
def get_console_output(self, node, instance):
raise NotImplementedError()
def dhcp_options_for_instance(self, instance):
"""Optional override to return the DHCP options to use for instance.
If no DHCP options are needed, this should not be overridden or None
should be returned.
"""
return None
class PowerManager(object):
def __init__(self, **kwargs):
self.state = baremetal_states.DELETED
pass
def activate_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def reboot_node(self):
self.state = baremetal_states.ACTIVE
return self.state
def deactivate_node(self):
self.state = baremetal_states.DELETED
return self.state
def is_power_on(self):
"""Returns True or False according as the node's power state."""
return True
# TODO(NTTdocomo): split out console methods to its own class
def start_console(self):
pass
def stop_console(self):
pass
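# A minimal sketch of a concrete power manager built on the PowerManager base
# class above. get_power_manager() in driver.py (later in this change) simply
# instantiates whatever class the [baremetal]/power_manager option names, so a
# real implementation only needs to honour this contract. The class name and
# the node handling below are hypothetical, shown for illustration only.
class ExamplePowerManager(PowerManager):
    def __init__(self, node=None, **kwargs):
        super(ExamplePowerManager, self).__init__(**kwargs)
        self.node = node

    def activate_node(self):
        # a real manager would drive the BMC/PDU here before reporting state
        self.state = baremetal_states.ACTIVE
        return self.state

    def deactivate_node(self):
        self.state = baremetal_states.DELETED
        return self.state

    def is_power_on(self):
        return self.state == baremetal_states.ACTIVE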


@ -1,66 +0,0 @@
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import paramiko
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONNECTION_TIMEOUT = 60
class ConnectionFailed(exception.NovaException):
msg_fmt = _('Connection failed')
class Connection(object):
def __init__(self, host, username, password, port=22, keyfile=None):
self.host = host
self.username = username
self.password = password
self.port = port
self.keyfile = keyfile
def ssh_connect(connection):
"""Method to connect to remote system using ssh protocol.
:param connection: a Connection object.
:returns: paramiko.SSHClient -- an active ssh connection.
:raises: ConnectionFailed
"""
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(connection.host,
username=connection.username,
password=connection.password,
port=connection.port,
key_filename=connection.keyfile,
timeout=CONNECTION_TIMEOUT)
LOG.debug("SSH connection with %s established successfully." %
connection.host)
# send TCP keepalive packets every 20 seconds
ssh.get_transport().set_keepalive(20)
return ssh
except Exception:
LOG.exception(_('Connection error'))
raise ConnectionFailed()
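# A short usage sketch for the Connection/ssh_connect helpers above. The host,
# credentials, and command are placeholders; only standard paramiko calls are
# made on the returned client.
conn = Connection(host='10.1.2.3', username='admin', password='secret')
ssh = ssh_connect(conn)   # raises ConnectionFailed if the connect fails
try:
    _stdin, stdout, _stderr = ssh.exec_command('uname -a')
    print(stdout.read())
finally:
    ssh.close()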


@ -1,16 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.baremetal.db.api import * # noqa


@ -1,148 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the nova.virt.baremetal.db
namespace. Call these functions from nova.virt.baremetal.db namespace, not
the nova.virt.baremetal.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:baremetal_db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:[BAREMETAL] sql_connection: string specifying the sqlalchemy connection to
use, like: `sqlite:///var/lib/nova/nova.sqlite`.
"""
from oslo.config import cfg
from nova import utils
# NOTE(deva): we can't move baremetal_db_backend into an OptGroup yet
# because utils.LazyPluggable doesn't support reading from
# option groups. See bug #1093043.
db_opts = [
cfg.StrOpt('db_backend',
default='sqlalchemy',
help='The backend to use for bare-metal database'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(db_opts, baremetal_group)
IMPL = utils.LazyPluggable(
'db_backend',
config_group='baremetal',
sqlalchemy='nova.virt.baremetal.db.sqlalchemy.api')
def bm_node_get_all(context, service_host=None):
return IMPL.bm_node_get_all(context,
service_host=service_host)
def bm_node_get_associated(context, service_host=None):
return IMPL.bm_node_get_associated(context,
service_host=service_host)
def bm_node_get_unassociated(context, service_host=None):
return IMPL.bm_node_get_unassociated(context,
service_host=service_host)
def bm_node_find_free(context, service_host=None,
memory_mb=None, cpus=None, local_gb=None):
return IMPL.bm_node_find_free(context,
service_host=service_host,
memory_mb=memory_mb,
cpus=cpus,
local_gb=local_gb)
def bm_node_get(context, bm_node_id):
return IMPL.bm_node_get(context, bm_node_id)
def bm_node_get_by_instance_uuid(context, instance_uuid):
return IMPL.bm_node_get_by_instance_uuid(context,
instance_uuid)
def bm_node_get_by_node_uuid(context, node_uuid):
return IMPL.bm_node_get_by_node_uuid(context, node_uuid)
def bm_node_create(context, values):
return IMPL.bm_node_create(context, values)
def bm_node_destroy(context, bm_node_id):
return IMPL.bm_node_destroy(context, bm_node_id)
def bm_node_update(context, bm_node_id, values):
return IMPL.bm_node_update(context, bm_node_id, values)
def bm_node_associate_and_update(context, node_uuid, values):
return IMPL.bm_node_associate_and_update(context, node_uuid, values)
def bm_interface_get(context, if_id):
return IMPL.bm_interface_get(context, if_id)
def bm_interface_get_all(context):
return IMPL.bm_interface_get_all(context)
def bm_interface_destroy(context, if_id):
return IMPL.bm_interface_destroy(context, if_id)
def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
return IMPL.bm_interface_create(context, bm_node_id, address,
datapath_id, port_no)
def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
return IMPL.bm_interface_set_vif_uuid(context, if_id, vif_uuid)
def bm_interface_get_by_vif_uuid(context, vif_uuid):
return IMPL.bm_interface_get_by_vif_uuid(context, vif_uuid)
def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
return IMPL.bm_interface_get_all_by_bm_node_id(context, bm_node_id)
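# A sketch of the typical call pattern this module exposed to the compute
# driver: find a free node that satisfies the flavor, then claim it atomically
# for an instance (compare spawn() in driver.py later in this change). The
# service host, sizes, and instance UUID are hypothetical placeholders.
from nova import context as nova_context
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db

ctx = nova_context.get_admin_context()
node = db.bm_node_find_free(ctx, service_host='bm-compute-1',
                            memory_mb=4096, cpus=2, local_gb=40)
if node:
    db.bm_node_associate_and_update(
        ctx, node['uuid'],
        {'instance_uuid': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
         'task_state': baremetal_states.BUILDING})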


@ -1,40 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Database setup and migration commands."""
from nova import utils
IMPL = utils.LazyPluggable(
'db_backend',
config_group='baremetal',
sqlalchemy='nova.virt.baremetal.db.sqlalchemy.migration')
def db_sync(version=None):
"""Migrate the database to `version` or the most recent version."""
return IMPL.db_sync(version=version)
def db_version():
"""Display the current database version."""
return IMPL.db_version()
def db_initial_version():
"""The starting version for the database."""
return IMPL.db_initial_version()


@ -1,328 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import uuid
from oslo.db import exception as db_exc
from oslo.utils import timeutils
import six
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql import null
import nova.context
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova.i18n import _
from nova.openstack.common import uuidutils
from nova.virt.baremetal.db.sqlalchemy import models
from nova.virt.baremetal.db.sqlalchemy import session as db_session
def model_query(context, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id.
"""
session = kwargs.get('session') or db_session.get_session()
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only')
query = session.query(*args)
if read_deleted == 'no':
query = query.filter_by(deleted=False)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter_by(deleted=True)
else:
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
if project_only and nova.context.is_user_context(context):
query = query.filter_by(project_id=context.project_id)
return query
def _save(ref, session=None):
if not session:
session = db_session.get_session()
# We must not call ref.save() with session=None, otherwise NovaBase
# uses nova-db's session, which cannot access bm-db.
ref.save(session=session)
def _build_node_order_by(query):
query = query.order_by(asc(models.BareMetalNode.memory_mb))
query = query.order_by(asc(models.BareMetalNode.cpus))
query = query.order_by(asc(models.BareMetalNode.local_gb))
return query
@sqlalchemy_api.require_admin_context
def bm_node_get_all(context, service_host=None):
query = model_query(context, models.BareMetalNode, read_deleted="no")
if service_host:
query = query.filter_by(service_host=service_host)
return query.all()
@sqlalchemy_api.require_admin_context
def bm_node_get_associated(context, service_host=None):
query = model_query(context, models.BareMetalNode, read_deleted="no").\
filter(models.BareMetalNode.instance_uuid != null())
if service_host:
query = query.filter_by(service_host=service_host)
return query.all()
@sqlalchemy_api.require_admin_context
def bm_node_get_unassociated(context, service_host=None):
query = model_query(context, models.BareMetalNode, read_deleted="no").\
filter(models.BareMetalNode.instance_uuid == null())
if service_host:
query = query.filter_by(service_host=service_host)
return query.all()
@sqlalchemy_api.require_admin_context
def bm_node_find_free(context, service_host=None,
cpus=None, memory_mb=None, local_gb=None):
query = model_query(context, models.BareMetalNode, read_deleted="no")
query = query.filter(models.BareMetalNode.instance_uuid == null())
if service_host:
query = query.filter_by(service_host=service_host)
if cpus is not None:
query = query.filter(models.BareMetalNode.cpus >= cpus)
if memory_mb is not None:
query = query.filter(models.BareMetalNode.memory_mb >= memory_mb)
if local_gb is not None:
query = query.filter(models.BareMetalNode.local_gb >= local_gb)
query = _build_node_order_by(query)
return query.first()
@sqlalchemy_api.require_admin_context
def bm_node_get(context, bm_node_id):
# bm_node_id may be passed as a string. Convert to INT to improve DB perf.
bm_node_id = int(bm_node_id)
result = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(id=bm_node_id).\
first()
if not result:
raise exception.NodeNotFound(node_id=bm_node_id)
return result
@sqlalchemy_api.require_admin_context
def bm_node_get_by_instance_uuid(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InstanceNotFound(instance_id=instance_uuid)
result = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(instance_uuid=instance_uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_uuid)
return result
@sqlalchemy_api.require_admin_context
def bm_node_get_by_node_uuid(context, bm_node_uuid):
result = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(uuid=bm_node_uuid).\
first()
if not result:
raise exception.NodeNotFoundByUUID(node_uuid=bm_node_uuid)
return result
@sqlalchemy_api.require_admin_context
def bm_node_create(context, values):
if not values.get('uuid'):
values['uuid'] = str(uuid.uuid4())
bm_node_ref = models.BareMetalNode()
bm_node_ref.update(values)
_save(bm_node_ref)
return bm_node_ref
@sqlalchemy_api.require_admin_context
def bm_node_update(context, bm_node_id, values):
rows = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(id=bm_node_id).\
update(values)
if not rows:
raise exception.NodeNotFound(node_id=bm_node_id)
@sqlalchemy_api.require_admin_context
def bm_node_associate_and_update(context, node_uuid, values):
"""Associate an instance to a node safely
Associate an instance to a node only if that node is not yet associated.
Allow the caller to set any other fields they require in the same
operation. For example, this is used to set the node's task_state to
BUILDING at the beginning of driver.spawn().
"""
if 'instance_uuid' not in values:
raise exception.NovaException(_(
"instance_uuid must be supplied to bm_node_associate_and_update"))
session = db_session.get_session()
with session.begin():
query = model_query(context, models.BareMetalNode,
session=session, read_deleted="no").\
filter_by(uuid=node_uuid)
count = query.filter_by(instance_uuid=None).\
update(values, synchronize_session=False)
if count != 1:
raise exception.NovaException(_(
"Failed to associate instance %(i_uuid)s to baremetal node "
"%(n_uuid)s.") % {'i_uuid': values['instance_uuid'],
'n_uuid': node_uuid})
ref = query.first()
return ref
@sqlalchemy_api.require_admin_context
def bm_node_destroy(context, bm_node_id):
# First, delete all interfaces belonging to the node.
# Delete physically since these have unique columns.
session = db_session.get_session()
with session.begin():
model_query(context, models.BareMetalInterface, read_deleted="no").\
filter_by(bm_node_id=bm_node_id).\
delete()
rows = model_query(context, models.BareMetalNode, read_deleted="no").\
filter_by(id=bm_node_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
if not rows:
raise exception.NodeNotFound(node_id=bm_node_id)
@sqlalchemy_api.require_admin_context
def bm_interface_get(context, if_id):
result = model_query(context, models.BareMetalInterface,
read_deleted="no").\
filter_by(id=if_id).\
first()
if not result:
raise exception.NovaException(_("Baremetal interface %s "
"not found") % if_id)
return result
@sqlalchemy_api.require_admin_context
def bm_interface_get_all(context):
query = model_query(context, models.BareMetalInterface,
read_deleted="no")
return query.all()
@sqlalchemy_api.require_admin_context
def bm_interface_destroy(context, if_id):
# Delete physically since it has unique columns
model_query(context, models.BareMetalInterface, read_deleted="no").\
filter_by(id=if_id).\
delete()
@sqlalchemy_api.require_admin_context
def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
ref = models.BareMetalInterface()
ref.bm_node_id = bm_node_id
ref.address = address
ref.datapath_id = datapath_id
ref.port_no = port_no
_save(ref)
return ref.id
@sqlalchemy_api.require_admin_context
def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
session = db_session.get_session()
with session.begin():
bm_interface = model_query(context, models.BareMetalInterface,
read_deleted="no", session=session).\
filter_by(id=if_id).\
with_lockmode('update').\
first()
if not bm_interface:
raise exception.NovaException(_("Baremetal interface %s "
"not found") % if_id)
bm_interface.vif_uuid = vif_uuid
try:
session.add(bm_interface)
session.flush()
except db_exc.DBError as e:
# TODO(deva): clean up when db layer raises DuplicateKeyError
if six.text_type(e).find('IntegrityError') != -1:
raise exception.NovaException(_("Baremetal interface %s "
"already in use") % vif_uuid)
raise
@sqlalchemy_api.require_admin_context
def bm_interface_get_by_vif_uuid(context, vif_uuid):
result = model_query(context, models.BareMetalInterface,
read_deleted="no").\
filter_by(vif_uuid=vif_uuid).\
first()
if not result:
raise exception.NovaException(_("Baremetal virtual interface %s "
"not found") % vif_uuid)
return result
@sqlalchemy_api.require_admin_context
def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
result = model_query(context, models.BareMetalInterface,
read_deleted="no").\
filter_by(bm_node_id=bm_node_id).\
all()
if not result:
raise exception.NodeNotFound(node_id=bm_node_id)
return result
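# An illustrative helper in the style of this module, showing the read_deleted
# switch documented on model_query() above: fetch only soft-deleted nodes for
# one service host. The helper name is hypothetical and was not part of the
# removed module.
@sqlalchemy_api.require_admin_context
def bm_node_get_deleted_by_host(context, service_host):
    return model_query(context, models.BareMetalNode, read_deleted="only").\
        filter_by(service_host=service_host).\
        all()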


@ -1,20 +0,0 @@
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
repository_id=nova_bm
# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version
# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=[]


@ -1,113 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import Index, Integer, MetaData, String, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
bm_nodes = Table('bm_nodes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('cpus', Integer),
Column('memory_mb', Integer),
Column('local_gb', Integer),
Column('pm_address', String(length=255)),
Column('pm_user', String(length=255)),
Column('pm_password', String(length=255)),
Column('service_host', String(length=255)),
Column('prov_mac_address', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('registration_status', String(length=16)),
Column('task_state', String(length=255)),
Column('prov_vlan_id', Integer),
Column('terminal_port', Integer),
mysql_engine='InnoDB',
)
bm_interfaces = Table('bm_interfaces', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('bm_node_id', Integer),
Column('address', String(length=255), unique=True),
Column('datapath_id', String(length=255)),
Column('port_no', Integer),
Column('vif_uuid', String(length=36), unique=True),
mysql_engine='InnoDB',
)
bm_pxe_ips = Table('bm_pxe_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255), unique=True),
Column('bm_node_id', Integer),
Column('server_address', String(length=255), unique=True),
mysql_engine='InnoDB',
)
bm_deployments = Table('bm_deployments', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('bm_node_id', Integer),
Column('key', String(length=255)),
Column('image_path', String(length=255)),
Column('pxe_config_path', String(length=255)),
Column('root_mb', Integer),
Column('swap_mb', Integer),
mysql_engine='InnoDB',
)
bm_nodes.create()
bm_interfaces.create()
bm_pxe_ips.create()
bm_deployments.create()
Index('idx_bm_nodes_service_host_deleted',
bm_nodes.c.service_host, bm_nodes.c.deleted)\
.create(migrate_engine)
Index('idx_bm_nodes_instance_uuid_deleted',
bm_nodes.c.instance_uuid, bm_nodes.c.deleted)\
.create(migrate_engine)
Index('idx_bm_nodes_hmcld',
bm_nodes.c.service_host, bm_nodes.c.memory_mb, bm_nodes.c.cpus,
bm_nodes.c.local_gb, bm_nodes.c.deleted)\
.create(migrate_engine)
Index('idx_bm_interfaces_bm_node_id_deleted',
bm_interfaces.c.bm_node_id, bm_interfaces.c.deleted)\
.create(migrate_engine)
Index('idx_bm_pxe_ips_bm_node_id_deleted',
bm_pxe_ips.c.bm_node_id, bm_pxe_ips.c.deleted)\
.create(migrate_engine)
def downgrade(migrate_engine):
raise NotImplementedError('Downgrade from 001_init is unsupported.')


@ -1,66 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, Index, MetaData, Table
from sqlalchemy import Integer, String, DateTime, Boolean
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
bm_nodes = Table('bm_nodes', meta, autoload=True)
image_path = Column('image_path', String(length=255))
pxe_config_path = Column('pxe_config_path', String(length=255))
deploy_key = Column('deploy_key', String(length=255))
root_mb = Column('root_mb', Integer())
swap_mb = Column('swap_mb', Integer())
for c in [image_path, pxe_config_path, deploy_key, root_mb, swap_mb]:
bm_nodes.create_column(c)
deploy_key_idx = Index('deploy_key_idx', bm_nodes.c.deploy_key)
deploy_key_idx.create(migrate_engine)
bm_deployments = Table('bm_deployments', meta, autoload=True)
bm_deployments.drop()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
bm_nodes = Table('bm_nodes', meta, autoload=True)
for c in ['image_path', 'pxe_config_path', 'deploy_key', 'root_mb',
'swap_mb']:
bm_nodes.drop_column(c)
bm_deployments = Table('bm_deployments', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('bm_node_id', Integer),
Column('key', String(length=255)),
Column('image_path', String(length=255)),
Column('pxe_config_path', String(length=255)),
Column('root_mb', Integer),
Column('swap_mb', Integer),
mysql_engine='InnoDB',
)
bm_deployments.create()


@ -1,37 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, String, Table, Index
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
t = Table('bm_nodes', meta, autoload=True)
uuid_col = Column('uuid', String(36))
t.create_column(uuid_col)
uuid_ux = Index('uuid_ux', t.c.uuid, unique=True)
uuid_ux.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
t = Table('bm_nodes', meta, autoload=True)
t.drop_column('uuid')


@ -1,33 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, String, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
t = Table('bm_nodes', meta, autoload=True)
name_col = Column('instance_name', String(255))
t.create_column(name_col)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
t = Table('bm_nodes', meta, autoload=True)
t.drop_column('instance_name')


@ -1,33 +0,0 @@
# Copyright (c) 2013 NTT DOCOMO, INC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, String, Integer, MetaData, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
nodes = Table('bm_nodes', meta, autoload=True)
nodes.drop_column('prov_vlan_id')
nodes.drop_column('registration_status')
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
nodes = Table('bm_nodes', meta, autoload=True)
nodes.create_column(Column('prov_vlan_id', Integer))
nodes.create_column(Column('registration_status', String(length=16)))


@ -1,87 +0,0 @@
# Copyright (c) 2013 NTT DOCOMO, INC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.db import exception as db_exc
from sqlalchemy import MetaData, Table, exists
from sqlalchemy import sql
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
nodes = Table('bm_nodes', meta, autoload=True)
ifs = Table('bm_interfaces', meta, autoload=True)
q = sql.select([nodes.c.id, nodes.c.prov_mac_address],
from_obj=nodes)
# Iterate all elements before starting insert since IntegrityError
# may disturb the iteration.
node_address = {}
for node_id, address in q.execute():
node_address[node_id] = address
i = ifs.insert()
for node_id, address in node_address.iteritems():
try:
i.execute({'bm_node_id': node_id, 'address': address})
except db_exc.DBError:
# TODO(ekudryashova): replace by DBReferenceError when db layer
# raise it.
# The address is registered in both bm_nodes and bm_interfaces.
# It is expected.
pass
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
nodes = Table('bm_nodes', meta, autoload=True)
ifs = Table('bm_interfaces', meta, autoload=True)
subq = exists().where(sql.and_(
ifs.c.bm_node_id == nodes.c.id,
ifs.c.address == nodes.c.prov_mac_address))
ifs.delete().where(subq).execute()
# NOTE(arata):
# In fact, this downgrade may not return the db to the previous state.
# It does not seem to be much of a problem, so this is just a memo.
#
# Consider these two states before upgrading:
#
# (A) address 'x' is duplicate
# bm_nodes.prov_mac_address='x'
# bm_interfaces.address=['x', 'y']
#
# (B) no address is duplicate
# bm_nodes.prov_mac_address='x'
# bm_interfaces.address=['y']
#
# Upgrading them results in the same state:
#
# bm_nodes.prov_mac_address='x'
# bm_interfaces.address=['x', 'y']
#
# Downgrading this results in B, even if the actual initial state was A.
# Of course we could change it to downgrade to A instead, but then we could
# not downgrade to B; it is an exclusive choice, since we do not have
# information about the initial state.


@ -1,34 +0,0 @@
# Copyright (c) 2013 NTT DOCOMO, INC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, String, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
nodes = Table('bm_nodes', meta, autoload=True)
nodes.drop_column('prov_mac_address')
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
nodes = Table('bm_nodes', meta, autoload=True)
nodes.create_column(Column('prov_mac_address', String(length=255)))
# NOTE(arata): The values held by prov_mac_address are lost in upgrade.
# So downgrade has no other choice but to set the column to NULL.


@ -1,59 +0,0 @@
# Copyright 2013 Mirantis Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
table_name = 'bm_pxe_ips'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
table.drop()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
bm_pxe_ips = Table(table_name, meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255), unique=True),
Column('bm_node_id', Integer),
Column('server_address',
String(length=255), unique=True),
mysql_engine='InnoDB',
)
bm_pxe_ips.create()
Index(
'idx_bm_pxe_ips_bm_node_id_deleted',
bm_pxe_ips.c.bm_node_id,
bm_pxe_ips.c.deleted
).create(migrate_engine)


@ -1,34 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, Integer, Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
t = Table('bm_nodes', meta, autoload=True)
ephemeral_mb_col = Column('ephemeral_mb', Integer)
t.create_column(ephemeral_mb_col)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
t = Table('bm_nodes', meta, autoload=True)
t.drop_column('ephemeral_mb')


@ -1,53 +0,0 @@
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, Boolean, Table
from sqlalchemy.sql import expression
COLUMN_NAME = 'preserve_ephemeral'
TABLE_NAME = 'bm_nodes'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
t = Table(TABLE_NAME, meta, autoload=True)
default = (expression.text('0') if migrate_engine.name == 'sqlite'
else expression.text('false'))
preserve_ephemeral_col = Column(COLUMN_NAME, Boolean,
server_default=default)
t.create_column(preserve_ephemeral_col)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
t = Table(TABLE_NAME, meta, autoload=True)
# NOTE(rpodolyaka): SQLite doesn't have native BOOLEAN type, so it's
# emulated by adding a CHECK constraint. We must
# explicitly omit that constraint here so we don't
# receive 'no such column' error when dropping the
# column
if migrate_engine.name == 'sqlite':
t.constraints = set([
c
for c in t.constraints
if not (hasattr(c, 'sqltext') and COLUMN_NAME in str(c.sqltext))
])
t.drop_column(COLUMN_NAME)


@ -1,86 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy
from nova import exception
from nova.i18n import _
from nova.virt.baremetal.db.sqlalchemy import session
INIT_VERSION = 0
_REPOSITORY = None
def db_sync(version=None):
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.NovaException(_("version should be an integer"))
current_version = db_version()
repository = _find_migrate_repo()
if version is None or version > current_version:
return versioning_api.upgrade(session.get_engine(), repository,
version)
else:
return versioning_api.downgrade(session.get_engine(), repository,
version)
def db_version():
repository = _find_migrate_repo()
try:
return versioning_api.db_version(session.get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
engine = session.get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0:
db_version_control(INIT_VERSION)
return versioning_api.db_version(session.get_engine(), repository)
else:
# Some pre-Essex DB's may not be version controlled.
# Require them to upgrade using Essex first.
raise exception.NovaException(
_("Upgrade DB using Essex release first."))
def db_initial_version():
return INIT_VERSION
def db_version_control(version=None):
repository = _find_migrate_repo()
versioning_api.version_control(session.get_engine(), repository, version)
return version
def _find_migrate_repo():
"""Get the path for the migrate repository."""
global _REPOSITORY
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'migrate_repo')
assert os.path.exists(path)
if _REPOSITORY is None:
_REPOSITORY = Repository(path)
return _REPOSITORY
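# A purely illustrative sketch of driving the helpers above directly, assuming
# the [baremetal]/sql_connection option is configured: db_version() reports the
# current schema revision and db_sync() migrates to the newest (or a given)
# version.
print('current baremetal schema version: %s' % db_version())
db_sync()    # upgrade to the most recent version; db_sync(version=N) targets N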


@ -1,67 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for baremetal data.
"""
from sqlalchemy import Column, Boolean, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, Text
from nova.db.sqlalchemy import models
BASE = declarative_base()
class BareMetalNode(BASE, models.NovaBase):
"""Represents a bare metal node."""
__tablename__ = 'bm_nodes'
id = Column(Integer, primary_key=True)
deleted = Column(Boolean, default=False)
uuid = Column(String(36))
service_host = Column(String(255))
instance_uuid = Column(String(36))
instance_name = Column(String(255))
cpus = Column(Integer)
memory_mb = Column(Integer)
local_gb = Column(Integer)
preserve_ephemeral = Column(Boolean)
pm_address = Column(Text)
pm_user = Column(Text)
pm_password = Column(Text)
task_state = Column(String(255))
terminal_port = Column(Integer)
image_path = Column(String(255))
pxe_config_path = Column(String(255))
deploy_key = Column(String(255))
# root_mb, swap_mb and ephemeral_mb are cached flavor values for the
# current deployment not attributes of the node.
root_mb = Column(Integer)
swap_mb = Column(Integer)
ephemeral_mb = Column(Integer)
class BareMetalInterface(BASE, models.NovaBase):
__tablename__ = 'bm_interfaces'
id = Column(Integer, primary_key=True)
deleted = Column(Boolean, default=False)
bm_node_id = Column(Integer, ForeignKey('bm_nodes.id'))
address = Column(String(255), unique=True)
datapath_id = Column(String(255))
port_no = Column(Integer)
vif_uuid = Column(String(36), unique=True)


@ -1,66 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend."""
from oslo.config import cfg
from oslo.db.sqlalchemy import session as db_session
from nova import paths
opts = [
cfg.StrOpt('sql_connection',
default=('sqlite:///' +
paths.state_path_def('baremetal_nova.sqlite')),
help='The SQLAlchemy connection string used to connect to the '
'bare-metal database'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
_FACADE = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(CONF.baremetal.sql_connection,
**dict(CONF.database.iteritems()))
return _FACADE
def get_session(autocommit=True, expire_on_commit=False, **kwargs):
"""Return a SQLAlchemy session."""
facade = _create_facade_lazily()
return facade.get_session(autocommit=autocommit,
expire_on_commit=expire_on_commit, **kwargs)
def get_engine():
"""Return a SQLAlchemy engine."""
facade = _create_facade_lazily()
return facade.get_engine()


@ -1,69 +0,0 @@
General Bare-metal Provisioning README
======================================
:Authors:
[USC/ISI] Mikyung Kang <mkkang@isi.edu>, David Kang <dkang@isi.edu>
[NTT DOCOMO] Ken Igarashi <igarashik@nttdocomo.co.jp>
[VirtualTech Japan Inc.] Arata Notsu <notsu@virtualtech.jp>
:Date: 2012-08-02
:Version: 2012.8
:Wiki: http://wiki.openstack.org/GeneralBareMetalProvisioningFramework
Code changes
------------
::
nova/nova/virt/baremetal/*
nova/nova/virt/driver.py
nova/nova/tests/baremetal/*
nova/nova/tests/compute/test_compute.py
nova/nova/compute/manager.py
nova/nova/compute/resource_tracker.py
nova/nova/manager.py
nova/nova/scheduler/driver.py
nova/nova/scheduler/filter_scheduler.py
nova/nova/scheduler/host_manager.py
nova/nova/scheduler/baremetal_host_manager.py
nova/bin/bm_deploy_server
nova/bin/nova-bm-manage
Additional setting for bare-metal provisioning [nova.conf]
----------------------------------------------------------
::
# baremetal database connection
baremetal_sql_connection = mysql://$ID:$Password@$IP/nova_bm
# baremetal compute driver
compute_driver = nova.virt.baremetal.driver.BareMetalDriver
baremetal_driver = {nova.virt.baremetal.tilera.Tilera | nova.virt.baremetal.pxe.PXE}
power_manager = {nova.virt.baremetal.tilera_pdu.Pdu | nova.virt.baremetal.ipmi.Ipmi}
# flavor_extra_specs for this baremetal compute host
flavor_extra_specs = cpu_arch:{tilepro64 | x86_64 | arm}
# TFTP root
baremetal_tftp_root = /tftpboot
# baremetal scheduler host manager
scheduler_host_manager = nova.scheduler.baremetal_host_manager.BaremetalHostManager
Non-PXE (Tilera) Bare-metal Provisioning
----------------------------------------
1. tilera-bm-instance-creation.rst
2. tilera-bm-installation.rst
PXE Bare-metal Provisioning
---------------------------
1. pxe-bm-instance-creation.rst
2. pxe-bm-installation.rst


@ -1,571 +0,0 @@
# coding=utf-8
#
# Copyright (c) 2012 NTT DOCOMO, INC
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver for Bare-metal platform.
"""
from oslo.config import cfg
from oslo.utils import excutils
from oslo.utils import importutils
from nova.compute import arch
from nova.compute import flavors
from nova.compute import hvtype
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt import driver
from nova.virt import firewall
from nova.virt.libvirt import imagecache
LOG = logging.getLogger(__name__)
opts = [
cfg.StrOpt('vif_driver',
default='nova.virt.baremetal.vif_driver.BareMetalVIFDriver',
help='Baremetal VIF driver.'),
cfg.StrOpt('volume_driver',
default='nova.virt.baremetal.volume_driver.LibvirtVolumeDriver',
help='Baremetal volume driver.'),
cfg.ListOpt('flavor_extra_specs',
default=[],
help='A list of additional capabilities corresponding to '
'flavor_extra_specs for this compute '
'host to advertise. Valid entries are name:value pairs. '
'For example, "key1:val1, key2:val2"'),
cfg.StrOpt('driver',
default='nova.virt.baremetal.pxe.PXE',
help='Baremetal driver back-end (pxe or tilera)'),
cfg.StrOpt('power_manager',
default='nova.virt.baremetal.ipmi.IPMI',
help='Baremetal power management method'),
cfg.StrOpt('tftp_root',
default='/tftpboot',
help='Baremetal compute node\'s tftp root path'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.NoopFirewallDriver.__name__)
def _get_baremetal_node_by_instance_uuid(instance_uuid):
ctx = nova_context.get_admin_context()
node = db.bm_node_get_by_instance_uuid(ctx, instance_uuid)
if node['service_host'] != CONF.host:
LOG.error(_("Request for baremetal node %s "
"sent to wrong service host") % instance_uuid)
raise exception.InstanceNotFound(instance_id=instance_uuid)
return node
def _update_state(context, node, instance, state):
"""Update the node state in baremetal DB
If instance is not supplied, reset the instance_uuid field for this node.
"""
values = {'task_state': state}
if not instance:
values['instance_uuid'] = None
values['instance_name'] = None
db.bm_node_update(context, node['id'], values)
def get_power_manager(**kwargs):
cls = importutils.import_class(CONF.baremetal.power_manager)
return cls(**kwargs)
class BareMetalDriver(driver.ComputeDriver):
"""BareMetal hypervisor driver."""
capabilities = {
"has_imagecache": True,
"supports_recreate": False,
}
def __init__(self, virtapi, read_only=False):
super(BareMetalDriver, self).__init__(virtapi)
self.driver = importutils.import_object(
CONF.baremetal.driver, virtapi)
self.vif_driver = importutils.import_object(
CONF.baremetal.vif_driver)
self.firewall_driver = firewall.load_driver(
default=DEFAULT_FIREWALL_DRIVER)
self.volume_driver = importutils.import_object(
CONF.baremetal.volume_driver, virtapi)
self.image_cache_manager = imagecache.ImageCacheManager()
extra_specs = {}
extra_specs["baremetal_driver"] = CONF.baremetal.driver
for pair in CONF.baremetal.flavor_extra_specs:
keyval = pair.split(':', 1)
keyval[0] = keyval[0].strip()
keyval[1] = keyval[1].strip()
extra_specs[keyval[0]] = keyval[1]
self.extra_specs = extra_specs
if 'cpu_arch' not in extra_specs:
LOG.info(
_('cpu_arch is not found in flavor_extra_specs'))
self.supported_instances = []
else:
self.supported_instances = [(
arch.canonicalize(extra_specs['cpu_arch']),
hvtype.BAREMETAL,
vm_mode.HVM
), ]
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def init_host(self, host):
LOG.warn(_LW('The baremetal driver is deprecated in Juno and will be '
'removed before the next release. Please plan to '
'transition to Ironic as soon as possible. See '
'https://wiki.openstack.org/wiki/Ironic for more '
'information'))
def get_hypervisor_type(self):
return 'baremetal'
def get_hypervisor_version(self):
# TODO(deva): define the version properly elsewhere
return 1
def list_instances(self):
l = []
context = nova_context.get_admin_context()
for node in db.bm_node_get_associated(context, service_host=CONF.host):
l.append(node['instance_name'])
return l
def _require_node(self, instance):
"""Get a node's uuid out of a manager instance dict.
The compute manager is meant to know the node uuid, so a missing uuid is
a significant issue - it may mean we've been passed someone else's data.
"""
node_uuid = instance.get('node')
if not node_uuid:
raise exception.NovaException(_(
"Baremetal node id not supplied to driver for %r")
% instance['uuid'])
return node_uuid
def _attach_block_devices(self, instance, block_device_info):
block_device_mapping = driver.\
block_device_info_get_mapping(block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mountpoint = vol['mount_device']
self.attach_volume(None,
connection_info, instance, mountpoint)
def _detach_block_devices(self, instance, block_device_info):
block_device_mapping = driver.\
block_device_info_get_mapping(block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mountpoint = vol['mount_device']
self.detach_volume(
connection_info, instance, mountpoint)
def _start_firewall(self, instance, network_info):
self.firewall_driver.setup_basic_filtering(
instance, network_info)
self.firewall_driver.prepare_instance_filter(
instance, network_info)
self.firewall_driver.apply_instance_filter(
instance, network_info)
def _stop_firewall(self, instance, network_info):
self.firewall_driver.unfilter_instance(
instance, network_info)
def deallocate_networks_on_reschedule(self, instance):
return True
def macs_for_instance(self, instance):
context = nova_context.get_admin_context()
node_uuid = self._require_node(instance)
node = db.bm_node_get_by_node_uuid(context, node_uuid)
ifaces = db.bm_interface_get_all_by_bm_node_id(context, node['id'])
return set(iface['address'] for iface in ifaces)
def _set_default_ephemeral_device(self, instance):
flavor = flavors.extract_flavor(instance)
if flavor['ephemeral_gb']:
instance.default_ephemeral_device = '/dev/sda1'
instance.save()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
node_uuid = self._require_node(instance)
self._set_default_ephemeral_device(instance)
# NOTE(deva): this db method will raise an exception if the node is
# already in use. We call it here to ensure no one else
# allocates this node before we begin provisioning it.
node = db.bm_node_associate_and_update(context, node_uuid,
{'instance_uuid': instance['uuid'],
'instance_name': instance['hostname'],
'task_state': baremetal_states.BUILDING,
'preserve_ephemeral': False})
self._spawn(node, context, instance, image_meta, injected_files,
admin_password, network_info=network_info,
block_device_info=block_device_info)
def _spawn(self, node, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
try:
self._plug_vifs(instance, network_info, context=context)
self._attach_block_devices(instance, block_device_info)
self._start_firewall(instance, network_info)
# Caching images is both CPU and I/O expensive. When running many
# machines from a single nova-compute server, deploys of multiple
# machines can easily thrash the nova-compute server - unlike a
# virt hypervisor which is limited by CPU for VMs, baremetal only
# uses CPU and I/O when deploying. By only downloading one image
# at a time we serialise rather than thrashing, which leads to a
# lower average time-to-complete during overload situations, and
# a (relatively) insignificant delay for compute servers which
# have sufficient IOPS to handle multiple concurrent image
# conversions.
with lockutils.lock('nova-baremetal-cache-images', external=True):
self.driver.cache_images(
context, node, instance,
admin_password=admin_password,
image_meta=image_meta,
injected_files=injected_files,
network_info=network_info,
)
self.driver.activate_bootloader(context, node, instance,
network_info=network_info)
# NOTE(deva): ensure node is really off before we turn it on
# fixes bug https://code.launchpad.net/bugs/1178919
            self.power_off(instance, node=node)
self.power_on(context, instance, network_info, block_device_info,
node)
_update_state(context, node, instance, baremetal_states.PREPARED)
self.driver.activate_node(context, node, instance)
_update_state(context, node, instance, baremetal_states.ACTIVE)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_("Error deploying instance %(instance)s "
"on baremetal node %(node)s.") %
{'instance': instance['uuid'],
'node': node['uuid']})
# Do not set instance=None yet. This prevents another
# spawn() while we are cleaning up.
_update_state(context, node, instance, baremetal_states.ERROR)
self.driver.deactivate_node(context, node, instance)
                self.power_off(instance, node=node)
self.driver.deactivate_bootloader(context, node, instance)
self.driver.destroy_images(context, node, instance)
self._detach_block_devices(instance, block_device_info)
self._stop_firewall(instance, network_info)
self._unplug_vifs(instance, network_info)
_update_state(context, node, None, baremetal_states.DELETED)
else:
# We no longer need the image since we successfully deployed.
self.driver.destroy_images(context, node, instance)
def rebuild(self, context, instance, image_meta, injected_files,
admin_password, bdms, detach_block_devices,
attach_block_devices, network_info=None, recreate=False,
block_device_info=None, preserve_ephemeral=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.
:param context: Security context.
:param instance: Instance object.
:param image_meta: Image object returned by nova.image.glance that
defines the image from which to boot this instance.
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param bdms: block-device-mappings to use for rebuild
:param detach_block_devices: function to detach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage.
:param attach_block_devices: function to attach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices to be
attached to the instance.
:param recreate: True if instance should be recreated with same disk.
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild.
"""
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(expected_task_state=[task_states.REBUILDING])
node_uuid = self._require_node(instance)
node = db.bm_node_get_by_node_uuid(context, node_uuid)
db.bm_node_update(
context, node['id'],
{'task_state': baremetal_states.BUILDING,
'preserve_ephemeral': preserve_ephemeral}
)
self._spawn(node, context, instance, image_meta, injected_files,
admin_password, network_info=network_info,
block_device_info=block_device_info)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
ctx = nova_context.get_admin_context()
pm = get_power_manager(node=node, instance=instance)
state = pm.reboot_node()
if pm.state != baremetal_states.ACTIVE:
raise exception.InstanceRebootFailure(_(
"Baremetal power manager failed to restart node "
"for instance %r") % instance['uuid'])
_update_state(ctx, node, instance, state)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
context = nova_context.get_admin_context()
try:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
except exception.InstanceNotFound:
LOG.warning(_("Destroy called on non-existing instance %s")
% instance['uuid'])
return
try:
self.driver.deactivate_node(context, node, instance)
            self.power_off(instance, node=node)
self.driver.deactivate_bootloader(context, node, instance)
self.driver.destroy_images(context, node, instance)
self._detach_block_devices(instance, block_device_info)
self._stop_firewall(instance, network_info)
self._unplug_vifs(instance, network_info)
_update_state(context, node, None, baremetal_states.DELETED)
except Exception as e:
with excutils.save_and_reraise_exception():
try:
LOG.error(_("Error from baremetal driver "
"during destroy: %s") % e)
_update_state(context, node, instance,
baremetal_states.ERROR)
except Exception:
LOG.error(_("Error while recording destroy failure in "
"baremetal database: %s") % e)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed."""
pass
def power_off(self, instance, timeout=0, retry_interval=0, node=None):
"""Power off the specified instance."""
# TODO(PhilDay): Add support for timeout (clean shutdown)
if not node:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
pm.deactivate_node()
if pm.state != baremetal_states.DELETED:
raise exception.InstancePowerOffFailure(_(
"Baremetal power manager failed to stop node "
"for instance %r") % instance['uuid'])
pm.stop_console()
def power_on(self, context, instance, network_info, block_device_info=None,
node=None):
"""Power on the specified instance."""
if not node:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
pm.activate_node()
if pm.state != baremetal_states.ACTIVE:
raise exception.InstancePowerOnFailure(_(
"Baremetal power manager failed to start node "
"for instance %r") % instance['uuid'])
pm.start_console()
def get_volume_connector(self, instance):
return self.volume_driver.get_volume_connector(instance)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
return self.volume_driver.attach_volume(connection_info,
instance, mountpoint)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
return self.volume_driver.detach_volume(connection_info,
instance, mountpoint)
def get_info(self, instance):
inst_uuid = instance.get('uuid')
node = _get_baremetal_node_by_instance_uuid(inst_uuid)
pm = get_power_manager(node=node, instance=instance)
# NOTE(deva): Power manager may not be able to determine power state
# in which case it may return "None" here.
ps = pm.is_power_on()
if ps:
pstate = power_state.RUNNING
elif ps is False:
pstate = power_state.SHUTDOWN
else:
pstate = power_state.NOSTATE
return {'state': pstate,
'max_mem': node['memory_mb'],
'mem': node['memory_mb'],
'num_cpu': node['cpus'],
'cpu_time': 0}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
return True
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
return True
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def _node_resource(self, node):
vcpus_used = 0
memory_mb_used = 0
local_gb_used = 0
vcpus = node['cpus']
memory_mb = node['memory_mb']
local_gb = node['local_gb']
if node['instance_uuid']:
vcpus_used = node['cpus']
memory_mb_used = node['memory_mb']
local_gb_used = node['local_gb']
dic = {'vcpus': vcpus,
'memory_mb': memory_mb,
'local_gb': local_gb,
'vcpus_used': vcpus_used,
'memory_mb_used': memory_mb_used,
'local_gb_used': local_gb_used,
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
'hypervisor_hostname': str(node['uuid']),
'cpu_info': 'baremetal cpu',
'supported_instances':
jsonutils.dumps(self.supported_instances),
'stats': jsonutils.dumps(self.extra_specs)
}
return dic
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def get_available_resource(self, nodename):
context = nova_context.get_admin_context()
resource = {}
try:
node = db.bm_node_get_by_node_uuid(context, nodename)
resource = self._node_resource(node)
except exception.NodeNotFoundByUUID:
pass
return resource
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
self.firewall_driver.prepare_instance_filter(instance_ref,
network_info)
def unfilter_instance(self, instance_ref, network_info):
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def plug_vifs(self, instance, network_info):
"""Plugin VIFs into networks."""
self._plug_vifs(instance, network_info)
def _plug_vifs(self, instance, network_info, context=None):
if not context:
context = nova_context.get_admin_context()
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
if node:
pifs = db.bm_interface_get_all_by_bm_node_id(context, node['id'])
for pif in pifs:
if pif['vif_uuid']:
db.bm_interface_set_vif_uuid(context, pif['id'], None)
for vif in network_info:
self.vif_driver.plug(instance, vif)
def _unplug_vifs(self, instance, network_info):
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.update(context, all_instances)
def get_console_output(self, context, instance):
node = _get_baremetal_node_by_instance_uuid(instance.uuid)
return self.driver.get_console_output(node, instance)
def get_available_nodes(self, refresh=False):
context = nova_context.get_admin_context()
return [str(n['uuid']) for n in
db.bm_node_get_all(context, service_host=CONF.host)]
def dhcp_options_for_instance(self, instance):
return self.driver.dhcp_options_for_instance(instance)


@ -1,82 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.baremetal import base
from nova.virt import firewall
class FakeDriver(base.NodeDriver):
def cache_images(self, context, node, instance, **kwargs):
pass
def destroy_images(self, context, node, instance):
pass
def activate_bootloader(self, context, node, instance, **kwargs):
pass
def deactivate_bootloader(self, context, node, instance):
pass
def activate_node(self, context, node, instance):
"""For operations after power on."""
pass
def deactivate_node(self, context, node, instance):
"""For operations before power off."""
pass
def get_console_output(self, node, instance):
return 'fake\nconsole\noutput for instance %s' % instance.id
class FakePowerManager(base.PowerManager):
def __init__(self, **kwargs):
super(FakePowerManager, self).__init__(**kwargs)
class FakeFirewallDriver(firewall.NoopFirewallDriver):
def __init__(self):
super(FakeFirewallDriver, self).__init__()
class FakeVifDriver(object):
def __init__(self):
super(FakeVifDriver, self).__init__()
def plug(self, instance, vif):
pass
def unplug(self, instance, vif):
pass
class FakeVolumeDriver(object):
def __init__(self, virtapi):
super(FakeVolumeDriver, self).__init__()
self.virtapi = virtapi
self._initiator = "fake_initiator"
def attach_volume(self, connection_info, instance, mountpoint):
pass
def detach_volume(self, connection_info, instance, mountpoint):
pass


@ -1,128 +0,0 @@
# Copyright 2013 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# iBoot Power Driver
from oslo.utils import importutils
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
iboot = importutils.try_import('iboot')
LOG = logging.getLogger(__name__)
class IBootManager(base.PowerManager):
"""iBoot Power Driver for Baremetal Nova Compute
This PowerManager class provides a mechanism for controlling power state
via an iBoot capable device (tested with an iBoot G2).
Requires installation of python-iboot:
https://github.com/darkip/python-iboot
"""
def __init__(self, **kwargs):
node = kwargs.pop('node', {})
addr_relay = str(node['pm_address']).split(',')
if len(addr_relay) > 1:
try:
self.relay_id = int(addr_relay[1])
except ValueError:
msg = _("iboot PDU relay ID must be an integer.")
raise exception.InvalidParameterValue(msg)
else:
self.relay_id = 1
addr_port = addr_relay[0].split(':')
self.address = addr_port[0]
if len(addr_port) > 1:
try:
self.port = int(addr_port[1])
except ValueError:
msg = _("iboot PDU port must be an integer.")
raise exception.InvalidParameterValue(msg)
else:
self.port = 9100
self.user = str(node['pm_user'])
self.password = str(node['pm_password'])
instance = kwargs.pop('instance', {})
self.node_name = instance.get('hostname', "")
self.state = None
self.conn = None
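    # Illustrative pm_address formats accepted by the parsing above (a sketch
    # with hypothetical values, not part of the original driver):
    #   "192.0.2.10"         -> host 192.0.2.10, port 9100, relay 1
    #   "192.0.2.10:9101"    -> host 192.0.2.10, port 9101, relay 1
    #   "192.0.2.10:9101,2"  -> host 192.0.2.10, port 9101, relay 2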
def _create_connection(self):
if not self.conn:
self.conn = iboot.iBootInterface(self.address, self.user,
self.password, port=self.port,
num_relays=self.relay_id)
return self.conn
def _switch(self, relay_id, enabled):
return self.conn.switch(relay_id, enabled)
def _get_relay(self, relay_id):
return self.conn.get_relays()[relay_id - 1]
def activate_node(self):
LOG.info(_("activate_node name %s"), self.node_name)
self._create_connection()
self._switch(self.relay_id, True)
if self.is_power_on():
self.state = baremetal_states.ACTIVE
else:
self.state = baremetal_states.ERROR
return self.state
def reboot_node(self):
LOG.info(_("reboot_node: %s"), self.node_name)
self._create_connection()
self._switch(self.relay_id, False)
self._switch(self.relay_id, True)
if self.is_power_on():
self.state = baremetal_states.ACTIVE
else:
self.state = baremetal_states.ERROR
return self.state
def deactivate_node(self):
LOG.info(_("deactivate_node name %s"), self.node_name)
self._create_connection()
if self.is_power_on():
self._switch(self.relay_id, False)
if self.is_power_on():
self.state = baremetal_states.ERROR
else:
self.state = baremetal_states.DELETED
return self.state
def is_power_on(self):
LOG.debug("Checking if %s is running", self.node_name)
self._create_connection()
return self._get_relay(self.relay_id)


@ -1,303 +0,0 @@
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Baremetal IPMI power manager.
"""
import os
import stat
import tempfile
from oslo.config import cfg
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova import paths
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import utils as bm_utils
opts = [
cfg.StrOpt('terminal',
default='shellinaboxd',
help='Path to baremetal terminal program'),
cfg.StrOpt('terminal_cert_dir',
help='Path to baremetal terminal SSL cert(PEM)'),
cfg.StrOpt('terminal_pid_dir',
default=paths.state_path_def('baremetal/console'),
               help='Path to directory that stores pidfiles of the '
                    'baremetal terminal'),
    cfg.IntOpt('ipmi_power_retry',
               default=10,
               help='Maximum number of retries for IPMI operations'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
LOG = logging.getLogger(__name__)
def _make_password_file(password):
fd, path = tempfile.mkstemp()
os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR)
with os.fdopen(fd, "w") as f:
# NOTE(r-mibu): Since ipmitool hangs with an empty password file,
# we have to write '\0' if password was empty.
# see https://bugs.launchpad.net/nova/+bug/1237802 for more details
f.write(password or b"\0")
return path
def _get_console_pid_path(node_id):
name = "%s.pid" % node_id
path = os.path.join(CONF.baremetal.terminal_pid_dir, name)
return path
def _get_console_pid(node_id):
pid_path = _get_console_pid_path(node_id)
if os.path.exists(pid_path):
with open(pid_path, 'r') as f:
pid_str = f.read()
try:
return int(pid_str)
except ValueError:
LOG.warn(_("pid file %s does not contain any pid"), pid_path)
return None
class IPMI(base.PowerManager):
"""IPMI Power Driver for Baremetal Nova Compute
    This PowerManager class provides a mechanism for controlling the power
    state of physical hardware via IPMI calls. It also provides serial
    console access where available.
"""
def __init__(self, node, **kwargs):
self.state = None
self.retries = None
self.node_id = node['id']
self.address = node['pm_address']
self.user = node['pm_user']
self.password = node['pm_password']
self.port = node['terminal_port']
if self.node_id is None:
raise exception.InvalidParameterValue(_("Node id not supplied "
"to IPMI"))
if self.address is None:
raise exception.InvalidParameterValue(_("Address not supplied "
"to IPMI"))
if self.user is None:
raise exception.InvalidParameterValue(_("User not supplied "
"to IPMI"))
if self.password is None:
raise exception.InvalidParameterValue(_("Password not supplied "
"to IPMI"))
def _exec_ipmitool(self, command):
args = ['ipmitool',
'-I',
'lanplus',
'-H',
self.address,
'-U',
self.user,
'-f']
pwfile = _make_password_file(self.password)
try:
args.append(pwfile)
args.extend(command.split(" "))
out, err = utils.execute(*args, attempts=3)
LOG.debug("ipmitool stdout: '%(out)s', stderr: '%(err)s'",
{'out': out, 'err': err})
return out, err
finally:
bm_utils.unlink_without_raise(pwfile)
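    # Illustrative sketch (hypothetical values, not part of the original
    # driver): for a node with pm_address=192.0.2.20 and pm_user=admin,
    # _exec_ipmitool("power status") runs roughly
    #
    #   ipmitool -I lanplus -H 192.0.2.20 -U admin -f <password file> power status
    #
    # with the password passed via a temporary file (see _make_password_file
    # above) rather than on the command line.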
def _power_on(self):
"""Turn the power to this node ON."""
def _wait_for_power_on():
"""Called at an interval until the node's power is on."""
if self.is_power_on():
self.state = baremetal_states.ACTIVE
raise loopingcall.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
LOG.error(_("IPMI power on failed after %d tries") % (
CONF.baremetal.ipmi_power_retry))
self.state = baremetal_states.ERROR
raise loopingcall.LoopingCallDone()
try:
self.retries += 1
if not self.power_on_called:
self._exec_ipmitool("power on")
self.power_on_called = True
except Exception:
LOG.exception(_("IPMI power on failed"))
self.retries = 0
self.power_on_called = False
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_on)
timer.start(interval=1.0).wait()
def _power_off(self):
"""Turn the power to this node OFF."""
def _wait_for_power_off():
"""Called at an interval until the node's power is off."""
if self.is_power_on() is False:
self.state = baremetal_states.DELETED
raise loopingcall.LoopingCallDone()
if self.retries > CONF.baremetal.ipmi_power_retry:
LOG.error(_("IPMI power off failed after %d tries") % (
CONF.baremetal.ipmi_power_retry))
self.state = baremetal_states.ERROR
raise loopingcall.LoopingCallDone()
try:
self.retries += 1
if not self.power_off_called:
self._exec_ipmitool("power off")
self.power_off_called = True
except Exception:
LOG.exception(_("IPMI power off failed"))
self.retries = 0
self.power_off_called = False
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_off)
timer.start(interval=1.0).wait()
def _set_pxe_for_next_boot(self):
try:
self._exec_ipmitool("chassis bootdev pxe options=persistent")
except Exception:
LOG.exception(_("IPMI set next bootdev failed"))
def activate_node(self):
"""Turns the power to node ON.
Sets node next-boot to PXE and turns the power on,
waiting up to ipmi_power_retry/2 seconds for confirmation
that the power is on.
:returns: One of baremetal_states.py, representing the new state.
"""
if self.is_power_on() and self.state == baremetal_states.ACTIVE:
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def reboot_node(self):
"""Cycles the power to a node.
Turns the power off, sets next-boot to PXE, and turns the power on.
Each action waits up to ipmi_power_retry/2 seconds for confirmation
that the power state has changed.
:returns: One of baremetal_states.py, representing the new state.
"""
self._power_off()
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def deactivate_node(self):
"""Turns the power to node OFF.
Turns the power off, and waits up to ipmi_power_retry/2 seconds
for confirmation that the power is off.
:returns: One of baremetal_states.py, representing the new state.
"""
self._power_off()
return self.state
def is_power_on(self):
"""Check if the power is currently on.
:returns: True if on; False if off; None if unable to determine.
"""
# NOTE(deva): string matching based on
# http://ipmitool.cvs.sourceforge.net/
# viewvc/ipmitool/ipmitool/lib/ipmi_chassis.c
res = self._exec_ipmitool("power status")[0]
if res == ("Chassis Power is on\n"):
return True
elif res == ("Chassis Power is off\n"):
return False
return None
def start_console(self):
if not self.port:
return
args = []
args.append(CONF.baremetal.terminal)
if CONF.baremetal.terminal_cert_dir:
args.append("-c")
args.append(CONF.baremetal.terminal_cert_dir)
else:
args.append("-t")
args.append("-p")
args.append(str(self.port))
args.append("--background=%s" % _get_console_pid_path(self.node_id))
args.append("-s")
        pwfile = _make_password_file(self.password)
        try:
ipmi_args = "/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s" \
" -I lanplus -U %(user)s -f %(pwfile)s sol activate" \
% {'uid': os.getuid(),
'gid': os.getgid(),
'address': self.address,
'user': self.user,
'pwfile': pwfile,
}
args.append(ipmi_args)
# Run shellinaboxd without pipes. Otherwise utils.execute() waits
# infinitely since shellinaboxd does not close passed fds.
x = ["'" + arg.replace("'", "'\\''") + "'" for arg in args]
x.append('</dev/null')
x.append('>/dev/null')
x.append('2>&1')
utils.execute(' '.join(x), shell=True)
finally:
bm_utils.unlink_without_raise(pwfile)
def stop_console(self):
console_pid = _get_console_pid(self.node_id)
if console_pid:
# Allow exitcode 99 (RC_UNAUTHORIZED)
utils.execute('kill', '-TERM', str(console_pid),
run_as_root=True,
check_exit_code=[0, 99])
bm_utils.unlink_without_raise(_get_console_pid_path(self.node_id))


@ -1,17 +0,0 @@
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
{% for ifc in interfaces -%}
auto {{ ifc.name }}
iface {{ ifc.name }} inet dhcp
{% if use_ipv6 -%}
iface {{ ifc.name }} inet6 dhcp
{%- endif %}
{%- endfor %}


@ -1,27 +0,0 @@
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
{% for ifc in interfaces -%}
auto {{ ifc.name }}
iface {{ ifc.name }} inet static
address {{ ifc.address }}
netmask {{ ifc.netmask }}
gateway {{ ifc.gateway }}
{%- if ifc.dns %}
dns-nameservers {{ ifc.dns }}
{%- endif %}
{% if use_ipv6 -%}
iface {{ ifc.name }} inet6 static
address {{ ifc.address_v6 }}
netmask {{ ifc.netmask_v6 }}
gateway {{ ifc.gateway_v6 }}
{%- endif %}
{%- endfor %}


@ -1,503 +0,0 @@
# Copyright 2012,2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for PXE bare-metal nodes.
"""
import datetime
import os
import jinja2
from oslo.config import cfg
from oslo.db import exception as db_exc
from oslo.utils import timeutils
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import db
from nova.virt.baremetal import utils as bm_utils
pxe_opts = [
cfg.StrOpt('deploy_kernel',
help='Default kernel image ID used in deployment phase'),
cfg.StrOpt('deploy_ramdisk',
help='Default ramdisk image ID used in deployment phase'),
cfg.StrOpt('net_config_template',
default='$pybasedir/nova/virt/baremetal/'
'net-dhcp.ubuntu.template',
help='Template file for injected network config'),
cfg.StrOpt('pxe_append_params',
default='nofb nomodeset vga=normal',
help='Additional append parameters for baremetal PXE boot'),
cfg.StrOpt('pxe_config_template',
default='$pybasedir/nova/virt/baremetal/pxe_config.template',
help='Template file for PXE configuration'),
cfg.BoolOpt('use_file_injection',
help='If True, enable file injection for network info, '
'files and admin password',
default=False),
cfg.IntOpt('pxe_deploy_timeout',
help='Timeout for PXE deployments. Default: 0 (unlimited)',
default=0),
cfg.BoolOpt('pxe_network_config',
help='If set, pass the network configuration details to the '
'initramfs via cmdline.',
default=False),
cfg.StrOpt('pxe_bootfile_name',
help='This gets passed to Neutron as the bootfile dhcp '
'parameter.',
default='pxelinux.0'),
]
LOG = logging.getLogger(__name__)
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(pxe_opts, baremetal_group)
CONF.import_opt('use_ipv6', 'nova.netconf')
def build_pxe_network_config(network_info):
interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
template = None
if not CONF.use_ipv6:
template = "ip=%(address)s::%(gateway)s:%(netmask)s::%(name)s:off"
else:
template = ("ip=[%(address_v6)s]::[%(gateway_v6)s]:"
"[%(netmask_v6)s]::%(name)s:off")
net_config = [template % iface for iface in interfaces]
return ' '.join(net_config)
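# Illustrative sketch (hypothetical values, not part of the original driver):
# for a single IPv4 interface with address=10.0.0.5, gateway=10.0.0.1,
# netmask=255.255.255.0 and name=eth0, build_pxe_network_config() renders the
# kernel argument
#
#   ip=10.0.0.5::10.0.0.1:255.255.255.0::eth0:off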
def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
deployment_aki_path, deployment_ari_path,
aki_path, ari_path, network_info):
"""Build the PXE config file for a node
This method builds the PXE boot configuration file for a node,
given all the required parameters.
The resulting file has both a "deploy" and "boot" label, which correspond
to the two phases of booting. This may be extended later.
"""
LOG.debug("Building PXE config for deployment %s.", deployment_id)
network_config = None
if network_info and CONF.baremetal.pxe_network_config:
network_config = build_pxe_network_config(network_info)
pxe_options = {
'deployment_id': deployment_id,
'deployment_key': deployment_key,
'deployment_iscsi_iqn': deployment_iscsi_iqn,
'deployment_aki_path': deployment_aki_path,
'deployment_ari_path': deployment_ari_path,
'aki_path': aki_path,
'ari_path': ari_path,
'pxe_append_params': CONF.baremetal.pxe_append_params,
'pxe_network_config': network_config,
}
tmpl_path, tmpl_file = os.path.split(CONF.baremetal.pxe_config_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
return template.render({'pxe_options': pxe_options,
'ROOT': '${ROOT}'})
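# Illustrative rendered output (a sketch based on pxe_config.template shown
# later in this diff, with hypothetical paths): build_pxe_config() produces a
# config with two labels, roughly
#
#   label deploy
#   kernel /tftpboot/<uuid>/deploy_kernel
#   append initrd=/tftpboot/<uuid>/deploy_ramdisk deployment_id=... ...
#
#   label boot
#   kernel /tftpboot/<uuid>/kernel
#   append initrd=/tftpboot/<uuid>/ramdisk root=${ROOT} ro ...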
def build_network_config(network_info):
interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
tmpl_path, tmpl_file = os.path.split(CONF.baremetal.net_config_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
return template.render({'interfaces': interfaces,
'use_ipv6': CONF.use_ipv6})
def get_deploy_aki_id(flavor):
return flavor.get('extra_specs', {}).\
get('baremetal:deploy_kernel_id', CONF.baremetal.deploy_kernel)
def get_deploy_ari_id(flavor):
return flavor.get('extra_specs', {}).\
get('baremetal:deploy_ramdisk_id', CONF.baremetal.deploy_ramdisk)
def get_image_dir_path(instance):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'])
def get_image_file_path(instance):
"""Generate the full path for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'], 'disk')
def get_pxe_config_file_path(instance):
"""Generate the path for an instances PXE config file."""
return os.path.join(CONF.baremetal.tftp_root, instance['uuid'], 'config')
def get_partition_sizes(instance):
flavor = flavors.extract_flavor(instance)
root_mb = flavor['root_gb'] * 1024
swap_mb = flavor['swap']
ephemeral_mb = flavor['ephemeral_gb'] * 1024
# NOTE(deva): For simpler code paths on the deployment side,
# we always create a swap partition. If the flavor
# does not specify any swap, we default to 1MB
if swap_mb < 1:
swap_mb = 1
return (root_mb, swap_mb, ephemeral_mb)
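# Worked example (hypothetical flavor, not part of the original driver): for
# root_gb=10, swap=0 and ephemeral_gb=20, get_partition_sizes() returns
# (10240, 1, 20480) -- note the 1 MB swap floor described above.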
def get_pxe_mac_path(mac):
"""Convert a MAC address into a PXE config file name."""
return os.path.join(
CONF.baremetal.tftp_root,
'pxelinux.cfg',
"01-" + mac.replace(":", "-").lower()
)
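# Illustrative sketch (hypothetical MAC, not part of the original driver):
# pxelinux looks up per-MAC config files prefixed with "01-" (the ARP type
# for Ethernet), so with the default tftp_root:
#
#   get_pxe_mac_path('AA:BB:CC:DD:EE:FF')
#   -> '/tftpboot/pxelinux.cfg/01-aa-bb-cc-dd-ee-ff'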
def get_tftp_image_info(instance, flavor):
"""Generate the paths for tftp files for this instance
Raises NovaException if
- instance does not contain kernel_id or ramdisk_id
- deploy_kernel_id or deploy_ramdisk_id can not be read from
flavor['extra_specs'] and defaults are not set
"""
image_info = {
'kernel': [None, None],
'ramdisk': [None, None],
'deploy_kernel': [None, None],
'deploy_ramdisk': [None, None],
}
try:
image_info['kernel'][0] = str(instance['kernel_id'])
image_info['ramdisk'][0] = str(instance['ramdisk_id'])
image_info['deploy_kernel'][0] = get_deploy_aki_id(flavor)
image_info['deploy_ramdisk'][0] = get_deploy_ari_id(flavor)
except KeyError:
pass
missing_labels = []
for label in image_info.keys():
(uuid, path) = image_info[label]
if not uuid:
missing_labels.append(label)
else:
image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
instance['uuid'], label)
if missing_labels:
raise exception.NovaException(_(
"Can not activate PXE bootloader. The following boot parameters "
"were not passed to baremetal driver: %s") % missing_labels)
return image_info
class PXE(base.NodeDriver):
"""PXE bare metal driver."""
def __init__(self, virtapi):
super(PXE, self).__init__(virtapi)
def _collect_mac_addresses(self, context, node):
macs = set()
for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
if nic['address']:
macs.add(nic['address'])
return sorted(macs)
def _cache_tftp_images(self, context, instance, image_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
fileutils.ensure_tree(
os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
LOG.debug("Fetching kernel and ramdisk for instance %s",
instance['name'])
for label in image_info.keys():
(uuid, path) = image_info[label]
bm_utils.cache_image(
context=context,
target=path,
image_id=uuid,
user_id=instance['user_id'],
project_id=instance['project_id'],
)
def _cache_image(self, context, instance, image_meta):
"""Fetch the instance's image from Glance
This method pulls the relevant AMI and associated kernel and ramdisk,
and the deploy kernel and ramdisk from Glance, and writes them
to the appropriate places on local disk.
Both sets of kernel and ramdisk are needed for PXE booting, so these
are stored under CONF.baremetal.tftp_root.
At present, the AMI is cached and certain files are injected.
Debian/ubuntu-specific assumptions are made regarding the injected
files. In a future revision, this functionality will be replaced by a
more scalable and os-agnostic approach: the deployment ramdisk will
fetch from Glance directly, and write its own last-mile configuration.
"""
fileutils.ensure_tree(get_image_dir_path(instance))
image_path = get_image_file_path(instance)
LOG.debug("Fetching image %(ami)s for instance %(name)s",
{'ami': image_meta['id'], 'name': instance['name']})
bm_utils.cache_image(context=context,
target=image_path,
image_id=image_meta['id'],
user_id=instance['user_id'],
project_id=instance['project_id'],
clean=True,
)
return [image_meta['id'], image_path]
def _inject_into_image(self, context, node, instance, network_info,
injected_files=None, admin_password=None):
"""Inject last-mile configuration into instances image
Much of this method is a hack around DHCP and cloud-init
not working together with baremetal provisioning yet.
"""
# NOTE(deva): We assume that if we're not using a kernel,
# then the target partition is the first partition
partition = None
if not instance['kernel_id']:
partition = "1"
ssh_key = None
if 'key_data' in instance and instance['key_data']:
ssh_key = str(instance['key_data'])
if injected_files is None:
injected_files = []
else:
# NOTE(deva): copy so we don't modify the original
injected_files = list(injected_files)
net_config = build_network_config(network_info)
if instance['hostname']:
injected_files.append(('/etc/hostname', instance['hostname']))
LOG.debug("Injecting files into image for instance %(name)s",
{'name': instance['name']})
bm_utils.inject_into_image(
image=get_image_file_path(instance),
key=ssh_key,
net=net_config,
metadata=utils.instance_meta(instance),
admin_password=admin_password,
files=injected_files,
partition=partition,
)
def cache_images(self, context, node, instance,
admin_password, image_meta, injected_files, network_info):
"""Prepare all the images for this instance."""
flavor = objects.Flavor.get_by_id(context,
instance['instance_type_id'])
tftp_image_info = get_tftp_image_info(instance, flavor)
self._cache_tftp_images(context, instance, tftp_image_info)
self._cache_image(context, instance, image_meta)
if CONF.baremetal.use_file_injection:
self._inject_into_image(context, node, instance, network_info,
injected_files, admin_password)
def destroy_images(self, context, node, instance):
"""Delete instance's image file."""
bm_utils.unlink_without_raise(get_image_file_path(instance))
bm_utils.rmtree_without_raise(get_image_dir_path(instance))
def dhcp_options_for_instance(self, instance):
return [{'opt_name': 'bootfile-name',
'opt_value': CONF.baremetal.pxe_bootfile_name},
{'opt_name': 'server-ip-address',
'opt_value': CONF.my_ip},
{'opt_name': 'tftp-server',
'opt_value': CONF.my_ip}
]
def activate_bootloader(self, context, node, instance, network_info):
"""Configure PXE boot loader for an instance
Kernel and ramdisk images are downloaded by cache_tftp_images,
and stored in /tftpboot/{uuid}/
        This method writes the instance's config file, and then creates
symlinks for each MAC address in the instance.
By default, the complete layout looks like this:
/tftpboot/
./{uuid}/
kernel
ramdisk
deploy_kernel
deploy_ramdisk
config
./pxelinux.cfg/
{mac} -> ../{uuid}/config
"""
flavor = objects.Flavor.get_by_id(context,
instance['instance_type_id'])
image_info = get_tftp_image_info(instance, flavor)
(root_mb, swap_mb, ephemeral_mb) = get_partition_sizes(instance)
pxe_config_file_path = get_pxe_config_file_path(instance)
image_file_path = get_image_file_path(instance)
deployment_key = bm_utils.random_alnum(32)
deployment_iscsi_iqn = "iqn-%s" % instance['uuid']
db.bm_node_update(context, node['id'],
{'deploy_key': deployment_key,
'image_path': image_file_path,
'pxe_config_path': pxe_config_file_path,
'root_mb': root_mb,
'swap_mb': swap_mb,
'ephemeral_mb': ephemeral_mb})
pxe_config = build_pxe_config(
node['id'],
deployment_key,
deployment_iscsi_iqn,
image_info['deploy_kernel'][1],
image_info['deploy_ramdisk'][1],
image_info['kernel'][1],
image_info['ramdisk'][1],
network_info,
)
bm_utils.write_to_file(pxe_config_file_path, pxe_config)
macs = self._collect_mac_addresses(context, node)
for mac in macs:
mac_path = get_pxe_mac_path(mac)
bm_utils.unlink_without_raise(mac_path)
bm_utils.create_link_without_raise(pxe_config_file_path, mac_path)
def deactivate_bootloader(self, context, node, instance):
"""Delete PXE bootloader images and config."""
try:
db.bm_node_update(context, node['id'],
{'deploy_key': None,
'image_path': None,
'pxe_config_path': None,
'root_mb': 0,
'swap_mb': 0})
except exception.NodeNotFound:
pass
# NOTE(danms): the flavor extra_specs do not need to be
# present/correct at deactivate time, so pass something empty
# to avoid an extra lookup
flavor = dict(extra_specs={
'baremetal:deploy_ramdisk_id': 'ignore',
'baremetal:deploy_kernel_id': 'ignore'})
try:
image_info = get_tftp_image_info(instance, flavor)
except exception.NovaException:
pass
else:
for label in image_info.keys():
(uuid, path) = image_info[label]
bm_utils.unlink_without_raise(path)
bm_utils.unlink_without_raise(get_pxe_config_file_path(instance))
try:
macs = self._collect_mac_addresses(context, node)
except db_exc.DBError:
pass
else:
for mac in macs:
bm_utils.unlink_without_raise(get_pxe_mac_path(mac))
bm_utils.rmtree_without_raise(
os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
def activate_node(self, context, node, instance):
"""Wait for PXE deployment to complete."""
locals = {'error': '', 'started': False}
def _wait_for_deploy():
"""Called at an interval until the deployment completes."""
try:
row = db.bm_node_get(context, node['id'])
if instance['uuid'] != row.get('instance_uuid'):
locals['error'] = _("Node associated with another instance"
" while waiting for deploy of %s")
raise loopingcall.LoopingCallDone()
status = row.get('task_state')
if (status == baremetal_states.DEPLOYING
and locals['started'] is False):
LOG.info(_("PXE deploy started for instance %s")
% instance['uuid'])
locals['started'] = True
elif status in (baremetal_states.DEPLOYDONE,
baremetal_states.ACTIVE):
LOG.info(_("PXE deploy completed for instance %s")
% instance['uuid'])
raise loopingcall.LoopingCallDone()
elif status == baremetal_states.DEPLOYFAIL:
locals['error'] = _("PXE deploy failed for instance %s")
except exception.NodeNotFound:
locals['error'] = _("Baremetal node deleted while waiting "
"for deployment of instance %s")
if (CONF.baremetal.pxe_deploy_timeout and
timeutils.utcnow() > expiration):
locals['error'] = _("Timeout reached while waiting for "
"PXE deploy of instance %s")
if locals['error']:
raise loopingcall.LoopingCallDone()
expiration = timeutils.utcnow() + datetime.timedelta(
seconds=CONF.baremetal.pxe_deploy_timeout)
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy)
timer.start(interval=1).wait()
if locals['error']:
raise exception.InstanceDeployFailure(
locals['error'] % instance['uuid'])
def deactivate_node(self, context, node, instance):
pass


@ -1,11 +0,0 @@
default deploy
label deploy
kernel {{ pxe_options.deployment_aki_path }}
append initrd={{ pxe_options.deployment_ari_path }} selinux=0 disk=cciss/c0d0,sda,hda,vda iscsi_target_iqn={{ pxe_options.deployment_iscsi_iqn }} deployment_id={{ pxe_options.deployment_id }} deployment_key={{ pxe_options.deployment_key }} troubleshoot=0 {{ pxe_options.pxe_append_params|default("", true) }}
ipappend 3
label boot
kernel {{ pxe_options.aki_path }}
append initrd={{ pxe_options.ari_path }} root={{ ROOT }} ro {{ pxe_options.pxe_append_params|default("", true) }} {{ pxe_options.pxe_network_config|default("", true) }}


@ -1,351 +0,0 @@
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for Tilera bare-metal nodes.
"""
import base64
import os
import jinja2
from oslo.config import cfg
from oslo.db import exception as db_exc
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import db
from nova.virt.baremetal import utils as bm_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('net_config_template', 'nova.virt.baremetal.pxe',
group='baremetal')
def build_network_config(network_info):
interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
tmpl_path, tmpl_file = os.path.split(CONF.baremetal.net_config_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
return template.render({'interfaces': interfaces,
'use_ipv6': CONF.use_ipv6})
def get_image_dir_path(instance):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'])
def get_image_file_path(instance):
"""Generate the full path for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'], 'disk')
def get_tilera_nfs_path(node_id):
"""Generate the path for an instances Tilera nfs."""
tilera_nfs_dir = "fs_" + str(node_id)
return os.path.join(CONF.baremetal.tftp_root, tilera_nfs_dir)
def get_partition_sizes(instance):
flavor = flavors.extract_flavor(instance)
root_mb = flavor['root_gb'] * 1024
swap_mb = flavor['swap']
if swap_mb < 1:
swap_mb = 1
return (root_mb, swap_mb)
def get_tftp_image_info(instance):
"""Generate the paths for tftp files for this instance.
Raises NovaException if
- instance does not contain kernel_id
"""
image_info = {
'kernel': [None, None],
}
try:
image_info['kernel'][0] = str(instance['kernel_id'])
except KeyError:
pass
missing_labels = []
for label in image_info.keys():
(uuid, path) = image_info[label]
if not uuid:
missing_labels.append(label)
else:
image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
instance['uuid'], label)
if missing_labels:
raise exception.NovaException(_(
"Can not activate Tilera bootloader. "
"The following boot parameters "
"were not passed to baremetal driver: %s") % missing_labels)
return image_info
class Tilera(base.NodeDriver):
"""Tilera bare metal driver."""
def __init__(self, virtapi):
super(Tilera, self).__init__(virtapi)
def _collect_mac_addresses(self, context, node):
macs = set()
for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
if nic['address']:
macs.add(nic['address'])
return sorted(macs)
def _cache_tftp_images(self, context, instance, image_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
fileutils.ensure_tree(
os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
LOG.debug("Fetching kernel and ramdisk for instance %s",
instance['name'])
for label in image_info.keys():
(uuid, path) = image_info[label]
bm_utils.cache_image(
context=context,
target=path,
image_id=uuid,
user_id=instance['user_id'],
project_id=instance['project_id'],
)
def _cache_image(self, context, instance, image_meta):
"""Fetch the instance's image from Glance
This method pulls the relevant AMI and associated kernel and ramdisk,
and the deploy kernel and ramdisk from Glance, and writes them
to the appropriate places on local disk.
Both sets of kernel and ramdisk are needed for Tilera booting, so these
are stored under CONF.baremetal.tftp_root.
At present, the AMI is cached and certain files are injected.
Debian/ubuntu-specific assumptions are made regarding the injected
files. In a future revision, this functionality will be replaced by a
more scalable and os-agnostic approach: the deployment ramdisk will
fetch from Glance directly, and write its own last-mile configuration.
"""
fileutils.ensure_tree(get_image_dir_path(instance))
image_path = get_image_file_path(instance)
LOG.debug("Fetching image %(ami)s for instance %(name)s",
{'ami': image_meta['id'], 'name': instance['name']})
bm_utils.cache_image(context=context,
target=image_path,
image_id=image_meta['id'],
user_id=instance['user_id'],
project_id=instance['project_id'],
clean=True,
)
return [image_meta['id'], image_path]
def _inject_into_image(self, context, node, instance, network_info,
injected_files=None, admin_password=None):
"""Inject last-mile configuration into instances image
Much of this method is a hack around DHCP and cloud-init
not working together with baremetal provisioning yet.
"""
partition = None
if not instance['kernel_id']:
partition = "1"
ssh_key = None
if 'key_data' in instance and instance['key_data']:
ssh_key = str(instance['key_data'])
if injected_files is None:
injected_files = []
else:
injected_files = list(injected_files)
net_config = build_network_config(network_info)
if instance['hostname']:
injected_files.append(('/etc/hostname', instance['hostname']))
LOG.debug("Injecting files into image for instance %(name)s",
{'name': instance['name']})
bm_utils.inject_into_image(
image=get_image_file_path(instance),
key=ssh_key,
net=net_config,
metadata=utils.instance_meta(instance),
admin_password=admin_password,
files=injected_files,
partition=partition,
)
def cache_images(self, context, node, instance,
admin_password, image_meta, injected_files, network_info):
"""Prepare all the images for this instance."""
tftp_image_info = get_tftp_image_info(instance)
self._cache_tftp_images(context, instance, tftp_image_info)
self._cache_image(context, instance, image_meta)
self._inject_into_image(context, node, instance, network_info,
injected_files, admin_password)
def destroy_images(self, context, node, instance):
"""Delete instance's image file."""
bm_utils.unlink_without_raise(get_image_file_path(instance))
bm_utils.rmtree_without_raise(get_image_dir_path(instance))
def activate_bootloader(self, context, node, instance, network_info):
"""Configure Tilera boot loader for an instance
Kernel and ramdisk images are downloaded by cache_tftp_images,
and stored in /tftpboot/{uuid}/
        This method writes the instance's config file, and then creates
symlinks for each MAC address in the instance.
By default, the complete layout looks like this::
/tftpboot/
./{uuid}/
kernel
./fs_node_id/
"""
get_tftp_image_info(instance)
(root_mb, swap_mb) = get_partition_sizes(instance)
tilera_nfs_path = get_tilera_nfs_path(node['id'])
image_file_path = get_image_file_path(instance)
deployment_key = bm_utils.random_alnum(32)
db.bm_node_update(context, node['id'],
{'deploy_key': deployment_key,
'image_path': image_file_path,
'pxe_config_path': tilera_nfs_path,
'root_mb': root_mb,
'swap_mb': swap_mb})
if os.path.exists(image_file_path) and \
os.path.exists(tilera_nfs_path):
utils.execute('mount', '-o', 'loop', image_file_path,
tilera_nfs_path, run_as_root=True)
def deactivate_bootloader(self, context, node, instance):
"""Delete Tilera bootloader images and config."""
try:
db.bm_node_update(context, node['id'],
{'deploy_key': None,
'image_path': None,
'pxe_config_path': None,
'root_mb': 0,
'swap_mb': 0})
except exception.NodeNotFound:
pass
tilera_nfs_path = get_tilera_nfs_path(node['id'])
if os.path.ismount(tilera_nfs_path):
utils.execute('rpc.mountd', run_as_root=True)
utils.execute('umount', '-f', tilera_nfs_path, run_as_root=True)
try:
image_info = get_tftp_image_info(instance)
except exception.NovaException:
pass
else:
for label in image_info.keys():
(uuid, path) = image_info[label]
bm_utils.unlink_without_raise(path)
try:
self._collect_mac_addresses(context, node)
except db_exc.DBError:
pass
if os.path.exists(os.path.join(CONF.baremetal.tftp_root,
instance['uuid'])):
bm_utils.rmtree_without_raise(
os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
def _iptables_set(self, node_ip, user_data):
"""Sets security setting (iptables:port) if needed.
iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
        The /tftpboot/iptables_rule script sets the iptables rule on the
        given node.
"""
rule_path = CONF.baremetal.tftp_root + "/iptables_rule"
if user_data is not None:
open_ip = base64.b64decode(user_data)
utils.execute(rule_path, node_ip, open_ip)
def activate_node(self, context, node, instance):
"""Wait for Tilera deployment to complete."""
locals = {'error': '', 'started': False}
try:
row = db.bm_node_get(context, node['id'])
if instance['uuid'] != row.get('instance_uuid'):
locals['error'] = _("Node associated with another instance"
" while waiting for deploy of %s")
status = row.get('task_state')
if (status == baremetal_states.DEPLOYING and
locals['started'] is False):
LOG.info(_('Tilera deploy started for instance %s')
% instance['uuid'])
locals['started'] = True
elif status in (baremetal_states.DEPLOYDONE,
baremetal_states.BUILDING,
baremetal_states.ACTIVE):
LOG.info(_("Tilera deploy completed for instance %s")
% instance['uuid'])
node_ip = node['pm_address']
user_data = instance['user_data']
try:
self._iptables_set(node_ip, user_data)
except Exception:
self.deactivate_bootloader(context, node, instance)
                    raise exception.NovaException(_(
                        "Node is in an unknown error state."))
elif status == baremetal_states.DEPLOYFAIL:
locals['error'] = _("Tilera deploy failed for instance %s")
except exception.NodeNotFound:
locals['error'] = _("Baremetal node deleted while waiting "
"for deployment of instance %s")
if locals['error']:
raise exception.InstanceDeployFailure(
locals['error'] % instance['uuid'])
def deactivate_node(self, context, node, instance):
pass


@ -1,169 +0,0 @@
# coding=utf-8
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Baremetal PDU power manager.
"""
import time
from oslo.config import cfg
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
opts = [
cfg.StrOpt('tile_pdu_ip',
default='10.0.100.1',
help='IP address of tilera pdu'),
cfg.StrOpt('tile_pdu_mgr',
default='/tftpboot/pdu_mgr',
help='Management script for tilera pdu'),
cfg.IntOpt('tile_pdu_off',
default=2,
help='Power status of tilera PDU is OFF'),
cfg.IntOpt('tile_pdu_on',
default=1,
help='Power status of tilera PDU is ON'),
cfg.IntOpt('tile_pdu_status',
default=9,
help='Power status of tilera PDU'),
cfg.IntOpt('tile_power_wait',
default=9,
help='Wait time in seconds until check the result '
'after tilera power operations'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
LOG = logging.getLogger(__name__)
class Pdu(base.PowerManager):
"""PDU Power Driver for Baremetal Nova Compute
    This PowerManager class provides a mechanism for controlling the power
    state of physical hardware via PDU calls.
"""
def __init__(self, node, **kwargs):
self.state = None
self.retries = None
self.node_id = node['id']
self.address = node['pm_address']
self.user = node['pm_user']
self.password = node['pm_password']
self.port = node['terminal_port']
if self.node_id is None:
raise exception.InvalidParameterValue(_("Node id not supplied "
"to PDU"))
if self.address is None:
raise exception.InvalidParameterValue(_("Address not supplied "
"to PDU"))
if self.user is None:
raise exception.InvalidParameterValue(_("User not supplied "
"to PDU"))
if self.password is None:
raise exception.InvalidParameterValue(_("Password not supplied "
"to PDU"))
def _exec_pdutool(self, mode):
"""Changes power state of the given node.
According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be
changed. /tftpboot/pdu_mgr script handles power management of
PDU (Power Distribution Unit).
"""
if mode == CONF.baremetal.tile_pdu_status:
try:
utils.execute('ping', '-c1', self.address,
check_exit_code=True)
return CONF.baremetal.tile_pdu_on
except processutils.ProcessExecutionError:
return CONF.baremetal.tile_pdu_off
else:
try:
utils.execute(CONF.baremetal.tile_pdu_mgr,
CONF.baremetal.tile_pdu_ip, mode)
time.sleep(CONF.baremetal.tile_power_wait)
return mode
except processutils.ProcessExecutionError:
LOG.exception(_("PDU failed"))
def _is_power(self, state):
out_err = self._exec_pdutool(CONF.baremetal.tile_pdu_status)
return out_err == state
def _power_on(self):
"""Turn the power to this node ON."""
try:
self._exec_pdutool(CONF.baremetal.tile_pdu_on)
if self._is_power(CONF.baremetal.tile_pdu_on):
self.state = baremetal_states.ACTIVE
else:
self.state = baremetal_states.ERROR
except Exception:
self.state = baremetal_states.ERROR
LOG.exception(_("PDU power on failed"))
def _power_off(self):
"""Turn the power to this node OFF."""
try:
self._exec_pdutool(CONF.baremetal.tile_pdu_off)
if self._is_power(CONF.baremetal.tile_pdu_off):
self.state = baremetal_states.DELETED
else:
self.state = baremetal_states.ERROR
except Exception:
self.state = baremetal_states.ERROR
LOG.exception(_("PDU power off failed"))
def activate_node(self):
"""Turns the power to node ON."""
if (self._is_power(CONF.baremetal.tile_pdu_on)
and self.state == baremetal_states.ACTIVE):
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
self._power_on()
return self.state
def reboot_node(self):
"""Cycles the power to a node."""
self._power_off()
self._power_on()
return self.state
def deactivate_node(self):
"""Turns the power to node OFF, regardless of current state."""
self._power_off()
return self.state
def is_power_on(self):
return self._is_power(CONF.baremetal.tile_pdu_on)
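
For reference: the removed Pdu class implemented the baremetal PowerManager contract (activate_node, reboot_node, deactivate_node, is_power_on). Below is a self-contained toy sketch of that contract using an in-memory fake instead of real PDU calls; the class and state names are illustrative, not nova code.

# Toy stand-in for the PowerManager contract; the state strings mirror the
# removed baremetal_states values, but nothing here touches nova or hardware.
ACTIVE = 'active'
DELETED = 'deleted'


class FakePowerManager(object):
    def __init__(self):
        self._on = False
        self.state = None

    def activate_node(self):
        """Turn the (fake) node on and report the resulting state."""
        self._on = True
        self.state = ACTIVE
        return self.state

    def deactivate_node(self):
        """Turn the (fake) node off, regardless of current state."""
        self._on = False
        self.state = DELETED
        return self.state

    def reboot_node(self):
        """Cycle the power: off, then on."""
        self.deactivate_node()
        return self.activate_node()

    def is_power_on(self):
        return self._on


pm = FakePowerManager()
assert pm.activate_node() == ACTIVE
assert pm.is_power_on()
assert pm.reboot_node() == ACTIVE
assert pm.deactivate_node() == DELETED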

View File

@ -1,132 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import shutil
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.disk import api as disk_api
from nova.virt import images
LOG = logging.getLogger(__name__)
def cache_image(context, target, image_id, user_id, project_id, clean=False):
if clean and os.path.exists(target):
os.unlink(target)
if not os.path.exists(target):
images.fetch_to_raw(context, image_id, target, user_id, project_id)
def inject_into_image(image, key, net, metadata, admin_password, files,
partition, use_cow=False):
try:
if os.path.exists(image):
disk_api.inject_data(image, key, net, metadata, admin_password,
files, partition, use_cow)
else:
LOG.warning(_('Image %s not found on disk storage. '
'Continuing without injecting data'), image)
except Exception as e:
LOG.warn(_("Failed to inject data into image %(image)s. "
"Error: %(e)s"), {'image': image, 'e': e})
def unlink_without_raise(path):
try:
os.unlink(path)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
LOG.warn(_("Failed to unlink %(path)s, error: %(e)s"),
{'path': path, 'e': e})
def rmtree_without_raise(path):
try:
if os.path.isdir(path):
shutil.rmtree(path)
except OSError as e:
LOG.warn(_("Failed to remove dir %(path)s, error: %(e)s"),
{'path': path, 'e': e})
def write_to_file(path, contents):
with open(path, 'w') as f:
f.write(contents)
def create_link_without_raise(source, link):
try:
os.symlink(source, link)
except OSError as e:
if e.errno == errno.EEXIST:
return
else:
LOG.warn(_("Failed to create symlink from %(source)s to %(link)s"
", error: %(e)s"),
{'source': source, 'link': link, 'e': e})
def random_alnum(count):
import random
import string
chars = string.ascii_uppercase + string.digits
return "".join(random.choice(chars) for _ in range(count))
def map_network_interfaces(network_info, use_ipv6=False):
# TODO(deva): fix assumption that device names begin with "eth"
# and fix assumption about ordering
if not isinstance(network_info, list):
network_info = [network_info]
interfaces = []
for id, vif in enumerate(network_info):
address_v6 = gateway_v6 = netmask_v6 = None
address_v4 = gateway_v4 = netmask_v4 = dns_v4 = None
if use_ipv6:
subnets_v6 = [s for s in vif['network']['subnets']
if s['version'] == 6]
if len(subnets_v6):
address_v6 = subnets_v6[0]['ips'][0]['address']
netmask_v6 = subnets_v6[0].as_netaddr()._prefixlen
gateway_v6 = subnets_v6[0]['gateway']['address']
subnets_v4 = [s for s in vif['network']['subnets']
if s['version'] == 4]
if len(subnets_v4):
address_v4 = subnets_v4[0]['ips'][0]['address']
netmask_v4 = subnets_v4[0].as_netaddr().netmask
gateway_v4 = subnets_v4[0]['gateway']['address']
dns_v4 = ' '.join([x['address'] for x in subnets_v4[0]['dns']])
interface = {
'name': 'eth%d' % id,
'address': address_v4,
'gateway': gateway_v4,
'netmask': netmask_v4,
'dns': dns_v4,
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6,
}
interfaces.append(interface)
return interfaces
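
For reference: map_network_interfaces above flattens nova's network_info structure into simple per-interface dicts for config and template generation. Below is a trimmed-down, IPv4-only sketch of the same idea; the sample data and helper name are hypothetical, not nova's real network model.

def map_ipv4_interfaces(network_info):
    """Flatten a network_info-like list into simple per-interface dicts."""
    interfaces = []
    for idx, vif in enumerate(network_info):
        subnets_v4 = [s for s in vif['network']['subnets']
                      if s['version'] == 4]
        address = gateway = None
        if subnets_v4:
            address = subnets_v4[0]['ips'][0]['address']
            gateway = subnets_v4[0]['gateway']['address']
        interfaces.append({'name': 'eth%d' % idx,
                           'address': address,
                           'gateway': gateway})
    return interfaces


# Hypothetical sample input, shaped loosely like nova's network_info.
sample = [{'network': {'subnets': [{'version': 4,
                                    'ips': [{'address': '192.0.2.10'}],
                                    'gateway': {'address': '192.0.2.1'}}]}}]
print(map_ipv4_interfaces(sample))
# -> [{'name': 'eth0', 'address': '192.0.2.10', 'gateway': '192.0.2.1'}]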

Some files were not shown because too many files have changed in this diff.