Adds Tilera back-end for baremetal

blueprint add-tilera-to-baremetal

The baremetal driver is a hypervisor driver for OpenStack Nova
Compute. Within the OpenStack framework it plays the same role
as the drivers for other hypervisors (libvirt, Xen, etc.). This
patch set adds a Tilera back-end to it, so that provisioning and
management of non-PXE Tilera physical hardware can be done with
the common cloud APIs and tools.

Change-Id: I356c06a07ace463779e3b84836d5000331e24814
Co-authored-by: Mikyung Kang <mkkang@isi.edu>
Co-authored-by: David Kang <dkang@isi.edu>
Co-authored-by: Ken Igarashi <igarashik@nttdocomo.co.jp>
Co-authored-by: Arata Notsu <notsu@virtualtech.jp>
Co-authored-by: Devananda van der Veen <devananda.vdv@gmail.com>
Author: Mikyung Kang
Date:   2013-03-26 17:58:04 -04:00
Parent: 123826e516
Commit: a27b7f99fe
5 changed files with 1113 additions and 1 deletion
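
For context, the nova.conf settings that select this back-end look roughly as
follows. This is a hedged sketch based on the flags used in the tests and the
documentation hunk below; the values are illustrative and exact option names
can vary by release.

# baremetal compute driver
compute_driver = nova.virt.baremetal.driver.BareMetalDriver

[baremetal]
driver = nova.virt.baremetal.tilera.Tilera
power_manager = nova.virt.baremetal.tilera_pdu.Pdu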

nova/tests/baremetal/test_tilera.py (new file)

@@ -0,0 +1,397 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for baremetal tilera driver."""
import os
from oslo.config import cfg
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.tests.baremetal.db import base as bm_db_base
from nova.tests.baremetal.db import utils as bm_db_utils
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import tilera
from nova.virt.baremetal import utils as bm_utils
from nova.virt.disk import api as disk_api
from nova.virt import fake as fake_virt
CONF = cfg.CONF
COMMON_FLAGS = dict(
firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
host='test_host',
)
BAREMETAL_FLAGS = dict(
driver='nova.virt.baremetal.tilera.Tilera',
instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
power_manager='nova.virt.baremetal.fake.FakePowerManager',
vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
group='baremetal',
)
class BareMetalTileraTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalTileraTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
self.driver = tilera.Tilera(fake_virt.FakeVirtAPI())
fake_image.stub_out_image_service(self.stubs)
self.addCleanup(fake_image.FakeImageService_reset)
self.context = utils.get_test_admin_context()
self.test_block_device_info = None,
self.instance = utils.get_test_instance()
self.test_network_info = utils.get_test_network_info(),
self.node_info = bm_db_utils.new_bm_node(
service_host='test_host',
cpus=4,
memory_mb=2048,
prov_mac_address='11:11:11:11:11:11',
)
self.nic_info = [
{'address': '22:22:22:22:22:22', 'datapath_id': '0x1',
'port_no': 1},
{'address': '33:33:33:33:33:33', 'datapath_id': '0x2',
'port_no': 2},
]
def _create_node(self):
self.node = db.bm_node_create(self.context, self.node_info)
for nic in self.nic_info:
db.bm_interface_create(
self.context,
self.node['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
self.instance['node'] = self.node['id']
self.spawn_params = dict(
admin_password='test_pass',
block_device_info=self.test_block_device_info,
context=self.context,
image_meta=utils.get_test_image_info(None,
self.instance),
injected_files=[('/fake/path', 'hello world')],
instance=self.instance,
network_info=self.test_network_info,
)
class TileraClassMethodsTestCase(BareMetalTileraTestCase):
def test_build_network_config(self):
net = utils.get_test_network_info(1)
config = tilera.build_network_config(net)
self.assertIn('eth0', config)
self.assertNotIn('eth1', config)
net = utils.get_test_network_info(2)
config = tilera.build_network_config(net)
self.assertIn('eth0', config)
self.assertIn('eth1', config)
def test_build_network_config_dhcp(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-dhcp.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0][1]['ips'][0]['ip'] = '1.2.3.4'
config = tilera.build_network_config(net)
self.assertIn('iface eth0 inet dhcp', config)
self.assertNotIn('address 1.2.3.4', config)
def test_build_network_config_static(self):
self.flags(
net_config_template='$pybasedir/nova/virt/baremetal/'
'net-static.ubuntu.template',
group='baremetal',
)
net = utils.get_test_network_info()
net[0][1]['ips'][0]['ip'] = '1.2.3.4'
config = tilera.build_network_config(net)
self.assertIn('iface eth0 inet static', config)
self.assertIn('address 1.2.3.4', config)
def test_image_dir_path(self):
self.assertEqual(
tilera.get_image_dir_path(self.instance),
os.path.join(CONF.instances_path, 'instance-00000001'))
def test_image_file_path(self):
self.assertEqual(
tilera.get_image_file_path(self.instance),
os.path.join(
CONF.instances_path, 'instance-00000001', 'disk'))
def test_tilera_nfs_path(self):
self._create_node()
self.node['id'] = '123'
tilera_nfs_dir = "fs_" + self.node['id']
self.assertEqual(
tilera.get_tilera_nfs_path(self.node['id']),
os.path.join(CONF.baremetal.tftp_root,
tilera_nfs_dir))
def test_get_partition_sizes(self):
# default "kinda.big" instance
sizes = tilera.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1024)
def test_swap_not_zero(self):
# override swap to 0
instance_type = utils.get_test_instance_type(self.context)
instance_type['swap'] = 0
self.instance = utils.get_test_instance(self.context, instance_type)
sizes = tilera.get_partition_sizes(self.instance)
self.assertEqual(sizes[0], 40960)
self.assertEqual(sizes[1], 1)
def test_get_tftp_image_info(self):
# Tilera case needs only kernel_id.
self.instance['kernel_id'] = 'aaaa'
self.instance['uuid'] = 'fake-uuid'
# Here, we confirm both that kernel_id was set
# and that the proper paths are getting set for all of them
base = os.path.join(CONF.baremetal.tftp_root, self.instance['uuid'])
res = tilera.get_tftp_image_info(self.instance)
expected = {
'kernel': ['aaaa', os.path.join(base, 'kernel')],
}
self.assertEqual(res, expected)
class TileraPrivateMethodsTestCase(BareMetalTileraTestCase):
def test_collect_mac_addresses(self):
self._create_node()
address_list = [nic['address'] for nic in self.nic_info]
address_list.append(self.node_info['prov_mac_address'])
address_list.sort()
macs = self.driver._collect_mac_addresses(self.context, self.node)
self.assertEqual(macs, address_list)
def test_cache_tftp_images(self):
self.instance['kernel_id'] = 'aaaa'
image_info = tilera.get_tftp_image_info(self.instance)
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(os.path.join(CONF.baremetal.tftp_root,
self.instance['uuid'])).AndReturn(True)
for uuid, path in [image_info[label] for label in image_info]:
os.path.exists(path).AndReturn(True)
self.mox.ReplayAll()
self.driver._cache_tftp_images(
self.context, self.instance, image_info)
self.mox.VerifyAll()
def test_cache_image(self):
self.mox.StubOutWithMock(os, 'makedirs')
self.mox.StubOutWithMock(os.path, 'exists')
os.makedirs(tilera.get_image_dir_path(self.instance)).\
AndReturn(True)
os.path.exists(tilera.get_image_file_path(self.instance)).\
AndReturn(True)
self.mox.ReplayAll()
image_meta = utils.get_test_image_info(
self.context, self.instance)
self.driver._cache_image(
self.context, self.instance, image_meta)
self.mox.VerifyAll()
def test_inject_into_image(self):
self._create_node()
files = []
self.instance['hostname'] = 'fake hostname'
files.append(('/etc/hostname', 'fake hostname'))
self.instance['key_data'] = 'fake ssh key'
net_info = utils.get_test_network_info(1)
net = tilera.build_network_config(net_info)
admin_password = 'fake password'
self.mox.StubOutWithMock(disk_api, 'inject_data')
disk_api.inject_data(
admin_password=admin_password,
image=tilera.get_image_file_path(self.instance),
key='fake ssh key',
metadata=None,
partition=None,
net=net,
files=files,
).AndReturn(True)
self.mox.ReplayAll()
self.driver._inject_into_image(
self.context, self.node, self.instance,
network_info=net_info,
admin_password=admin_password,
injected_files=None)
self.mox.VerifyAll()
class TileraPublicMethodsTestCase(BareMetalTileraTestCase):
def test_cache_images(self):
self._create_node()
self.mox.StubOutWithMock(tilera, "get_tftp_image_info")
self.mox.StubOutWithMock(self.driver, "_cache_tftp_images")
self.mox.StubOutWithMock(self.driver, "_cache_image")
self.mox.StubOutWithMock(self.driver, "_inject_into_image")
tilera.get_tftp_image_info(self.instance).AndReturn([])
self.driver._cache_tftp_images(self.context, self.instance, [])
self.driver._cache_image(self.context, self.instance, [])
self.driver._inject_into_image(self.context, self.node, self.instance,
self.test_network_info, None, '')
self.mox.ReplayAll()
self.driver.cache_images(
self.context, self.node, self.instance,
admin_password='',
image_meta=[],
injected_files=None,
network_info=self.test_network_info,
)
self.mox.VerifyAll()
def test_destroy_images(self):
self._create_node()
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
bm_utils.unlink_without_raise(tilera.get_image_file_path(
self.instance))
bm_utils.rmtree_without_raise(tilera.get_image_dir_path(self.instance))
self.mox.ReplayAll()
self.driver.destroy_images(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_bootloader_passes_details(self):
self._create_node()
macs = [nic['address'] for nic in self.nic_info]
macs.append(self.node_info['prov_mac_address'])
macs.sort()
image_info = {
'kernel': [None, 'cccc'],
}
self.instance['uuid'] = 'fake-uuid'
iqn = "iqn-%s" % self.instance['uuid']
tilera_config = 'this is a fake tilera config'
self.instance['uuid'] = 'fake-uuid'
tilera_path = tilera.get_tilera_nfs_path(self.instance)
image_path = tilera.get_image_file_path(self.instance)
self.mox.StubOutWithMock(tilera, 'get_tftp_image_info')
self.mox.StubOutWithMock(tilera, 'get_partition_sizes')
tilera.get_tftp_image_info(self.instance).AndReturn(image_info)
tilera.get_partition_sizes(self.instance).AndReturn((0, 0))
self.mox.ReplayAll()
self.driver.activate_bootloader(self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_and_deactivate_bootloader(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
tilera_path = tilera.get_tilera_nfs_path(self.instance)
image_path = tilera.get_image_file_path(self.instance)
self.mox.ReplayAll()
# activate and deactivate the bootloader
# and check the deployment task_state in the database
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is None)
self.driver.activate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is not None)
self.driver.deactivate_bootloader(self.context, self.node,
self.instance)
row = db.bm_node_get(self.context, 1)
self.assertTrue(row['deploy_key'] is None)
self.mox.VerifyAll()
def test_deactivate_bootloader_for_nonexistent_instance(self):
self._create_node()
self.node['id'] = 'fake-node-id'
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
self.mox.StubOutWithMock(tilera, 'get_tftp_image_info')
self.mox.StubOutWithMock(self.driver, '_collect_mac_addresses')
tilera_path = tilera.get_tilera_nfs_path(self.node['id'])
tilera.get_tftp_image_info(self.instance).\
AndRaise(exception.NovaException)
self.driver._collect_mac_addresses(self.context, self.node).\
AndRaise(db_exc.DBError)
self.mox.ReplayAll()
self.driver.deactivate_bootloader(
self.context, self.node, self.instance)
self.mox.VerifyAll()
def test_activate_node(self):
self._create_node()
self.instance['uuid'] = 'fake-uuid'
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYING,
'instance_uuid': 'fake-uuid'})
# test DEPLOYDONE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.DEPLOYDONE})
self.driver.activate_node(self.context, self.node, self.instance)
# test no deploy -- state is just ACTIVE
db.bm_node_update(self.context, 1,
{'task_state': baremetal_states.ACTIVE})
self.driver.activate_node(self.context, self.node, self.instance)
# test node gone
db.bm_node_destroy(self.context, 1)
self.assertRaises(exception.InstanceDeployFailure,
self.driver.activate_node,
self.context, self.node, self.instance)

nova/tests/baremetal/test_tilera_pdu.py (new file)

@@ -0,0 +1,141 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for baremetal PDU power manager."""
from oslo.config import cfg
from nova import test
from nova.tests.baremetal.db import utils as bm_db_utils
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import tilera_pdu
from nova.virt.baremetal import utils as bm_utils
CONF = cfg.CONF
class BareMetalPduTestCase(test.TestCase):
def setUp(self):
super(BareMetalPduTestCase, self).setUp()
self.node = bm_db_utils.new_bm_node(
id=123,
pm_address='fake-address',
pm_user='fake-user',
pm_password='fake-password')
self.tilera_pdu = tilera_pdu.Pdu(self.node)
self.tile_pdu_on = 1
self.tile_pdu_off = 2
self.tile_pdu_status = 9
def test_construct(self):
self.assertEqual(self.tilera_pdu.node_id, 123)
self.assertEqual(self.tilera_pdu.address, 'fake-address')
self.assertEqual(self.tilera_pdu.user, 'fake-user')
self.assertEqual(self.tilera_pdu.password, 'fake-password')
def test_exec_pdutool(self):
self.flags(tile_pdu_mgr='fake-pdu-mgr', group='baremetal')
self.flags(tile_pdu_ip='fake-address', group='baremetal')
self.mox.StubOutWithMock(utils, 'execute')
self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
args = [
'fake-pdu-mgr',
'fake-address',
self.tile_pdu_on,
]
utils.execute(*args).AndReturn('')
self.mox.ReplayAll()
self.tilera_pdu._exec_pdutool(self.tile_pdu_on)
self.mox.VerifyAll()
def test_is_power(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_on)
self.mox.ReplayAll()
self.tilera_pdu._is_power(self.tile_pdu_on)
self.mox.VerifyAll()
def test_power_already_on(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_on).AndReturn(None)
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_on)
self.mox.ReplayAll()
self.tilera_pdu.state = baremetal_states.DELETED
self.tilera_pdu._power_on()
self.mox.VerifyAll()
self.assertEqual(self.tilera_pdu.state, baremetal_states.ACTIVE)
def test_power_on_ok(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_on).AndReturn(None)
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_on)
self.mox.ReplayAll()
self.tilera_pdu.state = baremetal_states.DELETED
self.tilera_pdu._power_on()
self.mox.VerifyAll()
self.assertEqual(self.tilera_pdu.state, baremetal_states.ACTIVE)
def test_power_on_fail(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_on).AndReturn(None)
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_off)
self.mox.ReplayAll()
self.tilera_pdu.state = baremetal_states.DELETED
self.tilera_pdu._power_on()
self.mox.VerifyAll()
self.assertEqual(self.tilera_pdu.state, baremetal_states.ERROR)
def test_power_on_max_retries(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_on).AndReturn(None)
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_off)
self.mox.ReplayAll()
self.tilera_pdu.state = baremetal_states.DELETED
self.tilera_pdu._power_on()
self.mox.VerifyAll()
self.assertEqual(self.tilera_pdu.state, baremetal_states.ERROR)
def test_power_off_ok(self):
self.mox.StubOutWithMock(self.tilera_pdu, '_exec_pdutool')
self.tilera_pdu._exec_pdutool(self.tile_pdu_off).AndReturn(None)
self.tilera_pdu._exec_pdutool(self.tile_pdu_status).AndReturn(
self.tile_pdu_off)
self.mox.ReplayAll()
self.tilera_pdu.state = baremetal_states.ACTIVE
self.tilera_pdu._power_off()
self.mox.VerifyAll()
self.assertEqual(self.tilera_pdu.state, baremetal_states.DELETED)


@@ -40,7 +40,7 @@ Additional setting for bare-metal provisioning [nova.conf]
# baremetal compute driver
compute_driver = nova.virt.baremetal.driver.BareMetalDriver
-baremetal_driver = {nova.virt.baremetal.tilera.TILERA | nova.virt.baremetal.pxe.PXE}
+baremetal_driver = {nova.virt.baremetal.tilera.Tilera | nova.virt.baremetal.pxe.PXE}
power_manager = {nova.virt.baremetal.tilera_pdu.Pdu | nova.virt.baremetal.ipmi.Ipmi}
# instance_type_extra_specs this baremetal compute

nova/virt/baremetal/tilera.py (new executable file, 399 lines)

@@ -0,0 +1,399 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for Tilera bare-metal nodes.
"""
import base64
import os
from oslo.config import cfg
from nova.compute import instance_types
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import db
from nova.virt.baremetal import utils as bm_utils
tilera_opts = [
cfg.StrOpt('net_config_template',
default='$pybasedir/nova/virt/baremetal/'
'net-dhcp.ubuntu.template',
help='Template file for injected network config'),
]
LOG = logging.getLogger(__name__)
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(tilera_opts, baremetal_group)
CONF.import_opt('use_ipv6', 'nova.netconf')
CHEETAH = None
def _get_cheetah():
global CHEETAH
if CHEETAH is None:
from Cheetah import Template
CHEETAH = Template.Template
return CHEETAH
def build_network_config(network_info):
try:
assert isinstance(network_info, list)
except AssertionError:
network_info = [network_info]
interfaces = []
for id, (network, mapping) in enumerate(network_info):
address_v6 = None
gateway_v6 = None
netmask_v6 = None
if CONF.use_ipv6:
address_v6 = mapping['ip6s'][0]['ip']
netmask_v6 = mapping['ip6s'][0]['netmask']
gateway_v6 = mapping['gateway_v6']
interface = {
'name': 'eth%d' % id,
'address': mapping['ips'][0]['ip'],
'gateway': mapping['gateway'],
'netmask': mapping['ips'][0]['netmask'],
'dns': ' '.join(mapping['dns']),
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6,
}
interfaces.append(interface)
cheetah = _get_cheetah()
network_config = str(cheetah(
open(CONF.baremetal.net_config_template).read(),
searchList=[
{'interfaces': interfaces,
'use_ipv6': CONF.use_ipv6,
}
]))
return network_config
def get_image_dir_path(instance):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'])
def get_image_file_path(instance):
"""Generate the full path for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'], 'disk')
def get_tilera_nfs_path(node_id):
"""Generate the path for an instances Tilera nfs."""
tilera_nfs_dir = "fs_" + str(node_id)
return os.path.join(CONF.baremetal.tftp_root, tilera_nfs_dir)
def get_partition_sizes(instance):
instance_type = instance_types.extract_instance_type(instance)
root_mb = instance_type['root_gb'] * 1024
swap_mb = instance_type['swap']
if swap_mb < 1:
swap_mb = 1
return (root_mb, swap_mb)
def get_tftp_image_info(instance):
"""
Generate the paths for tftp files for this instance.
Raises NovaException if
- instance does not contain kernel_id
"""
image_info = {
'kernel': [None, None],
}
try:
image_info['kernel'][0] = str(instance['kernel_id'])
except KeyError as e:
pass
missing_labels = []
for label in image_info.keys():
(uuid, path) = image_info[label]
if not uuid:
missing_labels.append(label)
else:
image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
instance['uuid'], label)
if missing_labels:
raise exception.NovaException(_(
"Can not activate Tilera bootloader. "
"The following boot parameters "
"were not passed to baremetal driver: %s") % missing_labels)
return image_info
class Tilera(base.NodeDriver):
"""Tilera bare metal driver."""
def __init__(self, virtapi):
super(Tilera, self).__init__(virtapi)
def _collect_mac_addresses(self, context, node):
macs = set()
macs.add(db.bm_node_get(context, node['id'])['prov_mac_address'])
for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
if nic['address']:
macs.add(nic['address'])
return sorted(macs)
def _cache_tftp_images(self, context, instance, image_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
fileutils.ensure_tree(
os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
instance['name'])
for label in image_info.keys():
(uuid, path) = image_info[label]
bm_utils.cache_image(
context=context,
target=path,
image_id=uuid,
user_id=instance['user_id'],
project_id=instance['project_id'],
)
def _cache_image(self, context, instance, image_meta):
"""Fetch the instance's image from Glance
This method pulls the relevant AMI and associated kernel and ramdisk,
and the deploy kernel and ramdisk from Glance, and writes them
to the appropriate places on local disk.
Both sets of kernel and ramdisk are needed for Tilera booting, so these
are stored under CONF.baremetal.tftp_root.
At present, the AMI is cached and certain files are injected.
Debian/ubuntu-specific assumptions are made regarding the injected
files. In a future revision, this functionality will be replaced by a
more scalable and os-agnostic approach: the deployment ramdisk will
fetch from Glance directly, and write its own last-mile configuration.
"""
fileutils.ensure_tree(get_image_dir_path(instance))
image_path = get_image_file_path(instance)
LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
{'ami': image_meta['id'], 'name': instance['name']})
bm_utils.cache_image(context=context,
target=image_path,
image_id=image_meta['id'],
user_id=instance['user_id'],
project_id=instance['project_id']
)
return [image_meta['id'], image_path]
def _inject_into_image(self, context, node, instance, network_info,
injected_files=None, admin_password=None):
"""Inject last-mile configuration into instances image
Much of this method is a hack around DHCP and cloud-init
not working together with baremetal provisioning yet.
"""
partition = None
if not instance['kernel_id']:
partition = "1"
ssh_key = None
if 'key_data' in instance and instance['key_data']:
ssh_key = str(instance['key_data'])
if injected_files is None:
injected_files = []
else:
injected_files = list(injected_files)
net_config = build_network_config(network_info)
if instance['hostname']:
injected_files.append(('/etc/hostname', instance['hostname']))
LOG.debug(_("Injecting files into image for instance %(name)s") %
{'name': instance['name']})
bm_utils.inject_into_image(
image=get_image_file_path(instance),
key=ssh_key,
net=net_config,
metadata=instance['metadata'],
admin_password=admin_password,
files=injected_files,
partition=partition,
)
def cache_images(self, context, node, instance,
admin_password, image_meta, injected_files, network_info):
"""Prepare all the images for this instance."""
tftp_image_info = get_tftp_image_info(instance)
self._cache_tftp_images(context, instance, tftp_image_info)
self._cache_image(context, instance, image_meta)
self._inject_into_image(context, node, instance, network_info,
injected_files, admin_password)
def destroy_images(self, context, node, instance):
"""Delete instance's image file."""
bm_utils.unlink_without_raise(get_image_file_path(instance))
bm_utils.rmtree_without_raise(get_image_dir_path(instance))
def activate_bootloader(self, context, node, instance):
"""Configure Tilera boot loader for an instance
Kernel and ramdisk images are downloaded by cache_tftp_images,
and stored in /tftpboot/{uuid}/
This method writes the instance's config file, and then creates
symlinks for each MAC address in the instance.
By default, the complete layout looks like this:
/tftpboot/
./{uuid}/
kernel
./fs_node_id/
"""
image_info = get_tftp_image_info(instance)
(root_mb, swap_mb) = get_partition_sizes(instance)
tilera_nfs_path = get_tilera_nfs_path(node['id'])
image_file_path = get_image_file_path(instance)
deployment_key = bm_utils.random_alnum(32)
db.bm_node_update(context, node['id'],
{'deploy_key': deployment_key,
'image_path': image_file_path,
'pxe_config_path': tilera_nfs_path,
'root_mb': root_mb,
'swap_mb': swap_mb})
if os.path.exists(image_file_path) and \
os.path.exists(tilera_nfs_path):
utils.execute('mount', '-o', 'loop', image_file_path,
tilera_nfs_path, run_as_root=True)
def deactivate_bootloader(self, context, node, instance):
"""Delete Tilera bootloader images and config."""
try:
db.bm_node_update(context, node['id'],
{'deploy_key': None,
'image_path': None,
'pxe_config_path': None,
'root_mb': 0,
'swap_mb': 0})
except exception.NodeNotFound:
pass
tilera_nfs_path = get_tilera_nfs_path(node['id'])
if os.path.ismount(tilera_nfs_path):
utils.execute('rpc.mountd', run_as_root=True)
utils.execute('umount', '-f', tilera_nfs_path, run_as_root=True)
try:
image_info = get_tftp_image_info(instance)
except exception.NovaException:
pass
else:
for label in image_info.keys():
(uuid, path) = image_info[label]
bm_utils.unlink_without_raise(path)
try:
macs = self._collect_mac_addresses(context, node)
except db_exc.DBError:
pass
if os.path.exists(os.path.join(CONF.baremetal.tftp_root,
instance['uuid'])):
bm_utils.rmtree_without_raise(
os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
def _iptables_set(self, node_ip, user_data):
"""
Sets a security setting (an iptables port rule) if needed:
iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP
The /tftpboot/iptables_rule script sets the iptables rule on the given node.
"""
rule_path = CONF.baremetal.tftp_root + "/iptables_rule"
if user_data is not None:
open_ip = base64.b64decode(user_data)
utils.execute(rule_path, node_ip, open_ip)
def activate_node(self, context, node, instance):
"""Wait for Tilera deployment to complete."""
locals = {'error': '', 'started': False}
try:
row = db.bm_node_get(context, node['id'])
if instance['uuid'] != row.get('instance_uuid'):
locals['error'] = _("Node associated with another instance"
" while waiting for deploy of %s")
status = row.get('task_state')
if (status == baremetal_states.DEPLOYING and
locals['started'] == False):
LOG.info(_('Tilera deploy started for instance %s')
% instance['uuid'])
locals['started'] = True
elif status in (baremetal_states.DEPLOYDONE,
baremetal_states.BUILDING,
baremetal_states.ACTIVE):
LOG.info(_("Tilera deploy completed for instance %s")
% instance['uuid'])
node_ip = node['pm_address']
user_data = instance['user_data']
try:
self._iptables_set(node_ip, user_data)
except Exception as ex:
self.deactivate_bootloader(context, node, instance)
raise exception.NovaException(_("Node is in an "
"unknown error state."))
elif status == baremetal_states.DEPLOYFAIL:
locals['error'] = _("Tilera deploy failed for instance %s")
except exception.NodeNotFound:
locals['error'] = _("Baremetal node deleted while waiting "
"for deployment of instance %s")
if locals['error']:
raise exception.InstanceDeployFailure(
locals['error'] % instance['uuid'])
def deactivate_node(self, context, node, instance):
pass
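
To make the path layout described in activate_bootloader concrete, here is a
small standalone sketch (not part of the patch) that mirrors how the helper
functions above build their paths; INSTANCES_PATH and TFTP_ROOT stand in for
CONF.instances_path and CONF.baremetal.tftp_root and are illustrative
assumptions.

import os

# Illustrative stand-ins for CONF.instances_path and CONF.baremetal.tftp_root.
INSTANCES_PATH = '/var/lib/nova/instances'
TFTP_ROOT = '/tftpboot'

def image_file_path(instance_name):
    # Mirrors get_image_file_path(): <instances_path>/<instance name>/disk
    return os.path.join(INSTANCES_PATH, instance_name, 'disk')

def tftp_kernel_path(instance_uuid):
    # Mirrors get_tftp_image_info(): <tftp_root>/<instance uuid>/kernel
    return os.path.join(TFTP_ROOT, instance_uuid, 'kernel')

def tilera_nfs_path(node_id):
    # Mirrors get_tilera_nfs_path(): <tftp_root>/fs_<node id>
    return os.path.join(TFTP_ROOT, 'fs_%s' % node_id)

print(image_file_path('instance-00000001'))  # .../instance-00000001/disk
print(tftp_kernel_path('fake-uuid'))         # /tftpboot/fake-uuid/kernel
print(tilera_nfs_path(123))                  # /tftpboot/fs_123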

nova/virt/baremetal/tilera_pdu.py (new executable file, 175 lines)

@@ -0,0 +1,175 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright (c) 2011-2013 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Baremetal PDU power manager.
"""
import time
from oslo.config import cfg
from nova import exception
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
opts = [
cfg.StrOpt('tile_pdu_ip',
default='10.0.100.1',
help='ip address of tilera pdu'),
cfg.StrOpt('tile_pdu_mgr',
default='/tftpboot/pdu_mgr',
help='management script for tilera pdu'),
cfg.IntOpt('tile_pdu_off',
default=2,
help='power status of tilera PDU is OFF'),
cfg.IntOpt('tile_pdu_on',
default=1,
help='power status of tilera PDU is ON'),
cfg.IntOpt('tile_pdu_status',
default=9,
help='power status of tilera PDU'),
cfg.IntOpt('tile_power_wait',
default=9,
help='Wait time in seconds before checking the result '
'of a Tilera power operation'),
]
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
LOG = logging.getLogger(__name__)
class Pdu(base.PowerManager):
"""PDU Power Driver for Baremetal Nova Compute
This PowerManager class provides mechanism for controlling the power state
of physical hardware via PDU calls.
"""
def __init__(self, node, **kwargs):
self.state = None
self.retries = None
self.node_id = node['id']
self.address = node['pm_address']
self.user = node['pm_user']
self.password = node['pm_password']
self.port = node['terminal_port']
if self.node_id == None:
raise exception.InvalidParameterValue(_("Node id not supplied "
"to PDU"))
if self.address == None:
raise exception.InvalidParameterValue(_("Address not supplied "
"to PDU"))
if self.user == None:
raise exception.InvalidParameterValue(_("User not supplied "
"to PDU"))
if self.password == None:
raise exception.InvalidParameterValue(_("Password not supplied "
"to PDU"))
def _exec_pdutool(self, mode):
"""
Changes the power state of the given node according to the mode
(1: ON, 2: OFF, 3: REBOOT). The /tftpboot/pdu_mgr script handles
power management of the PDU (Power Distribution Unit); a status
query instead pings the node to check whether it is powered on.
"""
if mode == CONF.baremetal.tile_pdu_status:
try:
utils.execute('ping', '-c1', self.address,
check_exit_code=True)
return CONF.baremetal.tile_pdu_on
except exception.ProcessExecutionError:
return CONF.baremetal.tile_pdu_off
else:
try:
out = utils.execute(CONF.baremetal.tile_pdu_mgr,
CONF.baremetal.tile_pdu_ip, mode)
time.sleep(CONF.baremetal.tile_power_wait)
return mode
except exception.ProcessExecutionError:
LOG.exception(_("PDU failed"))
def _is_power(self, state):
out_err = self._exec_pdutool(CONF.baremetal.tile_pdu_status)
return out_err == state
def _power_on(self):
"""Turn the power to this node ON."""
try:
self._exec_pdutool(CONF.baremetal.tile_pdu_on)
if self._is_power(CONF.baremetal.tile_pdu_on):
self.state = baremetal_states.ACTIVE
else:
self.state = baremetal_states.ERROR
except Exception:
self.state = baremetal_states.ERROR
LOG.exception(_("PDU power on failed"))
def _power_off(self):
"""Turn the power to this node OFF."""
try:
self._exec_pdutool(CONF.baremetal.tile_pdu_off)
if self._is_power(CONF.baremetal.tile_pdu_off):
self.state = baremetal_states.DELETED
else:
self.state = baremetal_states.ERROR
except Exception:
self.state = baremetal_states.ERROR
LOG.exception(_("PDU power off failed"))
def activate_node(self):
"""Turns the power to node ON."""
if (self._is_power(CONF.baremetal.tile_pdu_on)
and self.state == baremetal_states.ACTIVE):
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
self._power_on()
return self.state
def reboot_node(self):
"""Cycles the power to a node."""
self._power_off()
self._power_on()
return self.state
def deactivate_node(self):
"""Turns the power to node OFF, regardless of current state."""
self._power_off()
return self.state
def is_power_on(self):
return self._is_power(CONF.baremetal.tile_pdu_on)
def start_console(self):
pass
def stop_console(self):
pass
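
Finally, a hedged usage sketch of the Pdu power manager above: the node dict
carries only the keys the constructor reads, the values are fake, and actually
running this requires the nova tree plus a working pdu_mgr script at the
configured path.

from nova.virt.baremetal import tilera_pdu

# Minimal node record with the fields Pdu.__init__ reads; values are fake.
node = {
    'id': 123,
    'pm_address': '10.0.100.2',
    'pm_user': 'fake-user',
    'pm_password': 'fake-password',
    'terminal_port': None,
}

pdu = tilera_pdu.Pdu(node)
# activate_node() powers the node on via _power_on() and returns the
# resulting baremetal state (ACTIVE on success, ERROR otherwise).
state = pdu.activate_node()
print(state)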