Deprecate Ironic compute driver and sched manager

Updates the Ironic compute driver and scheduler host manager so that they
subclass the driver classes that now live in the Nova tree. This should
de-duplicate code maintenance across Nova and Ironic and also give end
users an easier upgrade path (with deprecation log messages).

This patch also removes the old Ironic compute driver tests and classes
since those now live in Nova.

This patch also removes a line from tox.ini that copied the second set of
unit test output; that output no longer exists, since we no longer run
unit tests on nova code in this tree.

Co-Authored-By: Dan Prince <dprince@redhat.com>
Co-Authored-By: Devananda van der Veen <devananda.vdv@gmail.com>

Change-Id: Ic194f779eca470e4a4dd8a3fbd31ca5a6f6b4f7e
Michael Davies 2014-09-08 19:44:50 +00:00, committed by Devananda van der Veen
parent 95d1262450
commit 93edd4f380
23 changed files with 28 additions and 3629 deletions
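
The pattern applied throughout the diff below is the standard deprecation
shim: the old import path keeps working, but each class shrinks to a
subclass of the relocated Nova implementation that logs a warning when
instantiated. A minimal sketch of the idea, with illustrative names rather
than the exact classes in this commit:

import logging

LOG = logging.getLogger(__name__)


class RelocatedManager(object):
    """Stands in for the implementation that now lives in the Nova tree."""
    pass


class DeprecatedManager(RelocatedManager):
    """Old location, kept only so existing deployments keep loading."""

    def __init__(self, *args, **kwargs):
        super(DeprecatedManager, self).__init__(*args, **kwargs)
        LOG.warning('DeprecatedManager is deprecated; '
                    'please use RelocatedManager.')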

ironic/nova/scheduler/baremetal_host_manager.py

@@ -1,42 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manage hosts in the current zone.
"""
import ironic.nova.scheduler.base_baremetal_host_manager as bbhm
from nova.scheduler import host_manager
class BaremetalNodeState(bbhm.BaseBaremetalNodeState):
"""Mutable and immutable information tracked for a host.
This is an attempt to remove the ad-hoc data structures
previously used and lock down access.
"""
pass
class BaremetalHostManager(bbhm.BaseBaremetalHostManager):
"""Bare-Metal HostManager class."""
def host_state_cls(self, host, node, **kwargs):
"""Factory function/property to create a new HostState."""
compute = kwargs.get('compute')
if compute and compute.get('cpu_info') == 'baremetal cpu':
return BaremetalNodeState(host, node, **kwargs)
else:
return host_manager.HostState(host, node, **kwargs)
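
For context, the factory above dispatches on the 'cpu_info' marker that the
bare-metal virt driver reports for its compute records; a short sketch of
how it resolves, using the classes just shown:

mgr = BaremetalHostManager()

# A compute record flagged 'baremetal cpu' gets the specialised state
# class; anything else falls back to the generic nova HostState.
state = mgr.host_state_cls('host1', 'node1',
                           compute={'cpu_info': 'baremetal cpu'})
assert isinstance(state, BaremetalNodeState)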

ironic/nova/scheduler/base_baremetal_host_manager.py

@@ -1,57 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manage hosts in the current zone.
"""
from nova.openstack.common import jsonutils
from nova.scheduler import host_manager
class BaseBaremetalNodeState(host_manager.HostState):
"""Mutable and immutable information tracked for a host.
This is an attempt to remove the ad-hoc data structures
previously used and lock down access.
"""
def update_from_compute_node(self, compute):
"""Update information about a host from its compute_node info."""
self.vcpus_total = compute['vcpus']
self.vcpus_used = compute['vcpus_used']
self.free_ram_mb = compute['free_ram_mb']
self.total_usable_ram_mb = compute['memory_mb']
self.free_disk_mb = compute['free_disk_gb'] * 1024
stats = compute.get('stats', '{}')
self.stats = jsonutils.loads(stats)
def consume_from_instance(self, instance):
"""Consume nodes entire resources regardless of instance request."""
self.free_ram_mb = 0
self.free_disk_mb = 0
self.vcpus_used = self.vcpus_total
class BaseBaremetalHostManager(host_manager.HostManager):
"""Base class for Baremetal and Ironic HostManager classes."""
def host_state_cls(self, host, node, **kwargs):
"""Factory function to create a new HostState. May be overridden
in subclasses to extend functionality.
"""
return BaseBaremetalNodeState(host, node, **kwargs)
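
The consume_from_instance override above is the heart of the bare-metal
model: a node cannot be subdivided, so placing any instance consumes the
whole node. A sketch with made-up numbers:

node = BaseBaremetalNodeState('host1', 'node1')
node.update_from_compute_node({'vcpus': 8, 'vcpus_used': 0,
                               'memory_mb': 16384, 'free_ram_mb': 16384,
                               'free_disk_gb': 100, 'stats': '{}'})

# Even a tiny instance zeroes out the node's remaining resources.
node.consume_from_instance({'memory_mb': 512})
assert node.free_ram_mb == 0
assert node.free_disk_mb == 0
assert node.vcpus_used == node.vcpus_total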

ironic/nova/scheduler/filters/exact_core_filter.py

@@ -1,51 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler import filters

LOG = logging.getLogger(__name__)


class ExactCoreFilter(filters.BaseHostFilter):
    """Exact Core Filter."""

    def host_passes(self, host_state, filter_properties):
        """Return True if host has the exact number of CPU cores."""
        instance_type = filter_properties.get('instance_type')
        if not instance_type:
            return True

        if not host_state.vcpus_total:
            # Fail safe
            LOG.warning(_("VCPUs not set; assuming CPU collection broken"))
            return False

        required_vcpus = instance_type['vcpus']
        usable_vcpus = host_state.vcpus_total - host_state.vcpus_used
        if required_vcpus != usable_vcpus:
            LOG.debug("%(host_state)s does not have exactly "
                      "%(requested_vcpus)s cores of usable vcpu, it has "
                      "%(usable_vcpus)s.",
                      {'host_state': host_state,
                       'requested_vcpus': required_vcpus,
                       'usable_vcpus': usable_vcpus})
            return False

        return True

ironic/nova/scheduler/filters/exact_disk_filter.py

@@ -1,41 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common import log as logging
from nova.scheduler import filters

LOG = logging.getLogger(__name__)


class ExactDiskFilter(filters.BaseHostFilter):
    """Exact Disk Filter."""

    def host_passes(self, host_state, filter_properties):
        """Return True if host has the exact amount of disk available."""
        instance_type = filter_properties.get('instance_type')
        requested_disk = (1024 * (instance_type['root_gb'] +
                                  instance_type['ephemeral_gb']) +
                          instance_type['swap'])

        if requested_disk != host_state.free_disk_mb:
            LOG.debug("%(host_state)s does not have exactly "
                      "%(requested_disk)s MB usable disk, it "
                      "has %(usable_disk_mb)s.",
                      {'host_state': host_state,
                       'requested_disk': requested_disk,
                       'usable_disk_mb': host_state.free_disk_mb})
            return False

        return True

ironic/nova/scheduler/filters/exact_ram_filter.py

@@ -1,38 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common import log as logging
from nova.scheduler import filters

LOG = logging.getLogger(__name__)


class ExactRamFilter(filters.BaseHostFilter):
    """Exact RAM Filter."""

    def host_passes(self, host_state, filter_properties):
        """Return True if host has the exact amount of RAM available."""
        instance_type = filter_properties.get('instance_type')
        requested_ram = instance_type['memory_mb']
        if requested_ram != host_state.free_ram_mb:
            LOG.debug("%(host_state)s does not have exactly "
                      "%(requested_ram)s MB usable RAM, it has "
                      "%(usable_ram)s.",
                      {'host_state': host_state,
                       'requested_ram': requested_ram,
                       'usable_ram': host_state.free_ram_mb})
            return False

        return True
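
Taken together, the three Exact* filters replace the scheduler's usual
"big enough" comparisons with equality tests, since any headroom left on a
bare-metal node after a partial match would simply be stranded. A combined
sketch with made-up numbers:

flavor = {'vcpus': 1, 'memory_mb': 2048, 'root_gb': 20,
          'ephemeral_gb': 0, 'swap': 0}
node = {'usable_vcpus': 1, 'free_ram_mb': 2048, 'free_disk_mb': 20 * 1024}

requested_disk = (1024 * (flavor['root_gb'] + flavor['ephemeral_gb']) +
                  flavor['swap'])
passes = (flavor['vcpus'] == node['usable_vcpus'] and
          flavor['memory_mb'] == node['free_ram_mb'] and
          requested_disk == node['free_disk_mb'])
assert passes  # a node twice as large would be rejected, not half-used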

ironic/nova/scheduler/ironic_host_manager.py

@@ -1,5 +1,6 @@
 # Copyright (c) 2012 NTT DOCOMO, INC.
 # Copyright (c) 2011-2014 OpenStack Foundation
+# Copyright 2014 Red Hat, Inc.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,79 +16,28 @@
 # under the License.
 """
-Ironic host manager.
-
-This host manager will consume all cpu's, disk space, and
-ram from a host / node as it is supporting Baremetal hosts, which can not be
-subdivided into multiple instances.
+A scheduler host manager which subclasses the new location in the Nova tree.
+This is a placeholder so that end users can gradually upgrade to use the
+new settings. TODO: remove in the K release
 """
 
-from oslo.config import cfg
+from ironic.common import i18n
 
 from nova.openstack.common import log as logging
+from nova.scheduler import ironic_host_manager
-from nova.openstack.common import timeutils
-
-from ironic.nova.scheduler import base_baremetal_host_manager as bbhm
-from nova.scheduler import host_manager
-
-host_manager_opts = [
-    cfg.ListOpt('baremetal_scheduler_default_filters',
-                default=[
-                    'RetryFilter',
-                    'AvailabilityZoneFilter',
-                    'ComputeFilter',
-                    'ComputeCapabilitiesFilter',
-                    'ImagePropertiesFilter',
-                    'ExactRamFilter',
-                    'ExactDiskFilter',
-                    'ExactCoreFilter',
-                ],
-                help='Which filter class names to use for filtering '
-                     'baremetal hosts when not specified in the request.'),
-    cfg.BoolOpt('scheduler_use_baremetal_filters',
-                default=False,
-                help='Flag to decide whether to use '
-                     'baremetal_scheduler_default_filters or not.'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(host_manager_opts)
 
 LOG = logging.getLogger(__name__)
 
 
-class IronicNodeState(bbhm.BaseBaremetalNodeState):
-    """Mutable and immutable information tracked for a host.
-    This is an attempt to remove the ad-hoc data structures
-    previously used and lock down access.
-    """
-
-    def update_from_compute_node(self, compute):
-        """Update information about a host from its compute_node info."""
-        super(IronicNodeState, self).update_from_compute_node(compute)
-
-        self.total_usable_disk_gb = compute['local_gb']
-        self.updated = compute['updated_at']
-
-    def consume_from_instance(self, instance):
-        """Consume the node's entire resources regardless of the request."""
-        super(IronicNodeState, self).consume_from_instance(instance)
-        self.updated = timeutils.utcnow()
-
-
-class IronicHostManager(bbhm.BaseBaremetalHostManager):
-    """Ironic HostManager class."""
+class IronicHostManager(ironic_host_manager.IronicHostManager):
+    """Ironic HostManager class that subclasses the Nova in-tree version."""
+
+    def _do_deprecation_warning(self):
+        LOG.warning(i18n._LW(
+            'This class (ironic.nova.scheduler.ironic_host_manager.'
+            'IronicHostManager) is deprecated and has moved into the Nova '
+            'tree. Please set scheduler_host_manager = '
+            'nova.scheduler.ironic_host_manager.IronicHostManager.'))
 
     def __init__(self):
         super(IronicHostManager, self).__init__()
+        self._do_deprecation_warning()
-        if CONF.scheduler_use_baremetal_filters:
-            baremetal_default = CONF.baremetal_scheduler_default_filters
-            CONF.scheduler_default_filters = baremetal_default
-
-    def host_state_cls(self, host, node, **kwargs):
-        """Factory function/property to create a new HostState."""
-        compute = kwargs.get('compute')
-        if compute and compute.get('cpu_info') == 'baremetal cpu':
-            return IronicNodeState(host, node, **kwargs)
-        else:
-            return host_manager.HostState(host, node, **kwargs)
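
The warning text doubles as the upgrade instruction; once the shims start
logging, the corresponding nova.conf settings (paths quoted from the
deprecation messages in this commit) are:

[DEFAULT]
scheduler_host_manager = nova.scheduler.ironic_host_manager.IronicHostManager
compute_driver = nova.virt.ironic.IronicDriver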

ironic/nova/tests/scheduler/ironic_fakes.py

@@ -1,75 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake nodes for Ironic host manager tests.
"""
from nova.openstack.common import jsonutils
COMPUTE_NODES = [
dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
service=dict(host='host1', disabled=False),
hypervisor_hostname='node1uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=jsonutils.dumps(dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386')),
supported_instances='[["i386", "baremetal", "baremetal"]]',
free_disk_gb=10, free_ram_mb=1024),
dict(id=2, local_gb=20, memory_mb=2048, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
service=dict(host='host2', disabled=True),
hypervisor_hostname='node2uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=jsonutils.dumps(dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386')),
supported_instances='[["i386", "baremetal", "baremetal"]]',
free_disk_gb=20, free_ram_mb=2048),
dict(id=3, local_gb=30, memory_mb=3072, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
service=dict(host='host3', disabled=False),
hypervisor_hostname='node3uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=jsonutils.dumps(dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386')),
supported_instances='[["i386", "baremetal", "baremetal"]]',
free_disk_gb=30, free_ram_mb=3072),
dict(id=4, local_gb=40, memory_mb=4096, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
service=dict(host='host4', disabled=False),
hypervisor_hostname='node4uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
stats=jsonutils.dumps(dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386')),
supported_instances='[["i386", "baremetal", "baremetal"]]',
free_disk_gb=40, free_ram_mb=4096),
# Broken entry
dict(id=5, local_gb=50, memory_mb=5120, vcpus=1, service=None,
cpu_info='baremetal cpu',
stats=jsonutils.dumps(dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386')),
supported_instances='[["i386", "baremetal", "baremetal"]]',
free_disk_gb=50, free_ram_mb=5120),
]

ironic/nova/tests/scheduler/test_ironic_host_manager.py

@@ -1,412 +0,0 @@
# Copyright (c) 2014 OpenStack Foundation
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For IronicHostManager
"""
import mock
from nova import db
from nova import exception
from nova.openstack.common import jsonutils
from nova.scheduler import filters
from nova.scheduler import host_manager
from ironic.nova.scheduler import ironic_host_manager
from nova import test
from ironic.nova.tests.scheduler import ironic_fakes
class FakeFilterClass1(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class FakeFilterClass2(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class IronicHostManagerTestCase(test.NoDBTestCase):
"""Test case for IronicHostManager class."""
def setUp(self):
super(IronicHostManagerTestCase, self).setUp()
self.host_manager = ironic_host_manager.IronicHostManager()
def test_get_all_host_states(self):
# Ensure .service is set and we have the values we expect to.
context = 'fake_context'
self.mox.StubOutWithMock(db, 'compute_node_get_all')
db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
self.mox.ReplayAll()
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(len(host_states_map), 4)
for i in range(4):
compute_node = ironic_fakes.COMPUTE_NODES[i]
host = compute_node['service']['host']
node = compute_node['hypervisor_hostname']
state_key = (host, node)
self.assertEqual(compute_node['service'],
host_states_map[state_key].service)
self.assertEqual(jsonutils.loads(compute_node['stats']),
host_states_map[state_key].stats)
self.assertEqual(compute_node['free_ram_mb'],
host_states_map[state_key].free_ram_mb)
self.assertEqual(compute_node['free_disk_gb'] * 1024,
host_states_map[state_key].free_disk_mb)
class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
"""Test case for IronicHostManager class."""
def setUp(self):
super(IronicHostManagerChangedNodesTestCase, self).setUp()
self.host_manager = ironic_host_manager.IronicHostManager()
ironic_driver = "nova.virt.ironic.driver.IronicDriver"
supported_instances = '[["i386", "baremetal", "baremetal"]]'
self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
stats=jsonutils.dumps(dict(
ironic_driver=ironic_driver,
cpu_arch='i386')),
supported_instances=supported_instances,
free_disk_gb=10, free_ram_mb=1024)
@mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
def test_create_ironic_node_state(self, init_mock):
init_mock.return_value = None
compute = {'cpu_info': 'baremetal cpu'}
host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
compute=compute)
self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))
@mock.patch.object(host_manager.HostState, '__init__')
def test_create_non_ironic_host_state(self, init_mock):
init_mock.return_value = None
compute = {'cpu_info': 'other cpu'}
host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
compute=compute)
self.assertIs(host_manager.HostState, type(host_state))
def test_get_all_host_states_after_delete_one(self):
context = 'fake_context'
self.mox.StubOutWithMock(db, 'compute_node_get_all')
# all nodes active for first call
db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
# remove node4 for second call
running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
if n.get('hypervisor_hostname') != 'node4uuid']
db.compute_node_get_all(context).AndReturn(running_nodes)
self.mox.ReplayAll()
self.host_manager.get_all_host_states(context)
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(3, len(host_states_map))
def test_get_all_host_states_after_delete_all(self):
context = 'fake_context'
self.mox.StubOutWithMock(db, 'compute_node_get_all')
# all nodes active for first call
db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
# remove all nodes for second call
db.compute_node_get_all(context).AndReturn([])
self.mox.ReplayAll()
self.host_manager.get_all_host_states(context)
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(0, len(host_states_map))
def test_update_from_compute_node(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
host.update_from_compute_node(self.compute_node)
self.assertEqual(1024, host.free_ram_mb)
self.assertEqual(1024, host.total_usable_ram_mb)
self.assertEqual(10240, host.free_disk_mb)
self.assertEqual(1, host.vcpus_total)
self.assertEqual(0, host.vcpus_used)
self.assertEqual(jsonutils.loads(self.compute_node['stats']),
host.stats)
def test_consume_identical_instance_from_compute(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
host.update_from_compute_node(self.compute_node)
instance = dict(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1)
host.consume_from_instance(instance)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
self.assertEqual(0, host.free_disk_mb)
def test_consume_larger_instance_from_compute(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
host.update_from_compute_node(self.compute_node)
instance = dict(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2)
host.consume_from_instance(instance)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
self.assertEqual(0, host.free_disk_mb)
def test_consume_smaller_instance_from_compute(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
host.update_from_compute_node(self.compute_node)
instance = dict(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1)
host.consume_from_instance(instance)
self.assertEqual(1, host.vcpus_used)
self.assertEqual(0, host.free_ram_mb)
self.assertEqual(0, host.free_disk_mb)
class IronicHostManagerTestFilters(test.NoDBTestCase):
"""Test filters work for IronicHostManager."""
def setUp(self):
super(IronicHostManagerTestFilters, self).setUp()
self.host_manager = ironic_host_manager.IronicHostManager()
self.fake_hosts = [ironic_host_manager.IronicNodeState(
'fake_host%s' % x, 'fake-node') for x in range(1, 5)]
self.fake_hosts += [ironic_host_manager.IronicNodeState(
'fake_multihost', 'fake-node%s' % x) for x in range(1, 5)]
def test_choose_host_filters_not_found(self):
self.flags(scheduler_default_filters='FakeFilterClass3')
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]
self.assertRaises(exception.SchedulerHostFilterNotFound,
self.host_manager._choose_host_filters, None)
def test_choose_host_filters(self):
self.flags(scheduler_default_filters=['FakeFilterClass2'])
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]
# Test we returns 1 correct function
filter_classes = self.host_manager._choose_host_filters(None)
self.assertEqual(1, len(filter_classes))
self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)
def _mock_get_filtered_hosts(self, info, specified_filters=None):
self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
info['got_objs'] = []
info['got_fprops'] = []
def fake_filter_one(_self, obj, filter_props):
info['got_objs'].append(obj)
info['got_fprops'].append(filter_props)
return True
self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
self.host_manager._choose_host_filters(specified_filters).AndReturn(
[FakeFilterClass1])
def _verify_result(self, info, result, filters=True):
for x in info['got_fprops']:
self.assertEqual(x, info['expected_fprops'])
if filters:
self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
self.assertEqual(set(info['expected_objs']), set(result))
def test_get_filtered_hosts(self):
fake_properties = {'moo': 1, 'cow': 2}
info = {'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_get_filtered_hosts_with_specified_filters(self):
fake_properties = {'moo': 1, 'cow': 2}
specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
info = {'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info, specified_filters)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties, filter_class_names=specified_filters)
self._verify_result(info, result)
def test_get_filtered_hosts_with_ignore(self):
fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
'fake_host5', 'fake_multihost']}
# [1] and [3] are host2 and host4
info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_get_filtered_hosts_with_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
'fake_host5']}
# [0] and [2] are host1 and host3
info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_no_matching_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
# Ensure ignore_hosts processed before force_hosts in host filters.
fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
'ignore_hosts': ['fake_host1']}
# only fake_host3 should be left.
info = {'expected_objs': [self.fake_hosts[2]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
# Ensure all nodes returned for a host with many nodes
fake_properties = {'force_hosts': ['fake_multihost']}
info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
self.fake_hosts[6], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_nodes(self):
fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
'fake-node9']}
# [5] is fake-node2, [7] is fake-node4
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
# Ensure only overlapping results if both force host and node
fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
'force_nodes': ['fake-node2', 'fake-node9']}
# [5] is fake-node2
info = {'expected_objs': [self.fake_hosts[5]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
# Ensure non-overlapping force_node and force_host yield no result
fake_properties = {'force_hosts': ['fake_multihost'],
'force_nodes': ['fake-node']}
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
# Ensure ignore_hosts can coexist with force_nodes
fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
'ignore_hosts': ['fake_host1', 'fake_host2']}
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
# Ensure ignore_hosts is processed before force_nodes
fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
'ignore_hosts': ['fake_multihost']}
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)

ironic/nova/tests/virt/ironic/test_client_wrapper.py

@@ -1,125 +0,0 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from ironicclient import client as ironic_client
from ironicclient import exc as ironic_exception
from oslo.config import cfg
from nova import exception
from nova import test
from ironic.nova.tests.virt.ironic import utils as ironic_utils
from ironic.nova.virt.ironic import client_wrapper
CONF = cfg.CONF
FAKE_CLIENT = ironic_utils.FakeClient()
class IronicClientWrapperTestCase(test.NoDBTestCase):
def setUp(self):
super(IronicClientWrapperTestCase, self).setUp()
self.icli = client_wrapper.IronicClientWrapper()
# Do not waste time sleeping
cfg.CONF.set_override('api_retry_interval', 0, 'ironic')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_good_no_args(self, mock_get_client, mock_multi_getattr):
mock_get_client.return_value = FAKE_CLIENT
self.icli.call("node.list")
mock_get_client.assert_called_once_with()
mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
mock_multi_getattr.return_value.assert_called_once_with()
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_good_with_args(self, mock_get_client, mock_multi_getattr):
mock_get_client.return_value = FAKE_CLIENT
self.icli.call("node.list", 'test', associated=True)
mock_get_client.assert_called_once_with()
mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
mock_multi_getattr.return_value.assert_called_once_with('test',
associated=True)
@mock.patch.object(ironic_client, 'get_client')
def test__get_client_no_auth_token(self, mock_ir_cli):
self.flags(admin_auth_token=None, group='ironic')
icli = client_wrapper.IronicClientWrapper()
# dummy call to have _get_client() called
icli.call("node.list")
expected = {'os_username': CONF.ironic.admin_username,
'os_password': CONF.ironic.admin_password,
'os_auth_url': CONF.ironic.admin_url,
'os_tenant_name': CONF.ironic.admin_tenant_name,
'os_service_type': 'baremetal',
'os_endpoint_type': 'public',
'ironic_url': CONF.ironic.api_endpoint}
mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
**expected)
@mock.patch.object(ironic_client, 'get_client')
def test__get_client_with_auth_token(self, mock_ir_cli):
self.flags(admin_auth_token='fake-token', group='ironic')
icli = client_wrapper.IronicClientWrapper()
# dummy call to have _get_client() called
icli.call("node.list")
expected = {'os_auth_token': 'fake-token',
'ironic_url': CONF.ironic.api_endpoint}
mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
**expected)
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_fail(self, mock_get_client, mock_multi_getattr):
cfg.CONF.set_override('api_max_retries', 2, 'ironic')
test_obj = mock.Mock()
test_obj.side_effect = ironic_exception.HTTPServiceUnavailable
mock_multi_getattr.return_value = test_obj
mock_get_client.return_value = FAKE_CLIENT
self.assertRaises(exception.NovaException, self.icli.call, "node.list")
self.assertEqual(2, test_obj.call_count)
@mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
@mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
def test_call_fail_unexpected_exception(self, mock_get_client,
mock_multi_getattr):
test_obj = mock.Mock()
test_obj.side_effect = ironic_exception.HTTPNotFound
mock_multi_getattr.return_value = test_obj
mock_get_client.return_value = FAKE_CLIENT
self.assertRaises(ironic_exception.HTTPNotFound, self.icli.call,
"node.list")
@mock.patch.object(ironic_client, 'get_client')
def test__get_client_unauthorized(self, mock_get_client):
mock_get_client.side_effect = ironic_exception.Unauthorized
self.assertRaises(exception.NovaException, self.icli._get_client)
@mock.patch.object(ironic_client, 'get_client')
def test__get_client_unexpected_exception(self, mock_get_client):
mock_get_client.side_effect = ironic_exception.ConnectionRefused
self.assertRaises(ironic_exception.ConnectionRefused,
self.icli._get_client)
def test__multi_getattr_good(self):
response = self.icli._multi_getattr(FAKE_CLIENT, "node.list")
self.assertEqual(FAKE_CLIENT.node.list, response)
def test__multi_getattr_fail(self):
self.assertRaises(AttributeError, self.icli._multi_getattr,
FAKE_CLIENT, "nonexistent")

File diff suppressed because it is too large.

ironic/nova/tests/virt/ironic/test_patcher.py

@@ -1,140 +0,0 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from ironic.nova.virt.ironic import patcher
from ironic.nova.tests.virt.ironic import utils as ironic_utils
from nova import context as nova_context
from nova import test
from nova.tests import fake_instance
CONF = cfg.CONF
class IronicDriverFieldsTestCase(test.NoDBTestCase):
def setUp(self):
super(IronicDriverFieldsTestCase, self).setUp()
self.image_meta = ironic_utils.get_test_image_meta()
self.flavor = ironic_utils.get_test_flavor()
self.ctx = nova_context.get_admin_context()
self.instance = fake_instance.fake_instance_obj(self.ctx)
# Generic expected patches
self._expected_deploy_patch = [{'path': '/instance_info/image_source',
'value': self.image_meta['id'],
'op': 'add'},
{'path': '/instance_info/root_gb',
'value': str(self.instance['root_gb']),
'op': 'add'},
{'path': '/instance_info/swap_mb',
'value': str(self.flavor['swap']),
'op': 'add'}]
self._expected_cleanup_patch = []
def test_create_generic(self):
node = ironic_utils.get_test_node(driver='fake')
patcher_obj = patcher.create(node)
self.assertIsInstance(patcher_obj, patcher.GenericDriverFields)
def test_create_pxe(self):
node = ironic_utils.get_test_node(driver='pxe_fake')
patcher_obj = patcher.create(node)
self.assertIsInstance(patcher_obj, patcher.PXEDriverFields)
def test_generic_get_deploy_patch(self):
node = ironic_utils.get_test_node(driver='fake')
patch = patcher.create(node).get_deploy_patch(
self.instance, self.image_meta, self.flavor)
self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch))
def test_generic_get_deploy_patch_ephemeral(self):
CONF.set_override('default_ephemeral_format', 'testfmt')
node = ironic_utils.get_test_node(driver='fake')
instance = fake_instance.fake_instance_obj(self.ctx,
ephemeral_gb=10)
patch = patcher.create(node).get_deploy_patch(
instance, self.image_meta, self.flavor)
expected = [{'path': '/instance_info/ephemeral_gb',
'value': str(instance.ephemeral_gb),
'op': 'add'},
{'path': '/instance_info/ephemeral_format',
'value': 'testfmt',
'op': 'add'}]
expected += self._expected_deploy_patch
self.assertEqual(sorted(expected), sorted(patch))
def test_generic_get_deploy_patch_preserve_ephemeral(self):
node = ironic_utils.get_test_node(driver='fake')
for preserve in [True, False]:
patch = patcher.create(node).get_deploy_patch(
self.instance, self.image_meta, self.flavor,
preserve_ephemeral=preserve)
expected = [{'path': '/instance_info/preserve_ephemeral',
'value': str(preserve), 'op': 'add', }]
expected += self._expected_deploy_patch
self.assertEqual(sorted(expected), sorted(patch))
def test_generic_get_cleanup_patch(self):
node = ironic_utils.get_test_node(driver='fake')
patch = patcher.create(node).get_cleanup_patch(self.instance, None,
self.flavor)
self.assertEqual(self._expected_cleanup_patch, patch)
def test_pxe_get_deploy_patch(self):
node = ironic_utils.get_test_node(driver='pxe_fake')
extra_specs = self.flavor['extra_specs']
expected = [{'path': '/driver_info/pxe_deploy_kernel',
'value': extra_specs['baremetal:deploy_kernel_id'],
'op': 'add'},
{'path': '/driver_info/pxe_deploy_ramdisk',
'value': extra_specs['baremetal:deploy_ramdisk_id'],
'op': 'add'}]
expected += self._expected_deploy_patch
patch = patcher.create(node).get_deploy_patch(
self.instance, self.image_meta, self.flavor)
self.assertEqual(sorted(expected), sorted(patch))
def test_pxe_get_deploy_patch_no_flavor_kernel_ramdisk_ids(self):
flavor = ironic_utils.get_test_flavor(extra_specs={})
node = ironic_utils.get_test_node(driver='pxe_fake')
patch = patcher.create(node).get_deploy_patch(
self.instance, self.image_meta, flavor)
# If there's no extra_specs patch should be exactly like a
# generic patch
self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch))
def test_pxe_get_cleanup_patch(self):
driver_info = {'pxe_deploy_kernel': 'fake-kernel-id',
'pxe_deploy_ramdisk': 'fake-ramdisk-id'}
node = ironic_utils.get_test_node(driver='pxe_fake',
driver_info=driver_info)
patch = patcher.create(node).get_cleanup_patch(self.instance, None,
self.flavor)
expected = [{'path': '/driver_info/pxe_deploy_kernel',
'op': 'remove'},
{'path': '/driver_info/pxe_deploy_ramdisk',
'op': 'remove'}]
self.assertEqual(sorted(expected), sorted(patch))
def test_pxe_get_cleanup_patch_no_flavor_kernel_ramdisk_ids(self):
self.flavor = ironic_utils.get_test_flavor(extra_specs={})
node = ironic_utils.get_test_node(driver='pxe_fake')
patch = patcher.create(node).get_cleanup_patch(self.instance, None,
self.flavor)
# If there's no extra_specs patch should be exactly like a
# generic patch
self.assertEqual(self._expected_cleanup_patch, patch)
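
For context, the JSON-patch lists these tests assert on are what the
driver ultimately hands to Ironic; the consuming call (see the driver code
further below) is simply:

icli = client_wrapper.IronicClientWrapper()
icli.call('node.update', node.uuid, patch)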

ironic/nova/tests/virt/ironic/utils.py

@@ -1,115 +0,0 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.nova.virt.ironic import ironic_states
def get_test_validation(**kw):
return type('interfaces', (object,),
{'power': kw.get('power', True),
'deploy': kw.get('deploy', True),
'console': kw.get('console', True),
'rescue': kw.get('rescue', True)})()
def get_test_node(**kw):
return type('node', (object,),
{'uuid': kw.get('uuid', 'eeeeeeee-dddd-cccc-bbbb-aaaaaaaaaaaa'),
'chassis_uuid': kw.get('chassis_uuid'),
'power_state': kw.get('power_state',
ironic_states.NOSTATE),
'target_power_state': kw.get('target_power_state',
ironic_states.NOSTATE),
'provision_state': kw.get('provision_state',
ironic_states.NOSTATE),
'target_provision_state': kw.get('target_provision_state',
ironic_states.NOSTATE),
'last_error': kw.get('last_error'),
'instance_uuid': kw.get('instance_uuid'),
'driver': kw.get('driver', 'fake'),
'driver_info': kw.get('driver_info', {}),
'properties': kw.get('properties', {}),
'reservation': kw.get('reservation'),
'maintenance': kw.get('maintenance', False),
'extra': kw.get('extra', {}),
'updated_at': kw.get('created_at'),
'created_at': kw.get('updated_at')})()
def get_test_port(**kw):
return type('port', (object,),
{'uuid': kw.get('uuid', 'gggggggg-uuuu-qqqq-ffff-llllllllllll'),
'node_uuid': kw.get('node_uuid', get_test_node().uuid),
'address': kw.get('address', 'FF:FF:FF:FF:FF:FF'),
'extra': kw.get('extra', {}),
'created_at': kw.get('created_at'),
'updated_at': kw.get('updated_at')})()
def get_test_flavor(**kw):
default_extra_specs = {'baremetal:deploy_kernel_id':
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'baremetal:deploy_ramdisk_id':
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'}
return {'name': kw.get('name', 'fake.flavor'),
'extra_specs': kw.get('extra_specs', default_extra_specs),
'swap': kw.get('swap', 0),
'ephemeral_gb': kw.get('ephemeral_gb', 0)}
def get_test_image_meta(**kw):
return {'id': kw.get('id', 'cccccccc-cccc-cccc-cccc-cccccccccccc')}
class FakePortClient(object):
def get(self, port_uuid):
pass
def update(self, port_uuid, patch):
pass
class FakeNodeClient(object):
def list(self, detail=False):
return []
def get(self, node_uuid):
pass
def get_by_instance_uuid(self, instance_uuid):
pass
def list_ports(self, node_uuid):
pass
def set_power_state(self, node_uuid, target):
pass
def set_provision_state(self, node_uuid, target):
pass
def update(self, node_uuid, patch):
pass
def validate(self, node_uuid):
pass
class FakeClient(object):
node = FakeNodeClient()
port = FakePortClient()
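
These helpers build throwaway objects with attribute access (via type())
rather than dicts, because the code under test reads Ironic records through
attributes; a test overrides only the fields it cares about:

node = get_test_node(driver='pxe_fake',
                     driver_info={'pxe_deploy_kernel': 'fake-kernel-id'})
assert node.driver == 'pxe_fake'
assert node.maintenance is False  # untouched fields keep their defaults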

ironic/nova/virt/ironic/client_wrapper.py

@@ -1,119 +0,0 @@
# coding=utf-8
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time

from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from oslo.config import cfg

ironic = None

LOG = logging.getLogger(__name__)

CONF = cfg.CONF


class IronicClientWrapper(object):
    """Ironic client wrapper class that encapsulates retry logic."""

    def __init__(self):
        """Initialise the IronicClientWrapper for use.

        Initialise IronicClientWrapper by loading ironicclient
        dynamically so that ironicclient is not a dependency for
        Nova.
        """
        global ironic
        if ironic is None:
            ironic = importutils.import_module('ironicclient')
            # NOTE(deva): work around a lack of symbols in the current version.
            if not hasattr(ironic, 'exc'):
                ironic.exc = importutils.import_module('ironicclient.exc')
            if not hasattr(ironic, 'client'):
                ironic.client = importutils.import_module(
                    'ironicclient.client')

    def _get_client(self):
        # TODO(deva): save and reuse existing client & auth token
        #             until it expires or is no longer valid
        auth_token = CONF.ironic.admin_auth_token
        if auth_token is None:
            kwargs = {'os_username': CONF.ironic.admin_username,
                      'os_password': CONF.ironic.admin_password,
                      'os_auth_url': CONF.ironic.admin_url,
                      'os_tenant_name': CONF.ironic.admin_tenant_name,
                      'os_service_type': 'baremetal',
                      'os_endpoint_type': 'public',
                      'ironic_url': CONF.ironic.api_endpoint}
        else:
            kwargs = {'os_auth_token': auth_token,
                      'ironic_url': CONF.ironic.api_endpoint}

        try:
            cli = ironic.client.get_client(CONF.ironic.api_version, **kwargs)
        except ironic.exc.Unauthorized:
            msg = _("Unable to authenticate Ironic client.")
            LOG.error(msg)
            raise exception.NovaException(msg)

        return cli

    def _multi_getattr(self, obj, attr):
        """Support nested attribute path for getattr().

        :param obj: Root object.
        :param attr: Path of final attribute to get. E.g., "a.b.c.d"

        :returns: The value of the final named attribute.
        :raises: AttributeError will be raised if the path is invalid.
        """
        for attribute in attr.split("."):
            obj = getattr(obj, attribute)
        return obj

    def call(self, method, *args, **kwargs):
        """Call an Ironic client method and retry on errors.

        :param method: Name of the client method to call as a string.
        :param args: Client method arguments.
        :param kwargs: Client method keyword arguments.

        :raises: NovaException if all retries failed.
        """
        retry_excs = (ironic.exc.ServiceUnavailable,
                      ironic.exc.ConnectionRefused,
                      ironic.exc.Conflict)
        num_attempts = CONF.ironic.api_max_retries

        for attempt in range(1, num_attempts + 1):
            client = self._get_client()

            try:
                return self._multi_getattr(client, method)(*args, **kwargs)
            except retry_excs:
                msg = (_("Error contacting Ironic server for '%(method)s'. "
                         "Attempt %(attempt)d of %(total)d")
                       % {'method': method,
                          'attempt': attempt,
                          'total': num_attempts})
                if attempt == num_attempts:
                    LOG.error(msg)
                    raise exception.NovaException(msg)
                LOG.warning(msg)
                time.sleep(CONF.ironic.api_retry_interval)
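
To summarise the wrapper's contract: call() resolves a dotted method path
against a freshly built client and retries transient failures, so callers
never hold a raw ironicclient handle. A usage sketch (the node UUID is
made up):

icli = IronicClientWrapper()
# Resolved via _multi_getattr to client.node.get and then invoked;
# ServiceUnavailable, ConnectionRefused and Conflict are retried up to
# CONF.ironic.api_max_retries times.
node = icli.call("node.get", "eeeeeeee-dddd-cccc-bbbb-aaaaaaaaaaaa")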

ironic/nova/virt/ironic/driver.py

@ -17,964 +17,27 @@
# under the License. # under the License.
""" """
A driver wrapping the Ironic API, such that Nova may provision A driver which subclasses the new location in the Nova tree.
bare metal resources. This is a placeholder so that end users can gradually upgrade to use the
new settings. TODO: remove in the K release
""" """
import logging as py_logging
import time
from oslo.config import cfg from ironic.common import i18n
from ironic.nova.virt.ironic import client_wrapper
from ironic.nova.virt.ironic import ironic_states
from ironic.nova.virt.ironic import patcher
from nova.compute import arch
from nova.compute import power_state
from nova.compute import task_states
from nova import context as nova_context
from nova import exception
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _, _LE, _LW
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall from nova.virt.ironic import driver
from nova.virt import driver as virt_driver
from nova.virt import firewall
ironic = None
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
opts = [
cfg.IntOpt('api_version',
default=1,
help='Version of Ironic API service endpoint.'),
cfg.StrOpt('api_endpoint',
help='URL for Ironic API endpoint.'),
cfg.StrOpt('admin_username',
help='Ironic keystone admin name'),
cfg.StrOpt('admin_password',
help='Ironic keystone admin password.'),
cfg.StrOpt('admin_auth_token',
help='Ironic keystone auth token.'),
cfg.StrOpt('admin_url',
help='Keystone public API endpoint.'),
cfg.StrOpt('client_log_level',
help='Log level override for ironicclient. Set this in '
'order to override the global "default_log_levels", '
'"verbose", and "debug" settings.'),
cfg.StrOpt('admin_tenant_name',
help='Ironic keystone tenant name.'),
cfg.IntOpt('api_max_retries',
default=60,
help=('How many retries when a request does conflict.')),
cfg.IntOpt('api_retry_interval',
default=2,
help=('How often to retry in seconds when a request '
'does conflict')),
]
ironic_group = cfg.OptGroup(name='ironic', class IronicDriver(driver.IronicDriver):
title='Ironic Options') """Nova Ironic driver that subclasses the Nova in-tree version."""
CONF = cfg.CONF def _do_deprecation_warning(self):
CONF.register_group(ironic_group) LOG.warning(i18n._LW(
CONF.register_opts(opts, ironic_group) 'This class (ironic.nova.virt.ironic.IronicDriver) is '
'deprecated and has moved into the Nova tree. Please set '
_POWER_STATE_MAP = { 'compute_driver = nova.virt.ironic.IronicDriver.'))
ironic_states.POWER_ON: power_state.RUNNING,
ironic_states.NOSTATE: power_state.NOSTATE,
ironic_states.POWER_OFF: power_state.SHUTDOWN,
}
def map_power_state(state):
try:
return _POWER_STATE_MAP[state]
except KeyError:
LOG.warning(_LW("Power state %s not found."), state)
return power_state.NOSTATE
def _validate_instance_and_node(icli, instance):
"""Get the node associated with the instance.
Check with the Ironic service that this instance is associated with a
node, and return the node.
"""
try:
return icli.call("node.get_by_instance_uuid", instance['uuid'])
except ironic.exc.NotFound:
raise exception.InstanceNotFound(instance_id=instance['uuid'])
def _get_nodes_supported_instances(cpu_arch=None):
"""Return supported instances for a node."""
if not cpu_arch:
return []
return [(cpu_arch, 'baremetal', 'baremetal')]
def _log_ironic_polling(what, node, instance):
prov_state = (None if node.provision_state is None else
'"%s"' % node.provision_state)
tgt_prov_state = (None if node.target_provision_state is None else
'"%s"' % node.target_provision_state)
LOG.debug('Still waiting for ironic node %(node)s to %(what)s: '
'provision_state=%(prov_state)s, '
'target_provision_state=%(tgt_prov_state)s',
dict(what=what,
node=node.uuid,
prov_state=prov_state,
tgt_prov_state=tgt_prov_state),
instance=instance)
class IronicDriver(virt_driver.ComputeDriver):
"""Hypervisor driver for Ironic - bare metal provisioning."""
capabilities = {"has_imagecache": False,
"supports_recreate": False}
def __init__(self, virtapi, read_only=False): def __init__(self, virtapi, read_only=False):
super(IronicDriver, self).__init__(virtapi) super(IronicDriver, self).__init__(virtapi)
global ironic self._do_deprecation_warning()
if ironic is None:
ironic = importutils.import_module('ironicclient')
# NOTE(deva): work around a lack of symbols in the current version.
if not hasattr(ironic, 'exc'):
ironic.exc = importutils.import_module('ironicclient.exc')
if not hasattr(ironic, 'client'):
ironic.client = importutils.import_module(
'ironicclient.client')
self.firewall_driver = firewall.load_driver(
default='nova.virt.firewall.NoopFirewallDriver')
self.node_cache = {}
self.node_cache_time = 0
icli_log_level = CONF.ironic.client_log_level
if icli_log_level:
level = py_logging.getLevelName(icli_log_level)
logger = py_logging.getLogger('ironicclient')
logger.setLevel(level)
def _node_resources_unavailable(self, node_obj):
"""Determine whether the node's resources are in an unacceptable state.
Determines whether the node's resources should be presented
to Nova for use based on the current power and maintenance state.
Returns True if unacceptable.
"""
bad_states = [ironic_states.ERROR, ironic_states.NOSTATE]
return (node_obj.maintenance or
node_obj.power_state in bad_states)
def _node_resource(self, node):
"""Helper method to create resource dict from node stats."""
vcpus = int(node.properties.get('cpus', 0))
memory_mb = int(node.properties.get('memory_mb', 0))
local_gb = int(node.properties.get('local_gb', 0))
try:
cpu_arch = arch.canonicalize(node.properties.get('cpu_arch', None))
except exception.InvalidArchitectureName:
cpu_arch = None
if not cpu_arch:
LOG.warn(_LW("cpu_arch not defined for node '%s'"), node.uuid)
nodes_extra_specs = {}
# NOTE(deva): In Havana and Icehouse, the flavor was required to link
# to an arch-specific deploy kernel and ramdisk pair, and so the flavor
# also had to have extra_specs['cpu_arch'], which was matched against
# the ironic node.properties['cpu_arch'].
# With Juno, the deploy image(s) may be referenced directly by the
# node.driver_info, and a flavor no longer needs to contain any of
# these three extra specs, though the cpu_arch may still be used
# in a heterogeneous environment, if so desired.
nodes_extra_specs['cpu_arch'] = cpu_arch
# NOTE(gilliard): To assist with more precise scheduling, if the
# node.properties contains a key 'capabilities', we expect the value
# to be of the form "k1:v1,k2:v2,etc.." which we add directly as
# key/value pairs into the node_extra_specs to be used by the
# ComputeCapabilitiesFilter
capabilities = node.properties.get('capabilities')
if capabilities:
for capability in str(capabilities).split(','):
parts = capability.split(':')
if len(parts) == 2 and parts[0] and parts[1]:
nodes_extra_specs[parts[0]] = parts[1]
else:
LOG.warn(_LW("Ignoring malformed capability '%s'. "
"Format should be 'key:val'."), capability)
vcpus_used = 0
memory_mb_used = 0
local_gb_used = 0
if node.instance_uuid:
# Node has an instance, report all resource as unavailable
vcpus_used = vcpus
memory_mb_used = memory_mb
local_gb_used = local_gb
elif self._node_resources_unavailable(node):
# The node's current state is such that it should not present any
# of its resources to Nova
vcpus = 0
memory_mb = 0
local_gb = 0
dic = {
'node': str(node.uuid),
'hypervisor_hostname': str(node.uuid),
'hypervisor_type': self._get_hypervisor_type(),
'hypervisor_version': self._get_hypervisor_version(),
'cpu_info': 'baremetal cpu',
'vcpus': vcpus,
'vcpus_used': vcpus_used,
'local_gb': local_gb,
'local_gb_used': local_gb_used,
'disk_total': local_gb,
'disk_used': local_gb_used,
'disk_available': local_gb - local_gb_used,
'memory_mb': memory_mb,
'memory_mb_used': memory_mb_used,
'host_memory_total': memory_mb,
'host_memory_free': memory_mb - memory_mb_used,
'supported_instances': jsonutils.dumps(
_get_nodes_supported_instances(cpu_arch)),
'stats': jsonutils.dumps(nodes_extra_specs),
'host': CONF.host,
}
dic.update(nodes_extra_specs)
return dic
def _start_firewall(self, instance, network_info):
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
self.firewall_driver.apply_instance_filter(instance, network_info)
def _stop_firewall(self, instance, network_info):
self.firewall_driver.unfilter_instance(instance, network_info)
def _add_driver_fields(self, node, instance, image_meta, flavor,
preserve_ephemeral=None):
icli = client_wrapper.IronicClientWrapper()
patch = patcher.create(node).get_deploy_patch(instance,
image_meta,
flavor,
preserve_ephemeral)
# Associate the node with an instance
patch.append({'path': '/instance_uuid', 'op': 'add',
'value': instance['uuid']})
try:
icli.call('node.update', node.uuid, patch)
except ironic.exc.BadRequest:
msg = (_("Failed to add deploy parameters on node %(node)s "
"when provisioning the instance %(instance)s")
% {'node': node.uuid, 'instance': instance['uuid']})
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def _cleanup_deploy(self, node, instance, network_info):
icli = client_wrapper.IronicClientWrapper()
context = nova_context.get_admin_context()
flavor = flavor_obj.Flavor.get_by_id(context,
instance['instance_type_id'])
patch = patcher.create(node).get_cleanup_patch(instance, network_info,
flavor)
# Unassociate the node
patch.append({'op': 'remove', 'path': '/instance_uuid'})
try:
icli.call('node.update', node.uuid, patch)
except ironic.exc.BadRequest:
LOG.error(_LE("Failed to clean up the parameters on node %(node)s "
"when unprovisioning the instance %(instance)s"),
{'node': node.uuid, 'instance': instance['uuid']})
reason = (_("Fail to clean up node %s parameters") % node.uuid)
raise exception.InstanceTerminationFailure(reason=reason)
self._unplug_vifs(node, instance, network_info)
self._stop_firewall(instance, network_info)
def _wait_for_active(self, icli, instance):
"""Wait for the node to be marked as ACTIVE in Ironic."""
node = _validate_instance_and_node(icli, instance)
if node.provision_state == ironic_states.ACTIVE:
# job is done
LOG.debug("Ironic node %(node)s is now ACTIVE",
dict(node=node.uuid), instance=instance)
raise loopingcall.LoopingCallDone()
if node.target_provision_state == ironic_states.DELETED:
# ironic is trying to delete it now
raise exception.InstanceNotFound(instance_id=instance['uuid'])
if node.provision_state == ironic_states.NOSTATE:
# ironic already deleted it
raise exception.InstanceNotFound(instance_id=instance['uuid'])
if node.provision_state == ironic_states.DEPLOYFAIL:
# ironic failed to deploy
msg = (_("Failed to provision instance %(inst)s: %(reason)s")
% {'inst': instance['uuid'], 'reason': node.last_error})
raise exception.InstanceDeployFailure(msg)
_log_ironic_polling('become ACTIVE', node, instance)
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function.
:param host: the hostname of the compute host.
"""
return
def _get_hypervisor_type(self):
"""Get hypervisor type."""
return 'ironic'
def _get_hypervisor_version(self):
"""Returns the version of the Ironic API service endpoint."""
return CONF.ironic.api_version
def instance_exists(self, instance):
"""Checks the existence of an instance.
This is an override of the base method for efficiency.
:param instance: The instance object.
:returns: True if the instance exists. False if not.
"""
icli = client_wrapper.IronicClientWrapper()
try:
_validate_instance_and_node(icli, instance)
return True
except exception.InstanceNotFound:
return False
def list_instances(self):
"""Return the names of all the instances provisioned.
:returns: a list of instance names.
"""
icli = client_wrapper.IronicClientWrapper()
node_list = icli.call("node.list", associated=True)
context = nova_context.get_admin_context()
return [instance_obj.Instance.get_by_uuid(context,
i.instance_uuid).name
for i in node_list]
def list_instance_uuids(self):
"""Return the UUIDs of all the instances provisioned.
:returns: a list of instance UUIDs.
"""
icli = client_wrapper.IronicClientWrapper()
node_list = icli.call("node.list", associated=True)
return list(n.instance_uuid for n in node_list)
def node_is_available(self, nodename):
"""Confirms a Nova hypervisor node exists in the Ironic inventory.
:param nodename: The UUID of the node.
:returns: True if the node exists, False if not.
"""
# NOTE(comstud): We can cheat and use caching here. This method
# just needs to return True for nodes that exist. It doesn't
# matter if the data is stale. Sure, it's possible that removing a
# node from Ironic will cause this method to return True until
# the next call to 'get_available_nodes', but there shouldn't
# be much harm. There's already somewhat of a race.
if not self.node_cache:
# Empty cache, try to populate it.
self._refresh_cache()
if nodename in self.node_cache:
return True
# NOTE(comstud): Fallback and check Ironic. This case should be
# rare.
icli = client_wrapper.IronicClientWrapper()
try:
icli.call("node.get", nodename)
return True
except ironic.exc.NotFound:
return False
def _refresh_cache(self):
icli = client_wrapper.IronicClientWrapper()
node_list = icli.call('node.list', detail=True)
node_cache = {}
for node in node_list:
node_cache[node.uuid] = node
self.node_cache = node_cache
self.node_cache_time = time.time()
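# For illustration, after a refresh the cache maps node UUIDs to the node
# objects returned by python-ironicclient, e.g. (uuid hypothetical):
#   self.node_cache = {'1be26c0b-03f2-4d2e-ae87-c02d7f33c123': <Node>}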
def get_available_nodes(self, refresh=False):
"""Returns the UUIDs of all nodes in the Ironic inventory.
:param refresh: Boolean value; if True, run the update first.
Ignored by this driver.
:returns: a list of UUIDs
"""
# NOTE(jroll) we refresh the cache every time this is called
# because it needs to happen in the resource tracker
# periodic task. This task doesn't pass refresh=True,
# unfortunately.
self._refresh_cache()
node_uuids = list(self.node_cache.keys())
LOG.debug("Returning %(num_nodes)s available node(s)",
dict(num_nodes=len(node_uuids)))
return node_uuids
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: the UUID of the node.
:returns: a dictionary describing resources.
"""
# NOTE(comstud): We can cheat and use caching here. This method is
# only called from a periodic task and right after the above
# get_available_nodes() call is called.
if not self.node_cache:
# Well, it's also called from init_host(), so if the cache is
# empty, try to populate it.
self._refresh_cache()
cache_age = time.time() - self.node_cache_time
if nodename in self.node_cache:
LOG.debug("Using cache for node %(node)s, age: %(age)s",
{'node': nodename, 'age': cache_age})
node = self.node_cache[nodename]
else:
LOG.debug("Node %(node)s not found in cache, age: %(age)s",
{'node': nodename, 'age': cache_age})
icli = client_wrapper.IronicClientWrapper()
node = icli.call("node.get", nodename)
return self._node_resource(node)
def get_info(self, instance):
"""Get the current state and resource usage for this instance.
If the instance is not found, this method returns a dictionary
with state NOSTATE and all resources set to 0.
:param instance: the instance object.
:returns: a dictionary containing:
:state: the running state. One of :mod:`nova.compute.power_state`.
:max_mem: (int) the maximum memory in KBytes allowed.
:mem: (int) the memory in KBytes used by the domain.
:num_cpu: (int) the number of CPUs.
:cpu_time: (int) the CPU time used in nanoseconds. Always 0 for
this driver.
"""
icli = client_wrapper.IronicClientWrapper()
try:
node = _validate_instance_and_node(icli, instance)
except exception.InstanceNotFound:
return {'state': map_power_state(ironic_states.NOSTATE),
'max_mem': 0,
'mem': 0,
'num_cpu': 0,
'cpu_time': 0
}
memory_kib = int(node.properties.get('memory_mb', 0)) * 1024
if memory_kib == 0:
LOG.warn(_LW("Warning, memory usage is 0 for "
"%(instance)s on baremetal node %(node)s."),
{'instance': instance['uuid'],
'node': instance['node']})
num_cpu = node.properties.get('cpus', 0)
if num_cpu == 0:
LOG.warn(_LW("Warning, number of cpus is 0 for "
"%(instance)s on baremetal node %(node)s."),
{'instance': instance['uuid'],
'node': instance['node']})
return {'state': map_power_state(node.power_state),
'max_mem': memory_kib,
'mem': memory_kib,
'num_cpu': num_cpu,
'cpu_time': 0
}
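# For illustration, assuming map_power_state() maps 'power on' to
# power_state.RUNNING, a powered-on node with 4 CPUs and 8192 MB of RAM
# would yield:
#   {'state': power_state.RUNNING, 'max_mem': 8388608,
#    'mem': 8388608, 'num_cpu': 4, 'cpu_time': 0}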
def deallocate_networks_on_reschedule(self, instance):
"""Does the driver want networks deallocated on reschedule?
:param instance: the instance object.
:returns: Boolean value. If True deallocate networks on reschedule.
"""
return True
def macs_for_instance(self, instance):
"""List the MAC addresses of an instance.
List of MAC addresses for the node which this instance is
associated with.
:param instance: the instance object.
:return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
None means 'no constraints', a set means 'these and only these
MAC addresses'.
"""
icli = client_wrapper.IronicClientWrapper()
try:
node = icli.call("node.get", instance['node'])
except ironic.exc.NotFound:
return None
ports = icli.call("node.list_ports", node.uuid)
return set([p.address for p in ports])
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Deploy an instance.
:param context: The security context.
:param instance: The instance object.
:param image_meta: Image object returned by nova.image.glance
that defines the image from which to boot this instance.
:param injected_files: User files to inject into instance. Ignored
by this driver.
:param admin_password: Administrator password to set in
instance. Ignored by this driver.
:param network_info: Instance network information.
:param block_device_info: Instance block device
information. Ignored by this driver.
"""
# The compute manager is meant to know the node uuid, so missing uuid
# is a significant issue. It may mean we've been passed the wrong data.
node_uuid = instance.get('node')
if not node_uuid:
raise exception.NovaException(
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance['uuid'])
icli = client_wrapper.IronicClientWrapper()
node = icli.call("node.get", node_uuid)
flavor = flavor_obj.Flavor.get_by_id(context,
instance['instance_type_id'])
self._add_driver_fields(node, instance, image_meta, flavor)
# NOTE(Shrews): The default ephemeral device needs to be set for
# services (like cloud-init) that depend on it being returned by the
# metadata server. Addresses bug https://launchpad.net/bugs/1324286.
if flavor['ephemeral_gb']:
instance.default_ephemeral_device = '/dev/sda1'
instance.save()
# validate we are ready to do the deploy
validate_chk = icli.call("node.validate", node_uuid)
if not validate_chk.deploy or not validate_chk.power:
# something is wrong. undo what we have done
self._cleanup_deploy(node, instance, network_info)
raise exception.ValidationError(_(
"Ironic node: %(id)s failed to validate."
" (deploy: %(deploy)s, power: %(power)s)")
% {'id': node.uuid,
'deploy': validate_chk.deploy,
'power': validate_chk.power})
# prepare for the deploy
try:
self._plug_vifs(node, instance, network_info)
self._start_firewall(instance, network_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error preparing deploy for instance "
"%(instance)s on baremetal node %(node)s."),
{'instance': instance['uuid'],
'node': node_uuid})
self._cleanup_deploy(node, instance, network_info)
# trigger the node deploy
try:
icli.call("node.set_provision_state", node_uuid,
ironic_states.ACTIVE)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = (_("Failed to request Ironic to provision instance "
"%(inst)s: %(reason)s") % {'inst': instance['uuid'],
'reason': str(e)})
LOG.error(msg)
self._cleanup_deploy(node, instance, network_info)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
icli, instance)
try:
timer.start(interval=CONF.ironic.api_retry_interval).wait()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error deploying instance %(instance)s on "
"baremetal node %(node)s."),
{'instance': instance['uuid'],
'node': node_uuid})
self.destroy(context, instance, network_info)
def _unprovision(self, icli, instance, node):
"""This method is called from destroy() to unprovision
already provisioned node after required checks.
"""
try:
icli.call("node.set_provision_state", node.uuid, "deleted")
except Exception as e:
# if the node is already in a deprovisioned state, continue
# This should be fixed in Ironic.
# TODO(deva): This exception should be added to
# python-ironicclient and matched directly,
# rather than via __name__.
if getattr(e, '__name__', None) != 'InstanceDeployFailure':
raise
# use a dict so the nested function below can mutate the retry
# counter; Python 2 has no 'nonlocal' to rebind an outer name
data = {'tries': 0}
def _wait_for_provision_state():
node = _validate_instance_and_node(icli, instance)
if not node.provision_state:
LOG.debug("Ironic node %(node)s is now unprovisioned",
dict(node=node.uuid), instance=instance)
raise loopingcall.LoopingCallDone()
if data['tries'] >= CONF.ironic.api_max_retries:
msg = (_("Error destroying the instance on node %(node)s. "
"Provision state still '%(state)s'.")
% {'state': node.provision_state,
'node': node.uuid})
LOG.error(msg)
raise exception.NovaException(msg)
else:
data['tries'] += 1
_log_ironic_polling('unprovision', node, instance)
# wait for the state transition to finish
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_provision_state)
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def destroy(self, context, instance, network_info,
block_device_info=None, destroy_disks=True):
"""Destroy the specified instance, if it can be found.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information.
:param block_device_info: Instance block device
information. Ignored by this driver.
:param destroy_disks: Indicates if disks should be
destroyed. Ignored by this driver.
"""
icli = client_wrapper.IronicClientWrapper()
try:
node = _validate_instance_and_node(icli, instance)
except exception.InstanceNotFound:
LOG.warning(_LW("Destroy called on non-existing instance %s."),
instance['uuid'])
# NOTE(deva): if nova.compute.ComputeManager._delete_instance()
# is called on a non-existing instance, the only way
# to delete it is to return from this method
# without raising any exceptions.
return
if node.provision_state in (ironic_states.ACTIVE,
ironic_states.DEPLOYFAIL,
ironic_states.ERROR,
ironic_states.DEPLOYWAIT):
self._unprovision(icli, instance, node)
self._cleanup_deploy(node, instance, network_info)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information. Ignored by
this driver.
:param reboot_type: Either a HARD or SOFT reboot. Ignored by
this driver.
:param block_device_info: Info pertaining to attached volumes.
Ignored by this driver.
:param bad_volumes_callback: Function to handle any bad volumes
encountered. Ignored by this driver.
"""
icli = client_wrapper.IronicClientWrapper()
node = _validate_instance_and_node(icli, instance)
icli.call("node.set_power_state", node.uuid, 'reboot')
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.
:param instance: The instance object.
:param timeout: time to wait for node to shutdown. Ignored by
this driver.
:param retry_interval: How often to signal node while waiting
for it to shutdown. Ignored by this driver.
"""
icli = client_wrapper.IronicClientWrapper()
node = _validate_instance_and_node(icli, instance)
icli.call("node.set_power_state", node.uuid, 'off')
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information. Ignored by
this driver.
:param block_device_info: Instance block device
information. Ignored by this driver.
"""
icli = client_wrapper.IronicClientWrapper()
node = _validate_instance_and_node(icli, instance)
icli.call("node.set_power_state", node.uuid, 'on')
def get_host_stats(self, refresh=False):
"""Return the currently known stats for all Ironic nodes.
:param refresh: Boolean value; if True, run the update first.
Ignored by this driver.
:returns: a list of dictionaries; each dictionary contains the
stats for a node.
"""
caps = []
icli = client_wrapper.IronicClientWrapper()
node_list = icli.call("node.list")
for node in node_list:
data = self._node_resource(node)
caps.append(data)
return caps
def refresh_security_group_rules(self, security_group_id):
"""Refresh security group rules from data store.
Invoked when security group rules are updated.
:param security_group_id: The security group id.
"""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""Refresh security group members from data store.
Invoked when instances are added/removed to a security group.
:param security_group_id: The security group id.
"""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_provider_fw_rules(self):
"""Triggers a firewall update based on database changes."""
self.firewall_driver.refresh_provider_fw_rules()
def refresh_instance_security_rules(self, instance):
"""Refresh security group rules from data store.
Gets called when an instance gets added to or removed from
the security group the instance is a member of or if the
group gains or loses a rule.
:param instance: The instance object.
"""
self.firewall_driver.refresh_instance_security_rules(instance)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Set up filtering rules.
:param instance: The instance object.
:param network_info: Instance network information.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance.
:param instance: The instance object.
:param network_info: Instance network information.
"""
self.firewall_driver.unfilter_instance(instance, network_info)
def _plug_vifs(self, node, instance, network_info):
# NOTE(PhilDay): Accessing network_info will block if the thread
# it wraps hasn't finished, so do this ahead of time so that we
# don't block while holding the logging lock.
network_info_str = str(network_info)
LOG.debug("plug: instance_uuid=%(uuid)s vif=%(network_info)s",
{'uuid': instance['uuid'],
'network_info': network_info_str})
# start by ensuring the ports are clear
self._unplug_vifs(node, instance, network_info)
icli = client_wrapper.IronicClientWrapper()
ports = icli.call("node.list_ports", node.uuid)
if len(network_info) > len(ports):
raise exception.NovaException(_(
"Ironic node: %(id)s virtual to physical interface count"
" missmatch"
" (Vif count: %(vif_count)d, Pif count: %(pif_count)d)")
% {'id': node.uuid,
'vif_count': len(network_info),
'pif_count': len(ports)})
if len(network_info) > 0:
# not needed if no VIFs are defined
for vif, pif in zip(network_info, ports):
# attach what neutron needs directly to the port
port_id = unicode(vif['id'])
patch = [{'op': 'add',
'path': '/extra/vif_port_id',
'value': port_id}]
icli.call("port.update", pif.uuid, patch)
def _unplug_vifs(self, node, instance, network_info):
# NOTE(PhilDay): Accessing network_info will block if the thread
# it wraps hasn't finished, so do this ahead of time so that we
# don't block while holding the logging lock.
network_info_str = str(network_info)
LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(network_info)s",
{'uuid': instance['uuid'],
'network_info': network_info_str})
if network_info and len(network_info) > 0:
icli = client_wrapper.IronicClientWrapper()
ports = icli.call("node.list_ports", node.uuid)
# not needed if no VIFs are defined
for vif, pif in zip(network_info, ports):
# remove the vif_port_id that _plug_vifs added to the port's extra field
patch = [{'op': 'remove', 'path': '/extra/vif_port_id'}]
try:
icli.call("port.update", pif.uuid, patch)
except ironic.exc.BadRequest:
pass
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks.
:param instance: The instance object.
:param network_info: Instance network information.
"""
icli = client_wrapper.IronicClientWrapper()
node = icli.call("node.get", instance['node'])
self._plug_vifs(node, instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks.
:param instance: The instance object.
:param network_info: Instance network information.
"""
icli = client_wrapper.IronicClientWrapper()
node = icli.call("node.get", instance['node'])
self._unplug_vifs(node, instance, network_info)
def rebuild(self, context, instance, image_meta, injected_files,
admin_password, bdms, detach_block_devices,
attach_block_devices, network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
"""Rebuild/redeploy an instance.
This version of rebuild() allows for supporting the option to
preserve the ephemeral partition. We cannot call spawn() from
here because it will attempt to set the instance_uuid value
again, which is not allowed by the Ironic API. It also requires
the instance to not have an 'active' provision state, but we
cannot safely change that. Given that, we implement only the
portions of spawn() we need within rebuild().
:param context: The security context.
:param instance: The instance object.
:param image_meta: Image object returned by nova.image.glance
that defines the image from which to boot this instance. Ignored
by this driver.
:param injected_files: User files to inject into instance. Ignored
by this driver.
:param admin_password: Administrator password to set in
instance. Ignored by this driver.
:param bdms: block-device-mappings to use for rebuild. Ignored
by this driver.
:param detach_block_devices: function to detach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage. Ignored by this driver.
:param attach_block_devices: function to attach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage. Ignored by this driver.
:param network_info: Instance network information. Ignored by
this driver.
:param recreate: Boolean value; if True the instance is
recreated on a new hypervisor - all the cleanup of old state is
skipped. Ignored by this driver.
:param block_device_info: Instance block device
information. Ignored by this driver.
:param preserve_ephemeral: Boolean value; if True the ephemeral
must be preserved on rebuild.
"""
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(expected_task_state=[task_states.REBUILDING])
node_uuid = instance['node']
icli = client_wrapper.IronicClientWrapper()
node = icli.call("node.get", node_uuid)
flavor = flavor_obj.Flavor.get_by_id(context,
instance['instance_type_id'])
self._add_driver_fields(node, instance, image_meta, flavor,
preserve_ephemeral)
# Trigger the node rebuild/redeploy.
try:
icli.call("node.set_provision_state",
node_uuid, ironic_states.REBUILD)
except (exception.NovaException, # Retry failed
ironic.exc.InternalServerError, # Validations
ironic.exc.BadRequest) as e: # Maintenance
msg = (_("Failed to request Ironic to rebuild instance "
"%(inst)s: %(reason)s") % {'inst': instance['uuid'],
'reason': str(e)})
raise exception.InstanceDeployFailure(msg)
# Although the target provision state is REBUILD, it will actually go
# to ACTIVE once the redeploy is finished.
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
icli, instance)
timer.start(interval=CONF.ironic.api_retry_interval).wait()


@ -1,66 +0,0 @@
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mapping of bare metal node states.
A node may have empty {} `properties` and `driver_info`, in which case it is
said to be "initialized" but "not available", and the state is NOSTATE.
When updating `properties`, the data will be rejected if it fails to be
validated by the driver. Any node with non-empty `properties` is said to be
"initialized", and the state is INIT.
When the driver has received both `properties` and `driver_info`, it will check
the power status of the node and update the `power_state` accordingly. If the
driver fails to read the power state from the node, it will reject the
`driver_info` change, and the state will remain as INIT. If the power status
check succeeds, `power_state` will change to one of POWER_ON or POWER_OFF,
accordingly.
At this point, the power state may be changed via the API, a console
may be started, and a tenant may be associated.
The `power_state` for a node always represents the current power state. Any
power operation sets this to the actual state when done (whether successful or
not). It is set to ERROR only when unable to get the power state from a node.
When `instance_uuid` is set to a non-empty / non-None value, the node is said
to be "associated" with a tenant.
An associated node cannot be deleted.
The `instance_uuid` field may be unset only if the node is in POWER_OFF or
ERROR states.
"""
NOSTATE = None
INIT = 'initializing'
ACTIVE = 'active'
BUILDING = 'building'
DEPLOYWAIT = 'wait call-back'
DEPLOYING = 'deploying'
DEPLOYFAIL = 'deploy failed'
DEPLOYDONE = 'deploy complete'
DELETING = 'deleting'
DELETED = 'deleted'
ERROR = 'error'
REBUILD = 'rebuild'
POWER_ON = 'power on'
POWER_OFF = 'power off'
REBOOT = 'rebooting'
SUSPEND = 'suspended'
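# For illustration, consumers compare node fields against these constants
# rather than raw strings; a hypothetical check might read:
#
#   if node.provision_state == DEPLOYWAIT:
#       pass  # node is waiting for the deploy ramdisk to call back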


@ -1,170 +0,0 @@
# coding=utf-8
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Helper classes for Ironic HTTP PATCH creation.
"""
from oslo.config import cfg
from nova.openstack.common import log as logging
CONF = cfg.CONF
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
def create(node):
"""Create an instance of the appropriate DriverFields class.
:param node: a node object returned from ironicclient
:returns: GenericDriverFields or a subclass thereof, as appropriate
for the supplied node.
"""
if 'pxe' in node.driver:
return PXEDriverFields(node)
else:
return GenericDriverFields(node)
class GenericDriverFields(object):
def __init__(self, node):
self.node = node
def get_deploy_patch(self, instance, image_meta, flavor,
preserve_ephemeral=None):
"""Build a patch to add the required fields to deploy a node.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
:returns: a json-patch with the fields that need to be updated.
"""
patch = []
patch.append({'path': '/instance_info/image_source', 'op': 'add',
'value': image_meta['id']})
patch.append({'path': '/instance_info/root_gb', 'op': 'add',
'value': str(instance.root_gb)})
patch.append({'path': '/instance_info/swap_mb', 'op': 'add',
'value': str(flavor['swap'])})
if instance.ephemeral_gb:
patch.append({'path': '/instance_info/ephemeral_gb',
'op': 'add',
'value': str(instance.ephemeral_gb)})
if CONF.default_ephemeral_format:
patch.append({'path': '/instance_info/ephemeral_format',
'op': 'add',
'value': CONF.default_ephemeral_format})
if preserve_ephemeral is not None:
patch.append({'path': '/instance_info/preserve_ephemeral',
'op': 'add', 'value': str(preserve_ephemeral)})
return patch
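# For illustration, with a 20 GB root disk, 1024 MB of swap and a 2 GB
# ephemeral partition, the returned patch might look like this (image id
# hypothetical, ephemeral_format omitted):
#
#   [{'path': '/instance_info/image_source', 'op': 'add',
#     'value': '0f1a9e2c-...'},
#    {'path': '/instance_info/root_gb', 'op': 'add', 'value': '20'},
#    {'path': '/instance_info/swap_mb', 'op': 'add', 'value': '1024'},
#    {'path': '/instance_info/ephemeral_gb', 'op': 'add', 'value': '2'}]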
def get_cleanup_patch(self, instance, network_info, flavor):
"""Build a patch to clean up the fields.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
:returns: a json-patch with the fields that need to be updated.
"""
return []
class PXEDriverFields(GenericDriverFields):
def _get_kernel_ramdisk_dict(self, flavor):
"""Get the deploy ramdisk and kernel IDs from the flavor.
:param flavor: the flavor object.
:returns: a dict with the pxe options for the deploy ramdisk and
kernel if the IDs were found in the flavor, otherwise an empty
dict is returned.
"""
extra_specs = flavor['extra_specs']
deploy_kernel = extra_specs.get('baremetal:deploy_kernel_id')
deploy_ramdisk = extra_specs.get('baremetal:deploy_ramdisk_id')
deploy_ids = {}
if deploy_kernel and deploy_ramdisk:
deploy_ids['pxe_deploy_kernel'] = deploy_kernel
deploy_ids['pxe_deploy_ramdisk'] = deploy_ramdisk
return deploy_ids
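# For illustration, a flavor carrying the Icehouse-era extra specs
# (ids hypothetical):
#   extra_specs = {'baremetal:deploy_kernel_id': 'kernel-uuid',
#                  'baremetal:deploy_ramdisk_id': 'ramdisk-uuid'}
# would yield:
#   {'pxe_deploy_kernel': 'kernel-uuid',
#    'pxe_deploy_ramdisk': 'ramdisk-uuid'}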
def get_deploy_patch(self, instance, image_meta, flavor,
preserve_ephemeral=None):
"""Build a patch to add the required fields to deploy a node.
Build a json-patch to add the required fields to deploy a node
using the PXE driver.
:param instance: the instance object.
:param image_meta: the metadata associated with the instance
image.
:param flavor: the flavor object.
:param preserve_ephemeral: preserve_ephemeral status (bool) to be
specified during rebuild.
:returns: a json-patch with the fields that need to be updated.
"""
patch = super(PXEDriverFields, self).get_deploy_patch(
instance, image_meta, flavor, preserve_ephemeral)
# TODO(lucasagomes): Remove it in Kilo. This is for backwards
# compatibility with Icehouse. If flavor contains both ramdisk
# and kernel ids, use them.
for key, value in self._get_kernel_ramdisk_dict(flavor).items():
patch.append({'path': '/driver_info/%s' % key,
'op': 'add', 'value': value})
return patch
def get_cleanup_patch(self, instance, network_info, flavor):
"""Build a patch to clean up the fields.
Build a json-patch to remove the fields used to deploy a node
using the PXE driver. Note that the fields added to the Node's
instance_info don't need to be removed because they are purged
during the Node's tear down.
:param instance: the instance object.
:param network_info: the instance network information.
:param flavor: the flavor object.
:returns: a json-patch with the fields that need to be updated.
"""
patch = super(PXEDriverFields, self).get_cleanup_patch(
instance, network_info, flavor)
# TODO(lucasagomes): Remove it in Kilo. This is for backwards
# compatibility with Icehouse. If flavor contains a ramdisk and
# kernel id remove it from nodes as part of the tear down process
for key in self._get_kernel_ramdisk_dict(flavor):
if key in self.node.driver_info:
patch.append({'op': 'remove',
'path': '/driver_info/%s' % key})
return patch


@ -17,7 +17,3 @@ sphinx>=1.1.2,!=1.2.0,<1.3
sphinxcontrib-pecanwsme>=0.8
oslosphinx>=2.2.0.0a2
# Required for Nova unit tests in ironic/nova/tests/ and can be removed
# once the driver code lands in Nova.
http://tarballs.openstack.org/nova/nova-master.tar.gz#egg=nova
mox>=0.5.3


@ -12,8 +12,6 @@ deps = -r{toxinidir}/requirements.txt
whitelist_externals = bash
commands =
bash -c "TESTS_DIR=./ironic/tests/ python setup.py testr --slowest --testr-args='{posargs}'"
bash -c "TESTS_DIR=./ironic/nova/tests/ python setup.py testr --slowest --testr-args='{posargs}'"
bash -c "cat .testrepository/1 >>.testrepository/0"
[tox:jenkins]
downloadcache = ~/cache/pip