Fix last of direct use of object modules

This replaces the remaining uses of nova.objects.<module>.<object>
with nova.objects.<object>.
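
As a minimal illustration of the pattern (names are taken from the diff
below; this sketch is not itself part of the change, and ctxt stands in
for whatever context object is in scope):

    # Before: import the object's module and reference the class through it.
    from nova.objects import instance_fault as instance_fault_obj
    fault = instance_fault_obj.InstanceFault(context=ctxt)

    # After: reference the class through the top-level objects namespace,
    # which exposes every registered object class.
    from nova import objects
    fault = objects.InstanceFault(context=ctxt)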

Implements-Blueprint: object-subclassing

Change-Id: Ic7632cca2455a38abcbdb94feb7e39cfb898bb27
Commit: 3883697d31
Parent: a905cf2c0c
Author: Chris Behrens, 2014-05-20 14:55:13 -07:00
Committed by: Michael Still

16 changed files with 89 additions and 106 deletions

@@ -48,7 +48,6 @@ from nova.i18n import _
 from nova.network import model as network_model
 from nova import objects
 from nova.objects import base as objects_base
-from nova.objects import instance_fault as instance_fault_obj
 from nova.openstack.common import excutils
 from nova.openstack.common import importutils
 from nova.openstack.common import jsonutils
@@ -1103,7 +1102,7 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
         log_str = _("Got message to create instance fault: "
                     "%(instance_fault)s")
         LOG.debug(log_str, {'instance_fault': instance_fault})
-        fault = instance_fault_obj.InstanceFault(context=message.ctxt)
+        fault = objects.InstanceFault(context=message.ctxt)
         fault.update(instance_fault)
         fault.create()

@@ -33,7 +33,6 @@ from nova import exception
 from nova.i18n import _
 from nova import objects
 from nova.objects import base as obj_base
-from nova.objects import instance_action as instance_action_obj
 from nova.openstack.common import log as logging
 from nova.scheduler import utils as scheduler_utils
 from nova import utils
@@ -120,7 +119,7 @@ class CellsScheduler(base.Base):
     def _create_action_here(self, ctxt, instance_uuids):
         for instance_uuid in instance_uuids:
-            instance_action_obj.InstanceAction.action_start(
+            objects.InstanceAction.action_start(
                     ctxt,
                     instance_uuid,
                     instance_actions.CREATE,

@@ -35,7 +35,6 @@ from nova.i18n import _
 from nova.network import rpcapi as network_rpcapi
 from nova import objects
 from nova.objects import base as objects_base
-from nova.objects import network as network_obj
 from nova.openstack.common import importutils
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
@@ -72,7 +71,7 @@ def del_lease(mac, ip_address):
 def init_leases(network_id):
     """Get the list of hosts for a network."""
     ctxt = context.get_admin_context()
-    network = network_obj.Network.get_by_id(ctxt, network_id)
+    network = objects.Network.get_by_id(ctxt, network_id)
     network_manager = importutils.import_object(CONF.network_manager)
     return network_manager.get_dhcp_leases(ctxt, network)

@@ -39,7 +39,6 @@ from nova.network.security_group import openstack_driver
 from nova import notifications
 from nova import objects
 from nova.objects import base as nova_object
-from nova.objects import quotas as quotas_obj
 from nova.openstack.common import excutils
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
@@ -503,9 +502,9 @@ class ComputeTaskManager(base.Base):
         request_spec = scheduler_utils.build_request_spec(
             context, image, [instance], instance_type=flavor)
-        quotas = quotas_obj.Quotas.from_reservations(context,
-                                                     reservations,
-                                                     instance=instance)
+        quotas = objects.Quotas.from_reservations(context,
+                                                  reservations,
+                                                  instance=instance)
         try:
             scheduler_utils.populate_retry(filter_properties, instance['uuid'])
             hosts = self.scheduler_rpcapi.select_destinations(

@@ -24,7 +24,7 @@ import six
 from nova import db
 from nova import exception
 from nova.i18n import _
-from nova.objects import keypair as keypair_obj
+from nova import objects
 from nova.openstack.common import importutils
 from nova.openstack.common import log as logging
 from nova.openstack.common import timeutils
@@ -1405,6 +1405,11 @@ class QuotaEngine(object):
         return sorted(self._resources.keys())
 
 
+def _keypair_get_count_by_user(*args, **kwargs):
+    """Helper method to avoid referencing objects.KeyPairList on import."""
+    return objects.KeyPairList.get_count_by_user(*args, **kwargs)
+
+
 QUOTAS = QuotaEngine()
@@ -1426,7 +1431,7 @@ resources = [
     CountableResource('security_group_rules',
                       db.security_group_rule_count_by_group,
                       'quota_security_group_rules'),
-    CountableResource('key_pairs', keypair_obj.KeyPairList.get_count_by_user,
+    CountableResource('key_pairs', _keypair_get_count_by_user,
                       'quota_key_pairs'),
     ]
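
An illustrative aside, not part of the diff: the wrapper above is needed
because the module-level resources list is built when the quota module is
imported, and the objects registry may not yet be populated at that point
(as its docstring says, it avoids referencing objects.KeyPairList on
import). Wrapping the reference in a function defers the attribute lookup
to call time. A minimal, self-contained sketch of the same deferral
pattern, with hypothetical names:

    class _Registry(object):
        """Stand-in for a registry that is populated after import."""

    registry = _Registry()

    def _deferred_count(*args, **kwargs):
        # The attribute is resolved here, at call time -- not at the
        # time this function is defined.
        return registry.KeyPairList.get_count_by_user(*args, **kwargs)

    class KeyPairList(object):
        @staticmethod
        def get_count_by_user(context, user_id):
            return 0

    # Registration happens after _deferred_count was defined, yet the
    # call still resolves correctly.
    registry.KeyPairList = KeyPairList
    assert _deferred_count(None, 'fake-user') == 0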

@@ -26,7 +26,7 @@ from oslo.config import cfg
 from nova.compute import rpcapi as compute_rpcapi
 from nova import exception
 from nova.i18n import _
-from nova.objects import instance_group as instance_group_obj
+from nova import objects
 from nova.openstack.common import log as logging
 from nova.pci import pci_request
 from nova import rpc
@@ -208,8 +208,7 @@ class FilterScheduler(driver.Scheduler):
         scheduler_hints = filter_properties.get('scheduler_hints') or {}
         group_hint = scheduler_hints.get('group', None)
         if group_hint:
-            group = instance_group_obj.InstanceGroup.get_by_hint(context,
-                                                                 group_hint)
+            group = objects.InstanceGroup.get_by_hint(context, group_hint)
             policies = set(('anti-affinity', 'affinity'))
             if any((policy in policies) for policy in group.policies):
                 update_group_hosts = True

@@ -33,7 +33,6 @@ from nova.network import model as network_model
 from nova import objects
 from nova.objects import base as objects_base
 from nova.objects import fields as objects_fields
-from nova.objects import instance_fault as instance_fault_obj
 from nova.openstack.common import jsonutils
 from nova.openstack.common import timeutils
 from nova.openstack.common import uuidutils
@@ -1621,7 +1620,7 @@ class CellsBroadcastMethodsTestCase(test.TestCase):
                                'message': 'fake-message',
                                'details': 'fake-details'}
 
-        if_mock = mock.Mock(spec_set=instance_fault_obj.InstanceFault)
+        if_mock = mock.Mock(spec_set=objects.InstanceFault)
 
         def _check_create():
             self.assertEqual('fake-message', if_mock.message)
@@ -1631,8 +1630,7 @@ class CellsBroadcastMethodsTestCase(test.TestCase):
         if_mock.create.side_effect = _check_create
 
-        with mock.patch.object(instance_fault_obj,
-                               'InstanceFault') as if_obj_mock:
+        with mock.patch.object(objects, 'InstanceFault') as if_obj_mock:
             if_obj_mock.return_value = if_mock
             self.src_msg_runner.instance_fault_create_at_top(
                     self.ctxt, fake_instance_fault)

@@ -17,7 +17,6 @@ import uuid
 
 from nova import objects
 from nova.objects import fields
-from nova.objects import instance_fault as inst_fault_obj
 
 
 def fake_db_secgroups(instance, names):
@@ -103,6 +102,6 @@ def fake_fault_obj(context, instance_uuid, code=404,
         }
     if updates:
         fault.update(updates)
-    return inst_fault_obj.InstanceFault._from_db_object(context,
-                                                        inst_fault_obj.InstanceFault(),
-                                                        fault)
+    return objects.InstanceFault._from_db_object(context,
+                                                 objects.InstanceFault(),
+                                                 fault)

@@ -27,7 +27,7 @@ from nova.compute import vm_states
 from nova import context
 from nova import db
 from nova import exception
-from nova.objects import instance_group as instance_group_obj
+from nova import objects
 from nova.pci import pci_request
 from nova.scheduler import driver
 from nova.scheduler import filter_scheduler
@@ -375,7 +375,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         instance = fake_instance.fake_instance_obj(self.context,
                                                    params={'host': 'hostA'})
 
-        group = instance_group_obj.InstanceGroup()
+        group = objects.InstanceGroup()
         group.name = 'pele'
         group.uuid = str(uuid.uuid4())
         group.members = [instance.uuid]
@@ -393,10 +393,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         }
 
         with contextlib.nested(
-            mock.patch.object(instance_group_obj.InstanceGroup, func,
-                              return_value=group),
-            mock.patch.object(instance_group_obj.InstanceGroup, 'get_hosts',
-                              return_value=['hostA']),
+            mock.patch.object(objects.InstanceGroup, func, return_value=group),
+            mock.patch.object(objects.InstanceGroup, 'get_hosts',
+                              return_value=['hostA']),
         ) as (get_group, get_hosts):
             update_group_hosts = sched._setup_instance_group(self.context,
                                                              filter_properties)

@@ -25,7 +25,7 @@ from oslo.config import cfg
 from testtools import matchers
 
 from nova import exception
-from nova.objects import flavor as flavor_obj
+from nova import objects
 from nova.openstack.common.db import exception as db_exc
 from nova.tests.image import fake as fake_image
 from nova.tests import utils
@@ -437,15 +437,15 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
     def test_cache_images(self):
         self._create_node()
-        self.mox.StubOutWithMock(flavor_obj.Flavor, 'get_by_id')
+        self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
         self.mox.StubOutWithMock(pxe, "get_tftp_image_info")
         self.mox.StubOutWithMock(self.driver, "_cache_tftp_images")
         self.mox.StubOutWithMock(self.driver, "_cache_image")
         self.mox.StubOutWithMock(self.driver, "_inject_into_image")
 
-        flavor_obj.Flavor.get_by_id(self.context,
-                                    self.instance['instance_type_id']
-                                    ).AndReturn({})
+        objects.Flavor.get_by_id(self.context,
+                                 self.instance['instance_type_id']
+                                 ).AndReturn({})
         pxe.get_tftp_image_info(self.instance, {}).AndReturn([])
         self.driver._cache_tftp_images(self.context, self.instance, [])
         self.driver._cache_image(self.context, self.instance, [])
@@ -501,7 +501,7 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
         pxe_path = pxe.get_pxe_config_file_path(self.instance)
         pxe.get_image_file_path(self.instance)
 
-        self.mox.StubOutWithMock(flavor_obj.Flavor, 'get_by_id')
+        self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
         self.mox.StubOutWithMock(pxe, 'get_tftp_image_info')
         self.mox.StubOutWithMock(pxe, 'get_partition_sizes')
         self.mox.StubOutWithMock(bm_utils, 'random_alnum')
@@ -509,9 +509,9 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
         self.mox.StubOutWithMock(bm_utils, 'write_to_file')
         self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
 
-        flavor_obj.Flavor.get_by_id(self.context,
-                                    self.instance['instance_type_id']
-                                    ).AndReturn({})
+        objects.Flavor.get_by_id(self.context,
+                                 self.instance['instance_type_id']
+                                 ).AndReturn({})
         pxe.get_tftp_image_info(self.instance, {}).AndReturn(image_info)
         pxe.get_partition_sizes(self.instance).AndReturn((0, 0, 0))
         bm_utils.random_alnum(32).AndReturn('alnum')
@@ -533,7 +533,7 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
     def test_activate_and_deactivate_bootloader(self):
         self._create_node()
-        flavor = flavor_obj.Flavor(
+        flavor = objects.Flavor(
                 context=self.context,
                 extra_specs={
                     'baremetal:deploy_kernel_id': 'eeee',
@@ -541,13 +541,13 @@ class PXEPublicMethodsTestCase(BareMetalPXETestCase):
                 })
         self.instance['uuid'] = 'fake-uuid'
 
-        self.mox.StubOutWithMock(flavor_obj.Flavor, 'get_by_id')
+        self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
         self.mox.StubOutWithMock(bm_utils, 'write_to_file')
         self.mox.StubOutWithMock(bm_utils, 'create_link_without_raise')
         self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
         self.mox.StubOutWithMock(bm_utils, 'rmtree_without_raise')
 
-        flavor_obj.Flavor.get_by_id(
+        objects.Flavor.get_by_id(
             self.context, self.instance['instance_type_id']).AndReturn(
                 flavor)

@@ -46,9 +46,6 @@ from nova import db
 from nova import exception
 from nova.network import model as network_model
 from nova import objects
-from nova.objects import flavor as flavor_obj
-from nova.objects import pci_device as pci_device_obj
-from nova.objects import service as service_obj
 from nova.openstack.common import fileutils
 from nova.openstack.common import importutils
 from nova.openstack.common import jsonutils
@@ -634,7 +631,7 @@ class LibvirtConnTestCase(test.TestCase,
     def create_instance_obj(self, context, **params):
         default_params = self.test_instance
-        default_params['pci_devices'] = pci_device_obj.PciDeviceList()
+        default_params['pci_devices'] = objects.PciDeviceList()
         default_params.update(params)
         instance = objects.Instance(context, **params)
         flavor = flavors.get_default_flavor()
@@ -902,7 +899,7 @@ class LibvirtConnTestCase(test.TestCase,
             mock.patch.object(conn, "_connect", return_value=self.conn),
             mock.patch.object(self.conn, "registerCloseCallback",
                               side_effect=set_close_callback),
-            mock.patch.object(service_obj.Service, "get_by_compute_host",
+            mock.patch.object(objects.Service, "get_by_compute_host",
                               return_value=service_mock)):
 
             # verify that the driver registers for the close callback
@@ -929,7 +926,7 @@ class LibvirtConnTestCase(test.TestCase,
             mock.patch.object(conn, "_connect", return_value=self.conn),
             mock.patch.object(self.conn, "registerCloseCallback",
                               side_effect=TypeError('dd')),
-            mock.patch.object(service_obj.Service, "get_by_compute_host",
+            mock.patch.object(objects.Service, "get_by_compute_host",
                               return_value=service_mock)):
 
             connection = conn._get_connection()
@@ -947,7 +944,7 @@ class LibvirtConnTestCase(test.TestCase,
             mock.patch.object(conn, "_connect", return_value=self.conn),
             mock.patch.object(self.conn, "registerCloseCallback",
                               side_effect=AttributeError('dd')),
-            mock.patch.object(service_obj.Service, "get_by_compute_host",
+            mock.patch.object(objects.Service, "get_by_compute_host",
                               return_value=service_mock)):
 
             connection = conn._get_connection()
@@ -1552,9 +1549,8 @@ class LibvirtConnTestCase(test.TestCase,
         conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 
-        fake_flavor = flavor_obj.Flavor.get_by_id(
-            self.context,
-            self.test_instance['instance_type_id'])
+        fake_flavor = objects.Flavor.get_by_id(
+            self.context, self.test_instance['instance_type_id'])
         fake_flavor.extra_specs = {'hw_watchdog_action': 'none'}
 
         instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1562,7 +1558,7 @@ class LibvirtConnTestCase(test.TestCase,
         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                             instance_ref)
 
-        with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+        with mock.patch.object(objects.Flavor, 'get_by_id',
                                return_value=fake_flavor):
             cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
@@ -1591,9 +1587,8 @@ class LibvirtConnTestCase(test.TestCase,
         conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 
-        fake_flavor = flavor_obj.Flavor.get_by_id(
-            self.context,
-            self.test_instance['instance_type_id'])
+        fake_flavor = objects.Flavor.get_by_id(
+            self.context, self.test_instance['instance_type_id'])
         fake_flavor.extra_specs = {'hw_watchdog_action': 'none'}
 
         instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1603,7 +1598,7 @@ class LibvirtConnTestCase(test.TestCase,
         image_meta = {"properties": {"hw_watchdog_action": "pause"}}
 
-        with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+        with mock.patch.object(objects.Flavor, 'get_by_id',
                                return_value=fake_flavor):
             cfg = conn._get_guest_config(instance_ref, [],
                                          image_meta, disk_info)
@@ -1713,7 +1708,7 @@ class LibvirtConnTestCase(test.TestCase,
                    agent_enabled=True,
                    group='spice')
 
-        instance_type = flavor_obj.Flavor.get_by_id(self.context, 5)
+        instance_type = objects.Flavor.get_by_id(self.context, 5)
         instance_type.extra_specs = {'hw_video:ram_max_mb': "100"}
         conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
         instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1722,7 +1717,7 @@ class LibvirtConnTestCase(test.TestCase,
                                             instance_ref)
         image_meta = {"properties": {"hw_video_model": "qxl",
                                      "hw_video_ram": "64"}}
 
-        with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+        with mock.patch.object(objects.Flavor, 'get_by_id',
                                return_value=instance_type):
             cfg = conn._get_guest_config(instance_ref, [],
                                          image_meta, disk_info)
@@ -1772,7 +1767,7 @@ class LibvirtConnTestCase(test.TestCase,
                    agent_enabled=True,
                    group='spice')
 
-        instance_type = flavor_obj.Flavor.get_by_id(self.context, 5)
+        instance_type = objects.Flavor.get_by_id(self.context, 5)
         instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
         conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
         instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1781,7 +1776,7 @@ class LibvirtConnTestCase(test.TestCase,
                                             instance_ref)
         image_meta = {"properties": {"hw_video_model": "qxl",
                                      "hw_video_ram": "64"}}
 
-        with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+        with mock.patch.object(objects.Flavor, 'get_by_id',
                                return_value=instance_type):
             self.assertRaises(exception.RequestedVRamTooHigh,
                               conn._get_guest_config,
@@ -1824,7 +1819,7 @@ class LibvirtConnTestCase(test.TestCase,
                    use_usb_tablet=False,
                    group='libvirt')
 
-        fake_flavor = flavor_obj.Flavor.get_by_id(
+        fake_flavor = objects.Flavor.get_by_id(
             self.context,
             self.test_instance['instance_type_id'])
         fake_flavor.extra_specs = {'hw_rng:allowed': 'True'}
@@ -1835,7 +1830,7 @@ class LibvirtConnTestCase(test.TestCase,
                                             instance_ref)
         image_meta = {"properties": {"hw_rng_model": "virtio"}}
 
-        with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+        with mock.patch.object(objects.Flavor, 'get_by_id',
                                return_value=fake_flavor):
             cfg = conn._get_guest_config(instance_ref, [],
                                          image_meta, disk_info)
@@ -1892,7 +1887,7 @@ class LibvirtConnTestCase(test.TestCase,
                    use_usb_tablet=False,
                    group='libvirt')
 
-        fake_flavor = flavor_obj.Flavor.get_by_id(
+        fake_flavor = objects.Flavor.get_by_id(
             self.context,
             self.test_instance['instance_type_id'])
         fake_flavor.extra_specs = {'hw_rng:allowed': 'True',
@@ -1905,7 +1900,7 @@ class LibvirtConnTestCase(test.TestCase,
                                             instance_ref)
         image_meta = {"properties": {"hw_rng_model": "virtio"}}
 
-        with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+        with mock.patch.object(objects.Flavor, 'get_by_id',
                                return_value=fake_flavor):
             cfg = conn._get_guest_config(instance_ref, [],
                                          image_meta, disk_info)
@@ -1935,7 +1930,7 @@ class LibvirtConnTestCase(test.TestCase,
                    rng_dev_path='/dev/hw_rng',
                    group='libvirt')
 
-        fake_flavor = flavor_obj.Flavor.get_by_id(
+        fake_flavor = objects.Flavor.get_by_id(
             self.context,
             self.test_instance['instance_type_id'])
         fake_flavor.extra_specs = {'hw_rng:allowed': 'True'}
@@ -1946,7 +1941,7 @@ class LibvirtConnTestCase(test.TestCase,
                                             instance_ref)
         image_meta = {"properties": {"hw_rng_model": "virtio"}}
 
-        with contextlib.nested(mock.patch.object(flavor_obj.Flavor,
+        with contextlib.nested(mock.patch.object(objects.Flavor,
                                                  'get_by_id',
                                                  return_value=fake_flavor),
                       mock.patch('nova.virt.libvirt.driver.os.path.exists',
@@ -1979,7 +1974,7 @@ class LibvirtConnTestCase(test.TestCase,
                    rng_dev_path='/dev/hw_rng',
                    group='libvirt')
 
-        fake_flavor = flavor_obj.Flavor.get_by_id(
+        fake_flavor = objects.Flavor.get_by_id(
             self.context,
             self.test_instance['instance_type_id'])
         fake_flavor.extra_specs = {'hw_rng:allowed': 'True'}
@@ -1990,7 +1985,7 @@ class LibvirtConnTestCase(test.TestCase,
                                             instance_ref)
         image_meta = {"properties": {"hw_rng_model": "virtio"}}
 
-        with contextlib.nested(mock.patch.object(flavor_obj.Flavor,
+        with contextlib.nested(mock.patch.object(objects.Flavor,
                                                  'get_by_id',
                                                  return_value=fake_flavor),
                       mock.patch('nova.virt.libvirt.driver.os.path.exists',
@@ -2006,7 +2001,7 @@ class LibvirtConnTestCase(test.TestCase,
         conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 
-        fake_flavor = flavor_obj.Flavor.get_by_id(
+        fake_flavor = objects.flavor.Flavor.get_by_id(
             self.context,
             self.test_instance['instance_type_id'])
         fake_flavor.extra_specs = {'quota:cpu_shares': '10000',
@@ -2017,7 +2012,7 @@ class LibvirtConnTestCase(test.TestCase,
         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                             instance_ref)
 
-        with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+        with mock.patch.object(objects.flavor.Flavor, 'get_by_id',
                                return_value=fake_flavor):
             cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
@@ -2029,7 +2024,7 @@ class LibvirtConnTestCase(test.TestCase,
         conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
 
-        fake_flavor = flavor_obj.Flavor.get_by_id(
+        fake_flavor = objects.flavor.Flavor.get_by_id(
             self.context,
             self.test_instance['instance_type_id'])
         fake_flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
@@ -2040,7 +2035,7 @@ class LibvirtConnTestCase(test.TestCase,
         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                             instance_ref)
 
-        with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+        with mock.patch.object(objects.flavor.Flavor, 'get_by_id',
                                return_value=fake_flavor):
             self.assertRaises(ValueError,
                               conn._get_guest_config,
@@ -2557,7 +2552,7 @@ class LibvirtConnTestCase(test.TestCase,
         self.assertEqual(conf.cpu.threads, 1)
 
     def test_get_guest_cpu_topology(self):
-        fake_flavor = flavor_obj.Flavor.get_by_id(
+        fake_flavor = objects.flavor.Flavor.get_by_id(
             self.context,
             self.test_instance['instance_type_id'])
         fake_flavor.vcpus = 8
@@ -2568,7 +2563,7 @@ class LibvirtConnTestCase(test.TestCase,
         disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                             instance_ref)
 
-        with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+        with mock.patch.object(objects.flavor.Flavor, 'get_by_id',
                                return_value=fake_flavor):
             conf = conn._get_guest_config(instance_ref,
                                           _fake_network_info(self.stubs, 1),
@@ -5624,7 +5619,7 @@ class LibvirtConnTestCase(test.TestCase,
         with contextlib.nested(
             mock.patch.object(libvirt, 'openAuth',
                               return_value=mock.MagicMock()),
-            mock.patch.object(service_obj.Service, "get_by_compute_host",
+            mock.patch.object(objects.Service, "get_by_compute_host",
                               return_value=service_mock)):
 
             conn.get_num_instances()
@@ -7540,7 +7535,7 @@ Active: 8381604 kB
             else:
                 raise ValueError("Unhandled method %" % method_name)
 
-        fake_flavor = flavor_obj.Flavor.get_by_id(
+        fake_flavor = objects.Flavor.get_by_id(
             self.context, test_instance['instance_type_id'])
         expected = conn.vif_driver.get_config(test_instance, network_info[0],
                                               fake_image_meta,
@@ -7548,7 +7543,7 @@ Active: 8381604 kB
         self.mox.StubOutWithMock(conn.vif_driver, 'get_config')
         conn.vif_driver.get_config(test_instance, network_info[0],
                                    fake_image_meta,
-                                   mox.IsA(flavor_obj.Flavor)).\
+                                   mox.IsA(objects.Flavor)).\
                                    AndReturn(expected)
 
         self.mox.ReplayAll()
@@ -9963,7 +9958,7 @@ class LibvirtDriverTestCase(test.TestCase):
             self.libvirtconnection.firewall_driver.setup_basic_filtering(
                 instance, [network_info[0]])
 
-        fake_flavor = flavor_obj.Flavor.get_by_id(
+        fake_flavor = objects.Flavor.get_by_id(
             self.context, instance['instance_type_id'])
         if method == 'attach_interface':
             fake_image_meta = {'id': instance['image_ref']}
@@ -9977,7 +9972,7 @@ class LibvirtDriverTestCase(test.TestCase):
         self.libvirtconnection.vif_driver.get_config(
             instance, network_info[0],
             fake_image_meta,
-            mox.IsA(flavor_obj.Flavor)).AndReturn(expected)
+            mox.IsA(objects.Flavor)).AndReturn(expected)
         domain.info().AndReturn([power_state])
         if method == 'attach_interface':
             domain.attachDeviceFlags(expected.to_xml(), expected_flags)

@@ -23,6 +23,7 @@ import six
 
 from nova.compute import manager
 from nova import exception
+from nova import objects
 from nova.openstack.common import importutils
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
@@ -784,8 +785,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
         # Previous status of the service: disabled: False
         service_mock.configure_mock(disabled_reason='None',
                                     disabled=False)
-        from nova.objects import service as service_obj
-        with mock.patch.object(service_obj.Service, "get_by_compute_host",
+        with mock.patch.object(objects.Service, "get_by_compute_host",
                                return_value=service_mock):
             self.connection._set_host_enabled(False, 'ERROR!')
             self.assertTrue(service_mock.disabled)
@@ -798,8 +798,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
         # Previous status of the service: disabled: True, 'AUTO: ERROR'
         service_mock.configure_mock(disabled_reason='AUTO: ERROR',
                                     disabled=True)
-        from nova.objects import service as service_obj
-        with mock.patch.object(service_obj.Service, "get_by_compute_host",
+        with mock.patch.object(objects.Service, "get_by_compute_host",
                                return_value=service_mock):
             self.connection._set_host_enabled(True)
             self.assertFalse(service_mock.disabled)
@@ -812,8 +811,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
         # Previous status of the service: disabled: True, 'Manually disabled'
         service_mock.configure_mock(disabled_reason='Manually disabled',
                                     disabled=True)
-        from nova.objects import service as service_obj
-        with mock.patch.object(service_obj.Service, "get_by_compute_host",
+        with mock.patch.object(objects.Service, "get_by_compute_host",
                                return_value=service_mock):
             self.connection._set_host_enabled(True)
             self.assertTrue(service_mock.disabled)
@@ -826,8 +824,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
         # Previous status of the service: disabled: True, 'Manually disabled'
        service_mock.configure_mock(disabled_reason='Manually disabled',
                                     disabled=True)
-        from nova.objects import service as service_obj
-        with mock.patch.object(service_obj.Service, "get_by_compute_host",
+        with mock.patch.object(objects.Service, "get_by_compute_host",
                                return_value=service_mock):
             self.connection._set_host_enabled(False, 'ERROR!')
             self.assertTrue(service_mock.disabled)

@@ -27,7 +27,7 @@ from oslo.config import cfg
 from nova.compute import flavors
 from nova import exception
 from nova.i18n import _
-from nova.objects import flavor as flavor_obj
+from nova import objects
 from nova.openstack.common.db import exception as db_exc
 from nova.openstack.common import fileutils
 from nova.openstack.common import log as logging
@@ -334,8 +334,8 @@ class PXE(base.NodeDriver):
     def cache_images(self, context, node, instance,
                      admin_password, image_meta, injected_files, network_info):
         """Prepare all the images for this instance."""
-        flavor = flavor_obj.Flavor.get_by_id(context,
-                                             instance['instance_type_id'])
+        flavor = objects.Flavor.get_by_id(context,
+                                          instance['instance_type_id'])
         tftp_image_info = get_tftp_image_info(instance, flavor)
         self._cache_tftp_images(context, instance, tftp_image_info)
@@ -379,8 +379,8 @@ class PXE(base.NodeDriver):
             ./pxelinux.cfg/
                 {mac} -> ../{uuid}/config
         """
-        flavor = flavor_obj.Flavor.get_by_id(context,
-                                             instance['instance_type_id'])
+        flavor = objects.Flavor.get_by_id(context,
+                                          instance['instance_type_id'])
         image_info = get_tftp_image_info(instance, flavor)
         (root_mb, swap_mb, ephemeral_mb) = get_partition_sizes(instance)
         pxe_config_file_path = get_pxe_config_file_path(instance)

@@ -23,8 +23,6 @@ from nova.i18n import _
 from nova.i18n import _LI
 from nova.network import linux_net
 from nova import objects
-from nova.objects import security_group as security_group_obj
-from nova.objects import security_group_rule as security_group_rule_obj
 from nova.openstack.common import importutils
 from nova.openstack.common import log as logging
 from nova import utils
@@ -357,13 +355,13 @@ class IptablesFirewallDriver(FirewallDriver):
         # Allow RA responses
         self._do_ra_rules(ipv6_rules, network_info)
 
-        security_groups = security_group_obj.SecurityGroupList.get_by_instance(
+        security_groups = objects.SecurityGroupList.get_by_instance(
             ctxt, instance)
 
         # then, security group chains and rules
         for security_group in security_groups:
-            rules_cls = security_group_rule_obj.SecurityGroupRuleList
-            rules = rules_cls.get_by_security_group(ctxt, security_group)
+            rules = objects.SecurityGroupRuleList.get_by_security_group(
+                ctxt, security_group)
 
             for rule in rules:
                 if not rule['cidr']:

@@ -62,8 +62,6 @@ from nova.i18n import _LI
 from nova.i18n import _LW
 from nova import image
 from nova import objects
-from nova.objects import flavor as flavor_obj
-from nova.objects import service as service_obj
 from nova.openstack.common import excutils
 from nova.openstack.common import fileutils
 from nova.openstack.common import importutils
@@ -1422,7 +1420,7 @@ class LibvirtDriver(driver.ComputeDriver):
     def attach_interface(self, instance, image_meta, vif):
         virt_dom = self._lookup_by_name(instance['name'])
-        flavor = flavor_obj.Flavor.get_by_id(
+        flavor = objects.Flavor.get_by_id(
             nova_context.get_admin_context(read_deleted='yes'),
             instance['instance_type_id'])
         self.vif_driver.plug(instance, vif)
@@ -1443,7 +1441,7 @@ class LibvirtDriver(driver.ComputeDriver):
     def detach_interface(self, instance, vif):
         virt_dom = self._lookup_by_name(instance['name'])
-        flavor = flavor_obj.Flavor.get_by_id(
+        flavor = objects.Flavor.get_by_id(
             nova_context.get_admin_context(read_deleted='yes'),
             instance['instance_type_id'])
         cfg = self.vif_driver.get_config(instance, vif, None, flavor)
@@ -2881,7 +2879,7 @@ class LibvirtDriver(driver.ComputeDriver):
         ctx = nova_context.get_admin_context()
 
         try:
-            service = service_obj.Service.get_by_compute_host(ctx, CONF.host)
+            service = objects.Service.get_by_compute_host(ctx, CONF.host)
 
             if service.disabled != disable_service:
                 # Note(jang): this is a quick fix to stop operator-
@@ -3183,7 +3181,7 @@ class LibvirtDriver(driver.ComputeDriver):
             'kernel_id' if a kernel is needed for the rescue image.
         """
-        flavor = flavor_obj.Flavor.get_by_id(
+        flavor = objects.Flavor.get_by_id(
             nova_context.get_admin_context(read_deleted='yes'),
             instance['instance_type_id'])
         inst_path = libvirt_utils.get_instance_path(instance)

@@ -27,7 +27,6 @@ from nova import context
 from nova import exception
 from nova.i18n import _
 from nova import objects
-from nova.objects import service as service_obj
 from nova.openstack.common import jsonutils
 from nova.openstack.common import log as logging
 from nova.pci import pci_whitelist
@@ -120,8 +119,8 @@ class Host(object):
             # Since capabilities are gone, use service table to disable a node
             # in scheduler
             cntxt = context.get_admin_context()
-            service = service_obj.Service.get_by_args(cntxt, CONF.host,
-                                                      'nova-compute')
+            service = objects.Service.get_by_args(cntxt, CONF.host,
+                                                  'nova-compute')
             service.disabled = not enabled
             service.disabled_reason = 'set by xenapi host_state'
             service.save()