Browse Source

Add context as parameter for resume

Currently, for KVM, when resuming an instance with block storage, nova
compute will throw an exception and fail to resume the VM.

The root cause is that when resuming a VM with block storage, the
libvirt driver needs to call the conductor via rpcapi to update the
block device, but the resume() function does not receive a context;
this causes the RPC API call to fail.

Change-Id: I712777ed1d893a2b6463d30c407b0a677e37b602
Closes-Bug: #1241337
(cherry picked from commit de41588610)
tags/2013.2.2
Jay Lau 6 years ago
parent
commit
b56453526f
13 changed files with 66 additions and 20 deletions
  1. +1
    -1
      nova/compute/manager.py
  2. +4
    -2
      nova/tests/virt/hyperv/test_hypervapi.py
  3. +36
    -0
      nova/tests/virt/libvirt/test_libvirt.py
  4. +2
    -1
      nova/tests/virt/powervm/test_powervm.py
  5. +2
    -2
      nova/tests/virt/test_virt_drivers.py
  6. +3
    -3
      nova/tests/virt/vmwareapi/test_vmwareapi.py
  7. +10
    -3
      nova/virt/driver.py
  8. +1
    -1
      nova/virt/fake.py
  9. +1
    -1
      nova/virt/hyperv/driver.py
  10. +2
    -2
      nova/virt/libvirt/driver.py
  11. +1
    -1
      nova/virt/powervm/driver.py
  12. +2
    -2
      nova/virt/vmwareapi/driver.py
  13. +1
    -1
      nova/virt/xenapi/driver.py

+ 1
- 1
nova/compute/manager.py View File

@@ -3307,7 +3307,7 @@ class ComputeManager(manager.SchedulerDependentManager):
block_device_info = self._get_instance_volume_block_device_info(
context, instance)

self.driver.resume(instance, network_info,
self.driver.resume(context, instance, network_info,
block_device_info)

instance.power_state = self._get_power_state(context, instance)

+ 4
- 2
nova/tests/virt/hyperv/test_hypervapi.py View File

@@ -542,12 +542,14 @@ class HyperVAPITestCase(test.NoDBTestCase):
constants.HYPERV_VM_STATE_SUSPENDED)

def test_resume(self):
self._test_vm_state_change(lambda i: self._conn.resume(i, None),
self._test_vm_state_change(lambda i: self._conn.resume(self._context,
i, None),
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_ENABLED)

def test_resume_already_running(self):
self._test_vm_state_change(lambda i: self._conn.resume(i, None), None,
self._test_vm_state_change(lambda i: self._conn.resume(self._context,
i, None), None,
constants.HYPERV_VM_STATE_ENABLED)

def test_power_off(self):

+ 36
- 0
nova/tests/virt/libvirt/test_libvirt.py View File

@@ -49,6 +49,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import uuidutils
from nova.pci import pci_manager
from nova import test
from nova.tests import fake_network
import nova.tests.image.fake
@@ -4173,6 +4174,41 @@ class LibvirtConnTestCase(test.TestCase):
conn._hard_reboot(self.context, instance, network_info,
block_device_info)

def test_resume(self):
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
instance = db.instance_create(self.context, self.test_instance)
network_info = _fake_network_info(self.stubs, 1)
block_device_info = None
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with contextlib.nested(
mock.patch.object(conn, '_get_existing_domain_xml',
return_value=dummyxml),
mock.patch.object(conn, '_create_domain_and_network',
return_value='fake_dom'),
mock.patch.object(conn, '_attach_pci_devices'),
mock.patch.object(pci_manager, 'get_instance_pci_devs',
return_value='fake_pci_devs'),
) as (_get_existing_domain_xml, _create_domain_and_network,
_attach_pci_devices, get_instance_pci_devs):
conn.resume(self.context, instance, network_info,
block_device_info)
_get_existing_domain_xml.assert_has_calls([mock.call(instance,
network_info, block_device_info)])
_create_domain_and_network.assert_has_calls([mock.call(dummyxml,
instance, network_info,
block_device_info=block_device_info,
context=self.context)])
_attach_pci_devices.assert_has_calls([mock.call('fake_dom',
'fake_pci_devs')])

def test_destroy_undefines(self):
mock = self.mox.CreateMock(libvirt.virDomain)
mock.ID()

+ 2
- 1
nova/tests/virt/powervm/test_powervm.py View File

@@ -855,7 +855,8 @@ class PowerVMDriverTestCase(test.TestCase):
def test_resume(self):
# Check to make sure the method raises NotImplementedError.
self.assertRaises(NotImplementedError, self.powervm_connection.resume,
instance=None, network_info=None)
context.get_admin_context(), instance=None,
network_info=None)

def test_host_power_action(self):
# Check to make sure the method raises NotImplementedError.

+ 2
- 2
nova/tests/virt/test_virt_drivers.py View File

@@ -386,13 +386,13 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
@catch_notimplementederror
def test_resume_unsuspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resume(instance_ref, network_info)
self.connection.resume(self.ctxt, instance_ref, network_info)

@catch_notimplementederror
def test_resume_suspended_instance(self):
instance_ref, network_info = self._get_running_instance()
self.connection.suspend(instance_ref)
self.connection.resume(instance_ref, network_info)
self.connection.resume(self.ctxt, instance_ref, network_info)

@catch_notimplementederror
def test_destroy_instance_nonexistent(self):

+ 3
- 3
nova/tests/virt/vmwareapi/test_vmwareapi.py View File

@@ -502,7 +502,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.SUSPENDED)
self.conn.resume(self.instance, self.network_info)
self.conn.resume(self.context, self.instance, self.network_info)
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
@@ -510,7 +510,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
def test_resume_non_existent(self):
self._create_instance_in_the_db()
self.assertRaises(exception.InstanceNotFound, self.conn.resume,
self.instance, self.network_info)
self.context, self.instance, self.network_info)

def test_resume_not_suspended(self):
self._create_vm()
@@ -518,7 +518,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
self.instance, self.network_info)
self.context, self.instance, self.network_info)

def test_power_on(self):
self._create_vm()

+ 10
- 3
nova/virt/driver.py View File

@@ -431,9 +431,16 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()

def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
def resume(self, context, instance, network_info, block_device_info=None):
"""
resume the specified instance.

:param context: the context for the resume
:param instance: the instance being resumed
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: instance volume block device info
"""
raise NotImplementedError()

def resume_state_on_host_boot(self, context, instance, network_info,

+ 1
- 1
nova/virt/fake.py View File

@@ -203,7 +203,7 @@ class FakeDriver(driver.ComputeDriver):
def suspend(self, instance):
pass

def resume(self, instance, network_info, block_device_info=None):
def resume(self, context, instance, network_info, block_device_info=None):
pass

def destroy(self, instance, network_info, block_device_info=None,

+ 1
- 1
nova/virt/hyperv/driver.py View File

@@ -100,7 +100,7 @@ class HyperVDriver(driver.ComputeDriver):
def suspend(self, instance):
self._vmops.suspend(instance)

def resume(self, instance, network_info, block_device_info=None):
def resume(self, context, instance, network_info, block_device_info=None):
self._vmops.resume(instance)

def power_off(self, instance):

+ 2
- 2
nova/virt/libvirt/driver.py View File

@@ -1954,12 +1954,12 @@ class LibvirtDriver(driver.ComputeDriver):
pci_manager.get_instance_pci_devs(instance))
dom.managedSave(0)

def resume(self, instance, network_info, block_device_info=None):
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
dom = self._create_domain_and_network(xml, instance, network_info,
block_device_info)
block_device_info=block_device_info, context=context)
self._attach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))


+ 1
- 1
nova/virt/powervm/driver.py View File

@@ -196,7 +196,7 @@ class PowerVMDriver(driver.ComputeDriver):
raise NotImplementedError(_("Suspend is not supported by the"
"PowerVM driver."))

def resume(self, instance, network_info, block_device_info=None):
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
raise NotImplementedError(_("Resume is not supported by the"
"PowerVM driver."))

+ 2
- 2
nova/virt/vmwareapi/driver.py View File

@@ -212,7 +212,7 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Suspend the specified instance."""
self._vmops.suspend(instance)

def resume(self, instance, network_info, block_device_info=None):
def resume(self, context, instance, network_info, block_device_info=None):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)

@@ -681,7 +681,7 @@ class VMwareVCDriver(VMwareESXDriver):
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.suspend(instance)

def resume(self, instance, network_info, block_device_info=None):
def resume(self, context, instance, network_info, block_device_info=None):
"""Resume the suspended VM instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.resume(instance)

+ 1
- 1
nova/virt/xenapi/driver.py View File

@@ -287,7 +287,7 @@ class XenAPIDriver(driver.ComputeDriver):
"""suspend the specified instance."""
self._vmops.suspend(instance)

def resume(self, instance, network_info, block_device_info=None):
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
self._vmops.resume(instance)


Loading…
Cancel
Save