Tests to verify correct vm-params for Windows and Linux instances

@@ -125,7 +125,10 @@ class API(base.Base):
             raise quota.QuotaError(msg, "MetadataLimitExceeded")

         image = self.image_service.show(context, image_id)
-        os_type = image['properties'].get('os_type', 'linux')
+
+        os_type = None
+        if 'properties' in image and 'os_type' in image['properties']:
+            os_type = image['properties']['os_type']

         if kernel_id is None:
             kernel_id = image.get('kernel_id', None)
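
Note (not part of the diff): the compute API no longer defaults os_type to 'linux'. A hypothetical standalone equivalent of the new lookup shows the behaviour:

    # Illustration only: mirrors the logic added above -- os_type stays None
    # when the image carries no os_type property (no 'linux' fallback).
    def os_type_from_image(image):
        if 'properties' in image and 'os_type' in image['properties']:
            return image['properties']['os_type']
        return None

    assert os_type_from_image({'properties': {'os_type': 'windows'}}) == 'windows'
    assert os_type_from_image({'properties': {}}) is None
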
@@ -34,7 +34,7 @@ instances_os_type = Column('os_type',
                            String(length=255, convert_unicode=False,
                                   assert_unicode=None, unicode_error=None,
                                   _warn_on_bytestring=False),
-                           nullable=False)
+                           nullable=True)


 def upgrade(migrate_engine):
@@ -43,5 +43,3 @@ def upgrade(migrate_engine):
     meta.bind = migrate_engine

     instances.create_column(instances_os_type)
-
-
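
Note (not part of the diff): the migration fragments above amount to adding a nullable os_type column to the instances table. A minimal sqlalchemy-migrate script of that shape, with the usual boilerplate assumed rather than copied from this commit (and the Column arguments simplified), would look roughly like:

    # Sketch only: imports and Table reflection are assumed; the commit
    # itself only shows the Column definition and the create_column call.
    from sqlalchemy import Column, MetaData, String, Table
    from migrate import *   # assumed: installs Table.create_column

    meta = MetaData()

    instances_os_type = Column('os_type', String(length=255), nullable=True)


    def upgrade(migrate_engine):
        meta.bind = migrate_engine
        instances = Table('instances', meta, autoload=True)
        instances.create_column(instances_os_type)
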
@@ -62,6 +62,7 @@ def stub_out_db_instance_api(stubs):
         'mac_address': values['mac_address'],
         'vcpus': type_data['vcpus'],
         'local_gb': type_data['local_gb'],
+        'os_type': values['os_type']
         }
     return FakeModel(base_options)

@@ -18,6 +18,7 @@
 Test suite for XenAPI
 """

+import functools
 import stubout

 from nova import db
@@ -41,6 +42,21 @@ from nova.tests.glance import stubs as glance_stubs
 FLAGS = flags.FLAGS


+def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
+    """
+    vm_utils.with_vdi_attached_here needs to be stubbed out because it
+    calls down to the filesystem to attach a vdi. This provides a
+    decorator to handle that.
+    """
+    @functools.wraps(function)
+    def decorated_function(self, *args, **kwargs):
+        orig_with_vdi_attached_here = vm_utils.with_vdi_attached_here
+        vm_utils.with_vdi_attached_here = lambda *x: should_return
+        function(self, *args, **kwargs)
+        vm_utils.with_vdi_attached_here = orig_with_vdi_attached_here
+    return decorated_function
+
+
 class XenAPIVolumeTestCase(test.TestCase):
     """
     Unit tests for Volume operations
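
Note (not part of the diff): the decorator above is applied later in this change (the @stub_vm_utils_with_vdi_attached_here line on test_spawn_raw_glance). A hypothetical usage looks like this; the real vm_utils.with_vdi_attached_here is swapped out only for the duration of the wrapped test:

    # Hypothetical test using the decorator defined above (class and test
    # names are illustrative, not from this commit).
    class ExampleVDITestCase(test.TestCase):

        @stub_vm_utils_with_vdi_attached_here
        def test_something_needing_an_attached_vdi(self):
            # Inside this test, vm_utils.with_vdi_attached_here(...) simply
            # returns should_return (True by default) instead of touching the
            # filesystem; the original function is restored on return.
            pass
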
@@ -62,6 +78,7 @@ class XenAPIVolumeTestCase(test.TestCase):
             'ramdisk_id': 3,
             'instance_type': 'm1.large',
             'mac_address': 'aa:bb:cc:dd:ee:ff',
+            'os_type': 'linux'
             }

     def _create_volume(self, size='0'):
@@ -219,7 +236,7 @@ class XenAPIVMTestCase(test.TestCase):

         check()

-    def check_vm_record(self, conn):
+    def create_vm_record(self, conn, os_type):
         instances = conn.list_instances()
         self.assertEquals(instances, [1])

@@ -231,28 +248,63 @@ class XenAPIVMTestCase(test.TestCase):
             in xenapi_fake.get_all_records('VM').iteritems()
             if not rec['is_control_domain']]
         vm = vms[0]
+        self.vm_info = vm_info
+        self.vm = vm

+    def check_vm_record(self):
         # Check that m1.large above turned into the right thing.
         instance_type = instance_types.INSTANCE_TYPES['m1.large']
         mem_kib = long(instance_type['memory_mb']) << 10
         mem_bytes = str(mem_kib << 10)
         vcpus = instance_type['vcpus']
-        self.assertEquals(vm_info['max_mem'], mem_kib)
-        self.assertEquals(vm_info['mem'], mem_kib)
-        self.assertEquals(vm['memory_static_max'], mem_bytes)
-        self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
-        self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
-        self.assertEquals(vm['VCPUs_max'], str(vcpus))
-        self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
+        self.assertEquals(self.vm_info['max_mem'], mem_kib)
+        self.assertEquals(self.vm_info['mem'], mem_kib)
+        self.assertEquals(self.vm['memory_static_max'], mem_bytes)
+        self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
+        self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
+        self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
+        self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))

         # Check that the VM is running according to Nova
-        self.assertEquals(vm_info['state'], power_state.RUNNING)
+        self.assertEquals(self.vm_info['state'], power_state.RUNNING)

         # Check that the VM is running according to XenAPI.
-        self.assertEquals(vm['power_state'], 'Running')
+        self.assertEquals(self.vm['power_state'], 'Running')
+
+    def check_vm_params_for_windows(self):
+        self.assertEquals(self.vm['platform']['nx'], 'true')
+        self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
+        self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
+
+        # check that these are not set
+        self.assertEquals(self.vm['PV_args'], '')
+        self.assertEquals(self.vm['PV_bootloader'], '')
+        self.assertEquals(self.vm['PV_kernel'], '')
+        self.assertEquals(self.vm['PV_ramdisk'], '')
+
+    def check_vm_params_for_linux(self):
+        self.assertEquals(self.vm['platform']['nx'], 'false')
+        self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
+        self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
+
+        # check that these are not set
+        self.assertEquals(self.vm['PV_kernel'], '')
+        self.assertEquals(self.vm['PV_ramdisk'], '')
+        self.assertEquals(self.vm['HVM_boot_params'], {})
+        self.assertEquals(self.vm['HVM_boot_policy'], '')
+
+    def check_vm_params_for_linux_with_external_kernel(self):
+        self.assertEquals(self.vm['platform']['nx'], 'false')
+        self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
+        self.assertNotEquals(self.vm['PV_kernel'], '')
+        self.assertNotEquals(self.vm['PV_ramdisk'], '')
+
+        # check that these are not set
+        self.assertEquals(self.vm['HVM_boot_params'], {})
+        self.assertEquals(self.vm['HVM_boot_policy'], '')

     def _test_spawn(self, image_id, kernel_id, ramdisk_id,
-                    instance_type="m1.large"):
+                    instance_type="m1.large", os_type="linux"):
         stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
         values = {'name': 1,
                   'id': 1,
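
Note (not part of the diff): the new check_vm_params_* helpers pin down how an HVM (Windows) guest record differs from a PV (Linux) one. Restated as plain dicts, the values the tests assert on are:

    # Restatement of the assertions above; only the keys shown here (plus
    # platform['nx']) are checked, every other VM-record field is ignored.
    EXPECTED_WINDOWS_HVM_PARAMS = {
        'platform': {'nx': 'true'},
        'HVM_boot_params': {'order': 'dc'},
        'HVM_boot_policy': 'BIOS order',
        'PV_args': '',
        'PV_bootloader': '',
        'PV_kernel': '',
        'PV_ramdisk': '',
    }

    EXPECTED_LINUX_PV_PARAMS = {
        'platform': {'nx': 'false'},
        'PV_args': 'clocksource=jiffies',
        'PV_bootloader': 'pygrub',
        'PV_kernel': '',
        'PV_ramdisk': '',
        'HVM_boot_params': {},
        'HVM_boot_policy': '',
    }
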
@@ -263,11 +315,13 @@ class XenAPIVMTestCase(test.TestCase):
                   'ramdisk_id': ramdisk_id,
                   'instance_type': instance_type,
                   'mac_address': 'aa:bb:cc:dd:ee:ff',
+                  'os_type': os_type
                   }
         conn = xenapi_conn.get_connection(False)
         instance = db.instance_create(values)
         conn.spawn(instance)
-        self.check_vm_record(conn)
+        self.create_vm_record(conn, os_type)
+        self.check_vm_record()

     def test_spawn_not_enough_memory(self):
         FLAGS.xenapi_image_service = 'glance'
@@ -283,24 +337,37 @@ class XenAPIVMTestCase(test.TestCase):
         FLAGS.xenapi_image_service = 'objectstore'
         self._test_spawn(1, 2, 3)

+    @stub_vm_utils_with_vdi_attached_here
     def test_spawn_raw_glance(self):
         FLAGS.xenapi_image_service = 'glance'
         self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
+        self.check_vm_params_for_linux()

-    def test_spawn_vhd_glance(self):
+    def test_spawn_vhd_glance_linux(self):
         FLAGS.xenapi_image_service = 'glance'
-        self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None)
+        self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
+                         os_type="linux")
+        self.check_vm_params_for_linux()
+
+    def test_spawn_vhd_glance_windows(self):
+        FLAGS.xenapi_image_service = 'glance'
+        self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
+                         os_type="windows")
+        self.check_vm_params_for_windows()

     def test_spawn_glance(self):
         FLAGS.xenapi_image_service = 'glance'
         self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
                          glance_stubs.FakeGlance.IMAGE_KERNEL,
                          glance_stubs.FakeGlance.IMAGE_RAMDISK)
+        self.check_vm_params_for_linux_with_external_kernel()

     def tearDown(self):
         super(XenAPIVMTestCase, self).tearDown()
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
+        self.vm_info = None
+        self.vm = None
         self.stubs.UnsetAll()

     def _create_instance(self):
@@ -314,7 +381,8 @@ class XenAPIVMTestCase(test.TestCase):
             'kernel_id': 2,
             'ramdisk_id': 3,
             'instance_type': 'm1.large',
-            'mac_address': 'aa:bb:cc:dd:ee:ff'}
+            'mac_address': 'aa:bb:cc:dd:ee:ff',
+            'os_type': 'linux'}
         instance = db.instance_create(values)
         self.conn.spawn(instance)
         return instance
@@ -360,6 +428,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):

         self.fake_instance = FakeInstance()
         self.fake_instance.id = 42
+        self.fake_instance.os_type = 'linux'

     def assert_disk_type(self, disk_type):
         dt = vm_utils.VMHelper.determine_disk_image_type(
@@ -80,7 +80,8 @@ class VMHelper(HelperBase):
     """

     @classmethod
-    def create_vm(cls, session, instance, kernel, ramdisk, use_pv_kernel=False):
+    def create_vm(cls, session, instance, kernel, ramdisk,
+                  use_pv_kernel=False):
         """Create a VM record. Returns a Deferred that gives the new
         VM reference.
         the use_pv_kernel flag indicates whether the guest is HVM or PV
@@ -319,7 +320,7 @@ class VMHelper(HelperBase):
                   'glance_host': FLAGS.glance_host,
                   'glance_port': FLAGS.glance_port,
                   'sr_path': get_sr_path(session),
-                  'os_type': instance.get('os_type', 'linux')}
+                  'os_type': instance.os_type}

         kwargs = {'params': pickle.dumps(params)}
         task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
@@ -524,7 +525,7 @@ class VMHelper(HelperBase):
         Determine whether the VM will use a paravirtualized kernel or if it
         will use hardware virtualization.

         1. Objectstore (any image type):
            We use plugin to figure out whether the VDI uses PV

         2. Glance (VHD): then we use `os_type`, raise if not set
@@ -540,7 +541,8 @@ class VMHelper(HelperBase):
             session, vdi_ref, disk_image_type, os_type)
         else:
             # 1. Objecstore
-            return cls._determine_is_pv_objectstore(session, instance_id, vdi_ref)
+            return cls._determine_is_pv_objectstore(session, instance_id,
+                                                    vdi_ref)

     @classmethod
     def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref):
@@ -564,7 +566,7 @@ class VMHelper(HelperBase):
         """
         For a Glance image, determine if we need paravirtualization.

         The relevant scenarios are:
         2. Glance (VHD): then we use `os_type`, raise if not set

         3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
@@ -582,7 +584,7 @@ class VMHelper(HelperBase):
             is_pv = True
         elif disk_image_type == ImageType.DISK_RAW:
             # 3. RAW
             is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
         elif disk_image_type == ImageType.DISK:
             # 4. Disk
             is_pv = True
@@ -87,8 +87,6 @@ class VMOps(object):

         vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)

-        os_type = instance.get('os_type', 'linux')
-
         kernel = None
         if instance.kernel_id:
             kernel = VMHelper.fetch_image(self._session, instance.id,
@@ -99,8 +97,8 @@ class VMOps(object):
             ramdisk = VMHelper.fetch_image(self._session, instance.id,
                 instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)

-        use_pv_kernel = VMHelper.determine_is_pv(
-            self._session, instance.id, vdi_ref, disk_image_type, os_type)
+        use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
+            vdi_ref, disk_image_type, instance.os_type)
         vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk,
                                     use_pv_kernel)

@@ -242,7 +240,7 @@ class VMOps(object):
         finally:
             self._destroy(instance, template_vm_ref, shutdown=False,
                           destroy_kernel_ramdisk=False)

         logging.debug(_("Finished snapshot and upload for VM %s"), instance)

     def reboot(self, instance):
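
Note (not part of the diff): with the two VMOps changes above, os_type is no longer re-read inside spawn with a 'linux' default; it rides on the instance record and is handed straight to determine_is_pv. The resulting call sequence, assembled from the hunks and with the rest of spawn() omitted, is roughly:

    # Sketch of the spawn path after this change; only lines shown in the
    # hunks above are reproduced here.
    vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
    ...
    use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
                                             vdi_ref, disk_image_type,
                                             instance.os_type)
    vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk,
                                use_pv_kernel)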