Fix up instance types in sys meta for resizes

This adds code to the resize paths to update system_metadata with the
new (or reverted) flavor information, so that resized instances carry
the same flavor data as newly-created ones.

It also stashes the new and old instance_type information in system_metadata
during resize, which allows us to avoid database lookups for type
information at finish, confirm, and revert time.
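
In short, a condensed before/after fragment drawn from the compute
manager hunks below (migration and instance are the usual method
arguments):

# Before: finish/confirm/revert had to look the flavor up by id in the DB.
instance_type = instance_types.get_instance_type(
    migration['new_instance_type_id'])

# After: the flavor data travels with the instance in system_metadata.
instance_type = instance_types.extract_instance_type(instance, prefix='new_')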

Related to blueprint no-db-compute

Change-Id: I7f74ef8131be7a60e3a844fdf00fdeb3683f1317
Dan Smith
2013-01-29 13:40:40 -05:00
parent 47bbf12a6c
commit 734b48b85f
8 changed files with 200 additions and 32 deletions

View File

@@ -507,12 +507,8 @@ class API(base.Base):
availability_zone, forced_host = self._handle_availability_zone(
availability_zone)
system_metadata = {}
instance_type_props = ['id', 'name', 'memory_mb', 'vcpus',
'root_gb', 'ephemeral_gb', 'flavorid',
'swap', 'rxtx_factor', 'vcpu_weight']
for k in instance_type_props:
system_metadata["instance_type_%s" % k] = instance_type[k]
system_metadata = instance_types.save_instance_type_info(
dict(), instance_type)
base_options = {
'reservation_id': reservation_id,

View File

@@ -308,9 +308,7 @@ class ComputeCellsAPI(compute_api.API):
# specified flavor_id is valid and exists. We'll need to load
# it again, but that should be safe.
old_instance_type_id = instance['instance_type_id']
old_instance_type = instance_types.get_instance_type(
old_instance_type_id)
old_instance_type = instance_types.extract_instance_type(instance)
flavor_id = kwargs.get('flavor_id')

View File

@@ -44,6 +44,25 @@ LOG = logging.getLogger(__name__)
INVALID_NAME_REGEX = re.compile("[^\w\.\- ]")
def _int_or_none(val):
if val is not None:
return int(val)
system_metadata_instance_type_props = {
'id': int,
'name': str,
'memory_mb': int,
'vcpus': int,
'root_gb': int,
'ephemeral_gb': int,
'flavorid': str,
'swap': int,
'rxtx_factor': float,
'vcpu_weight': _int_or_none,
}
def create(name, memory, vcpus, root_gb, ephemeral_gb=None, flavorid=None,
swap=None, rxtx_factor=None, is_public=True):
"""Creates instance types."""
@@ -210,3 +229,42 @@ def remove_instance_type_access(flavorid, projectid, ctxt=None):
ctxt = context.get_admin_context()
return db.instance_type_access_remove(ctxt, flavorid, projectid)
def extract_instance_type(instance, prefix=''):
"""Create an InstanceType-like object from instance's system_metadata
information."""
instance_type = {}
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
for key, type_fn in system_metadata_instance_type_props.items():
type_key = '%sinstance_type_%s' % (prefix, key)
instance_type[key] = type_fn(sys_meta[type_key])
return instance_type
def save_instance_type_info(metadata, instance_type, prefix=''):
"""Save properties from instance_type into instance's system_metadata,
in the format of:
[prefix]instance_type_[key]
This can be used to update system_metadata in place from a type, as well
as to stash information about another instance_type for later use (such
as during resize)."""
for key in system_metadata_instance_type_props.keys():
to_key = '%sinstance_type_%s' % (prefix, key)
metadata[to_key] = instance_type[key]
return metadata
def delete_instance_type_info(metadata, *prefixes):
"""Delete instance_type information from instance's system_metadata
by prefix."""
for key in system_metadata_instance_type_props.keys():
for prefix in prefixes:
to_key = '%sinstance_type_%s' % (prefix, key)
del metadata[to_key]
return metadata
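
A minimal round-trip of the three helpers above, assuming a Nova tree with
this change applied (the flavor values themselves are made up):

from nova.compute import instance_types

# A flavor dict carrying just the fields the helpers care about.
new_type = {'id': 6, 'name': 'custom', 'memory_mb': 2048, 'vcpus': 2,
            'root_gb': 20, 'ephemeral_gb': 0, 'flavorid': '6', 'swap': 0,
            'rxtx_factor': 1.0, 'vcpu_weight': None}

# Stash it under the 'new_' prefix in a plain metadata dict.
meta = {}
instance_types.save_instance_type_info(meta, new_type, prefix='new_')

# extract_instance_type() reads the same keys back through
# utils.metadata_to_dict(), so hand it an instance-like dict.
instance = {'system_metadata': [{'key': k, 'value': v}
                                for k, v in meta.items()]}
stashed = instance_types.extract_instance_type(instance, prefix='new_')
assert stashed['memory_mb'] == 2048

# Once the stash is no longer needed, strip it out again.
instance_types.delete_instance_type_info(meta, 'new_')
assert meta == {}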

View File

@@ -1752,6 +1752,12 @@ class ComputeManager(manager.SchedulerDependentManager):
with self._error_out_instance_on_exception(context, instance['uuid'],
reservations):
# NOTE(danms): delete stashed old/new instance_type information
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
instance_types.delete_instance_type_info(sys_meta, 'old_', 'new_')
self._instance_update(context, instance['uuid'],
system_metadata=sys_meta)
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration['source_compute'], teardown=True)
@@ -1834,8 +1840,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
old_instance_type = migration['old_instance_type_id']
instance_type = instance_types.get_instance_type(old_instance_type)
instance_type = instance_types.extract_instance_type(instance,
prefix='old_')
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
instance_types.save_instance_type_info(sys_meta, instance_type)
instance_types.delete_instance_type_info(sys_meta, 'new_', 'old_')
instance = self._instance_update(context,
instance['uuid'],
@@ -1845,7 +1854,8 @@ class ComputeManager(manager.SchedulerDependentManager):
ephemeral_gb=instance_type['ephemeral_gb'],
instance_type_id=instance_type['id'],
host=migration['source_compute'],
node=migration['source_node'])
node=migration['source_node'],
system_metadata=sys_meta)
self.network_api.setup_networks_on_host(context, instance,
migration['source_compute'])
@@ -1910,6 +1920,14 @@ class ComputeManager(manager.SchedulerDependentManager):
msg = _('destination same as source!')
raise exception.MigrationError(msg)
# NOTE(danms): Stash the new instance_type to avoid having to
# look it up in the database later
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
instance_types.save_instance_type_info(sys_meta, instance_type,
prefix='new_')
instance = self._instance_update(context, instance['uuid'],
system_metadata=sys_meta)
limits = filter_properties.get('limits', {})
rt = self._get_resource_tracker(node)
with rt.resize_claim(context, instance, instance_type, limits=limits) \
@@ -2070,8 +2088,15 @@ class ComputeManager(manager.SchedulerDependentManager):
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
if old_instance_type_id != new_instance_type_id:
instance_type = instance_types.get_instance_type(
new_instance_type_id)
instance_type = instance_types.extract_instance_type(instance,
prefix='new_')
old_instance_type = instance_types.extract_instance_type(instance)
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
instance_types.save_instance_type_info(sys_meta,
old_instance_type,
prefix='old_')
instance_types.save_instance_type_info(sys_meta, instance_type)
instance = self._instance_update(
context,
instance['uuid'],
@@ -2079,7 +2104,9 @@ class ComputeManager(manager.SchedulerDependentManager):
memory_mb=instance_type['memory_mb'],
vcpus=instance_type['vcpus'],
root_gb=instance_type['root_gb'],
ephemeral_gb=instance_type['ephemeral_gb'])
ephemeral_gb=instance_type['ephemeral_gb'],
system_metadata=sys_meta)
resize_instance = True
# NOTE(tr3buchet): setup networks on destination host
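
Stitching the manager hunks together, the stashed flavors move through the
resize lifecycle roughly as follows. Each block is a condensed excerpt from
one method (error handling, notifications, and the other _instance_update
fields are omitted); the blocks do not run back-to-back:

sys_meta = utils.metadata_to_dict(instance['system_metadata'])

# prep_resize: stash the target flavor so later steps can skip the DB.
instance_types.save_instance_type_info(sys_meta, instance_type, prefix='new_')

# finish_resize: remember the old flavor and make the new one current.
instance_types.save_instance_type_info(sys_meta, old_instance_type,
                                       prefix='old_')
instance_types.save_instance_type_info(sys_meta, instance_type)

# finish_revert_resize: restore the old flavor and drop both stashes.
instance_type = instance_types.extract_instance_type(instance, prefix='old_')
instance_types.save_instance_type_info(sys_meta, instance_type)
instance_types.delete_instance_type_info(sys_meta, 'new_', 'old_')

# confirm_resize: the resize stuck, so both stashed copies can go.
instance_types.delete_instance_type_info(sys_meta, 'old_', 'new_')

# In every case the result is persisted via _instance_update().
self._instance_update(context, instance['uuid'], system_metadata=sys_meta)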

View File

@@ -34,6 +34,7 @@ allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
'instance_type_id', 'root_device_name', 'launched_on',
'progress', 'vm_mode', 'default_ephemeral_device',
'default_swap_device', 'root_device_name',
'system_metadata',
]
# Fields that we want to convert back into a datetime object.
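
The one-line addition above is what lets the compute manager's
_instance_update() calls in the previous file pass system_metadata through
this whitelist. A minimal sketch of the gate the list feeds (details
assumed; the real method also coerces datetime fields before applying the
update):

def instance_update(self, context, instance_uuid, updates):
    # Refuse any field that is not explicitly whitelisted.
    for key in updates:
        if key not in allowed_updates:
            raise KeyError("unexpected update keyword '%s'" % key)
    return self.db.instance_update(context, instance_uuid, updates)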

View File

@@ -173,6 +173,13 @@ class BaseTestCase(test.TestCase):
if not params:
params = {}
def make_fake_sys_meta():
sys_meta = {}
inst_type = instance_types.get_instance_type_by_name(type_name)
for key in instance_types.system_metadata_instance_type_props:
sys_meta['instance_type_%s' % key] = inst_type[key]
return sys_meta
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
@@ -191,6 +198,7 @@ class BaseTestCase(test.TestCase):
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
inst['system_metadata'] = make_fake_sys_meta()
inst.update(params)
_create_service_entries(self.context.elevated(),
{'fake_zone': [inst['host']]})
@@ -1842,6 +1850,7 @@ class ComputeTestCase(BaseTestCase):
self.context.elevated(), instance['uuid'], 'pre-migrating')
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_MIGRATED})
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.compute.finish_resize(self.context,
migration=jsonutils.to_primitive(migration_ref),
disk_info={}, image={}, instance=instance,
@@ -1871,6 +1880,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_MIGRATED})
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertRaises(test.TestingException, self.compute.finish_resize,
self.context,
migration=jsonutils.to_primitive(migration_ref),
@@ -1974,6 +1984,8 @@ class ComputeTestCase(BaseTestCase):
timeutils.set_time_override(cur_time)
test_notifier.NOTIFICATIONS = []
new_instance = db.instance_get_by_uuid(self.context,
new_instance['uuid'])
self.compute.finish_resize(self.context,
migration=jsonutils.to_primitive(migration_ref),
disk_info={}, image={}, instance=new_instance)
@@ -2159,7 +2171,8 @@ class ComputeTestCase(BaseTestCase):
def fake_finish_revert_migration_driver(*args, **kwargs):
# Confirm the instance uses the old type in finish_revert_resize
inst = args[0]
self.assertEqual(inst['instance_type']['flavorid'], '1')
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta['instance_type_flavorid'], '1')
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
self.stubs.Set(self.compute.driver, 'finish_revert_migration',
@@ -2193,6 +2206,8 @@ class ComputeTestCase(BaseTestCase):
self.context.elevated(),
inst_ref['uuid'], 'pre-migrating')
# NOTE(danms): make sure to refresh our inst_ref after prep_resize
inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
instance = jsonutils.to_primitive(inst_ref)
db.instance_update(self.context, instance_uuid,
{"task_state": task_states.RESIZE_PREP})
@@ -2226,6 +2241,7 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(network_api.API, 'setup_networks_on_host',
fake_setup_networks_on_host)
rpcinst = db.instance_get_by_uuid(self.context, rpcinst['uuid'])
self.compute.finish_revert_resize(self.context,
migration=jsonutils.to_primitive(migration_ref),
instance=rpcinst, reservations=reservations)

View File

@@ -68,6 +68,25 @@ class ComputeValidateDeviceTestCase(test.TestCase):
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
lambda context, instance: self.data)
def _update_instance_type(self, instance_type_info):
self.instance_type = {
'id': 1,
'name': 'foo',
'memory_mb': 128,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 10,
'flavorid': 1,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
}
self.instance_type.update(instance_type_info)
self.instance['system_metadata'] = [{'key': 'instance_type_%s' % key,
'value': value}
for key, value in
self.instance_type.items()]
def _validate_device(self, device=None):
bdms = db.block_device_mapping_get_all_by_instance(
self.context, self.instance['uuid'])
@@ -163,40 +182,40 @@ class ComputeValidateDeviceTestCase(test.TestCase):
self.assertEqual(device, '/dev/vdc')
def test_ephemeral_xenapi(self):
self.instance_type = {
'ephemeral_gb': 10,
'swap': 0,
}
self._update_instance_type({
'ephemeral_gb': 10,
'swap': 0,
})
self.stubs.Set(instance_types, 'get_instance_type',
lambda instance_type_id, ctxt=None: self.instance_type)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdc')
def test_swap_xenapi(self):
self.instance_type = {
'ephemeral_gb': 0,
'swap': 10,
}
self._update_instance_type({
'ephemeral_gb': 0,
'swap': 10,
})
self.stubs.Set(instance_types, 'get_instance_type',
lambda instance_type_id, ctxt=None: self.instance_type)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdb')
def test_swap_and_ephemeral_xenapi(self):
self.instance_type = {
'ephemeral_gb': 10,
'swap': 10,
}
self._update_instance_type({
'ephemeral_gb': 10,
'swap': 10,
})
self.stubs.Set(instance_types, 'get_instance_type',
lambda instance_type_id, ctxt=None: self.instance_type)
device = self._validate_device()
self.assertEqual(device, '/dev/xvdd')
def test_swap_and_one_attachment_xenapi(self):
self.instance_type = {
'ephemeral_gb': 0,
'swap': 10,
}
self._update_instance_type({
'ephemeral_gb': 0,
'swap': 10,
})
self.stubs.Set(instance_types, 'get_instance_type',
lambda instance_type_id, ctxt=None: self.instance_type)
device = self._validate_device()

View File

@@ -359,6 +359,59 @@ class InstanceTypeTestCase(test.TestCase):
self.assertTrue(instance["instance_type"])
class InstanceTypeToolsTest(test.TestCase):
def _dict_to_metadata(self, data):
return [{'key': key, 'value': value} for key, value in data.items()]
def _test_extract_instance_type(self, prefix):
instance_type = instance_types.get_default_instance_type()
metadata = {}
instance_types.save_instance_type_info(metadata, instance_type,
prefix)
instance = {'system_metadata': self._dict_to_metadata(metadata)}
_instance_type = instance_types.extract_instance_type(instance, prefix)
props = instance_types.system_metadata_instance_type_props.keys()
for key in instance_type.keys():
if key not in props:
del instance_type[key]
self.assertEqual(instance_type, _instance_type)
def test_extract_instance_type(self):
self._test_extract_instance_type('')
def test_extract_instance_type_prefix(self):
self._test_extract_instance_type('foo_')
def test_save_instance_type_info(self):
instance_type = instance_types.get_default_instance_type()
example = {}
example_prefix = {}
for key in instance_types.system_metadata_instance_type_props.keys():
example['instance_type_%s' % key] = instance_type[key]
example_prefix['fooinstance_type_%s' % key] = instance_type[key]
metadata = {}
instance_types.save_instance_type_info(metadata, instance_type)
self.assertEqual(example, metadata)
metadata = {}
instance_types.save_instance_type_info(metadata, instance_type, 'foo')
self.assertEqual(example_prefix, metadata)
def test_delete_instance_type_info(self):
instance_type = instance_types.get_default_instance_type()
metadata = {}
instance_types.save_instance_type_info(metadata, instance_type)
instance_types.save_instance_type_info(metadata, instance_type, '_')
instance_types.delete_instance_type_info(metadata, '', '_')
self.assertEqual(metadata, {})
class InstanceTypeFilteringTest(test.TestCase):
"""Test cases for the filter option available for instance_type_get_all."""
def setUp(self):