Merge "Remove (almost) all references to 'instance_type'"

Authored by Zuul on 2021-06-13 05:57:49 +00:00; committed by Gerrit Code Review
commit 052cf96358
47 changed files with 640 additions and 652 deletions

View File

@ -279,8 +279,8 @@ class InstanceMetadata(object):
meta_data['public-ipv4'] = floating_ip
if self._check_version('2007-08-29', version):
instance_type = self.instance.get_flavor()
meta_data['instance-type'] = instance_type['name']
flavor = self.instance.get_flavor()
meta_data['instance-type'] = flavor['name']
if self._check_version('2007-12-15', version):
meta_data['block-device-mapping'] = self.mappings
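For illustration only (not part of the commit): a minimal stand-in for the version gate above, assuming a simplified _check_version that indexes into EC2 metadata version dates.

# Illustrative sketch; the real _check_version walks Nova's ordered
# list of supported EC2 metadata versions.
VERSIONS = ['2007-01-19', '2007-08-29', '2007-12-15']

def check_version(required, requested):
    return VERSIONS.index(requested) >= VERSIONS.index(required)

flavor = {'name': 'm1.small'}    # assumed flavor primitive
meta_data = {}
if check_version('2007-08-29', '2007-12-15'):
    meta_data['instance-type'] = flavor['name']
assert meta_data == {'instance-type': 'm1.small'}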

View File

@ -547,34 +547,32 @@ class ViewBuilder(common.ViewBuilder):
else:
return ""
def _get_flavor_dict(self, request, instance_type, show_extra_specs):
def _get_flavor_dict(self, request, flavor, show_extra_specs):
flavordict = {
"vcpus": instance_type.vcpus,
"ram": instance_type.memory_mb,
"disk": instance_type.root_gb,
"ephemeral": instance_type.ephemeral_gb,
"swap": instance_type.swap,
"original_name": instance_type.name
"vcpus": flavor.vcpus,
"ram": flavor.memory_mb,
"disk": flavor.root_gb,
"ephemeral": flavor.ephemeral_gb,
"swap": flavor.swap,
"original_name": flavor.name
}
if show_extra_specs:
flavordict['extra_specs'] = instance_type.extra_specs
flavordict['extra_specs'] = flavor.extra_specs
return flavordict
def _get_flavor(self, request, instance, show_extra_specs):
instance_type = instance.get_flavor()
if not instance_type:
LOG.warning("Instance has had its instance_type removed "
flavor = instance.get_flavor()
if not flavor:
LOG.warning("Instance has had its flavor removed "
"from the DB", instance=instance)
return {}
if api_version_request.is_supported(request, min_version="2.47"):
return self._get_flavor_dict(request, instance_type,
show_extra_specs)
return self._get_flavor_dict(request, flavor, show_extra_specs)
flavor_id = instance_type["flavorid"]
flavor_bookmark = self._flavor_builder._get_bookmark_link(request,
flavor_id,
"flavors")
flavor_id = flavor["flavorid"]
flavor_bookmark = self._flavor_builder._get_bookmark_link(
request, flavor_id, "flavors")
return {
"id": str(flavor_id),
"links": [{

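For context, the two payload shapes this view builder produces, with assumed values: microversion 2.47+ embeds the flavor details from _get_flavor_dict, while older microversions return only an id and a bookmark link.

# Assumed example payloads, not taken from the commit.
embedded = {                     # microversion >= 2.47
    'vcpus': 1, 'ram': 512, 'disk': 1, 'ephemeral': 0, 'swap': 0,
    'original_name': 'm1.tiny', 'extra_specs': {},  # if show_extra_specs
}
linked = {                       # older microversions
    'id': '1',
    'links': [{'rel': 'bookmark', 'href': 'http://localhost/flavors/1'}],
}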
View File

@ -57,9 +57,11 @@ class Claim(NopClaim):
correct decisions with respect to host selection.
"""
def __init__(self, context, instance, nodename, tracker, compute_node,
pci_requests, migration=None, limits=None):
super(Claim, self).__init__(migration=migration)
def __init__(
self, context, instance, nodename, tracker, compute_node, pci_requests,
migration=None, limits=None,
):
super().__init__(migration=migration)
# Stash a copy of the instance at the current point of time
self.instance = instance.obj_clone()
self.nodename = nodename
@ -159,21 +161,24 @@ class MoveClaim(Claim):
Move can be either a migrate/resize, live-migrate or an evacuate operation.
"""
def __init__(self, context, instance, nodename, instance_type, image_meta,
tracker, compute_node, pci_requests, migration, limits=None):
def __init__(
self, context, instance, nodename, flavor, image_meta, tracker,
compute_node, pci_requests, migration, limits=None,
):
self.context = context
self.instance_type = instance_type
self.flavor = flavor
if isinstance(image_meta, dict):
image_meta = objects.ImageMeta.from_dict(image_meta)
self.image_meta = image_meta
super(MoveClaim, self).__init__(context, instance, nodename, tracker,
compute_node, pci_requests,
migration=migration, limits=limits)
super().__init__(
context, instance, nodename, tracker, compute_node, pci_requests,
migration=migration, limits=limits,
)
@property
def numa_topology(self):
return hardware.numa_get_constraints(self.instance_type,
self.image_meta)
return hardware.numa_get_constraints(self.flavor, self.image_meta)
def abort(self):
"""Compute operation requiring claimed resources has failed or
@ -183,7 +188,7 @@ class MoveClaim(Claim):
self.tracker.drop_move_claim(
self.context,
self.instance, self.nodename,
instance_type=self.instance_type)
flavor=self.flavor)
self.instance.drop_migration_context()
def _test_pci(self):

View File

@ -173,26 +173,27 @@ def extract_flavor(instance, prefix=''):
# NOTE(danms): This method is deprecated, do not use it!
# Use instance.{old_,new_,}flavor instead, as instances no longer
# have flavor information in system_metadata.
def save_flavor_info(metadata, instance_type, prefix=''):
"""Save properties from instance_type into instance's system_metadata,
# NOTE(stephenfin): 'prefix' is unused and could be removed
def save_flavor_info(metadata, flavor, prefix=''):
"""Save properties from flavor into instance's system_metadata,
in the format of:
[prefix]instance_type_[key]
This can be used to update system_metadata in place from a type, as well
as stash information about another instance_type for later use (such as
as stash information about another flavor for later use (such as
during resize).
"""
for key in system_metadata_flavor_props.keys():
to_key = '%sinstance_type_%s' % (prefix, key)
metadata[to_key] = instance_type[key]
metadata[to_key] = flavor[key]
# NOTE(danms): We do NOT save all of extra_specs here, but only the
# NUMA-related ones that we need to avoid an uglier alternative. This
# should be replaced by a general split-out of flavor information from
# system_metadata very soon.
extra_specs = instance_type.get('extra_specs', {})
extra_specs = flavor.get('extra_specs', {})
for extra_prefix in system_metadata_flavor_extra_props:
for key in extra_specs:
if key.startswith(extra_prefix):
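A self-contained sketch of the stash format the docstring describes, assuming a reduced property list (the real system_metadata_flavor_props covers more keys):

SYSTEM_METADATA_FLAVOR_PROPS = ['id', 'name', 'memory_mb', 'vcpus']  # subset

def save_flavor_info(metadata, flavor, prefix=''):
    # Mirrors the stash format above: [prefix]instance_type_[key]
    for key in SYSTEM_METADATA_FLAVOR_PROPS:
        metadata['%sinstance_type_%s' % (prefix, key)] = flavor[key]
    return metadata

meta = save_flavor_info(
    {}, {'id': 1, 'name': 'm1.small', 'memory_mb': 2048, 'vcpus': 1},
    prefix='new_')
assert meta['new_instance_type_memory_mb'] == 2048  # stashed for resize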

View File

@ -5083,9 +5083,10 @@ class ComputeManager(manager.Manager):
instance.uuid)
return orig_alloc
def _prep_resize(self, context, image, instance, instance_type,
filter_properties, node, migration, request_spec,
clean_shutdown=True):
def _prep_resize(
self, context, image, instance, flavor, filter_properties, node,
migration, request_spec, clean_shutdown=True,
):
if not filter_properties:
filter_properties = {}
@ -5097,7 +5098,7 @@ class ComputeManager(manager.Manager):
same_host = instance.host == self.host
# if the flavor IDs match, it's migrate; otherwise resize
if same_host and instance_type.id == instance['instance_type_id']:
if same_host and flavor.id == instance['instance_type_id']:
# check driver whether support migrate to same host
if not self.driver.capabilities.get(
'supports_migrate_to_same_host', False):
@ -5108,9 +5109,9 @@ class ComputeManager(manager.Manager):
inner_exception=exception.UnableToMigrateToSelf(
instance_id=instance.uuid, host=self.host))
# NOTE(danms): Stash the new instance_type to avoid having to
# NOTE(danms): Stash the new flavor to avoid having to
# look it up in the database later
instance.new_flavor = instance_type
instance.new_flavor = flavor
# NOTE(mriedem): Stash the old vm_state so we can set the
# resized/reverted instance back to the same state later.
vm_state = instance.vm_state
@ -5149,14 +5150,15 @@ class ComputeManager(manager.Manager):
limits = filter_properties.get('limits', {})
allocs = self.reportclient.get_allocations_for_consumer(
context, instance.uuid)
with self.rt.resize_claim(context, instance, instance_type, node,
migration, allocs, image_meta=image,
limits=limits) as claim:
with self.rt.resize_claim(
context, instance, flavor, node, migration, allocs,
image_meta=image, limits=limits,
) as claim:
LOG.info('Migrating', instance=instance)
# RPC cast to the source host to start the actual resize/migration.
self.compute_rpcapi.resize_instance(
context, instance, claim.migration, image,
instance_type, request_spec, clean_shutdown)
context, instance, claim.migration, image,
flavor, request_spec, clean_shutdown)
def _send_prep_resize_notifications(
self, context, instance, phase, flavor):
@ -5262,7 +5264,7 @@ class ComputeManager(manager.Manager):
flavor)
def _reschedule_resize_or_reraise(self, context, instance, exc_info,
instance_type, request_spec, filter_properties, host_list):
flavor, request_spec, filter_properties, host_list):
"""Try to re-schedule the resize or re-raise the original error to
error out the instance.
"""
@ -5291,7 +5293,7 @@ class ComputeManager(manager.Manager):
scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(
context, instance, scheduler_hint, instance_type,
context, instance, scheduler_hint, flavor,
request_spec=request_spec, host_list=host_list)
rescheduled = True
@ -5561,9 +5563,10 @@ class ComputeManager(manager.Manager):
with excutils.save_and_reraise_exception():
self._revert_allocation(context, instance, migration)
def _resize_instance(self, context, instance, image,
migration, instance_type, clean_shutdown,
request_spec):
def _resize_instance(
self, context, instance, image, migration, flavor,
clean_shutdown, request_spec,
):
# Pass instance_state=instance.vm_state because we can resize
# a STOPPED server and we don't want to set it back to ACTIVE
# in case migrate_disk_and_power_off raises InstanceFaultRollback.
@ -5592,10 +5595,10 @@ class ComputeManager(manager.Manager):
timeout, retry_interval = self._get_power_off_values(
instance, clean_shutdown)
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration.dest_host,
instance_type, network_info,
block_device_info,
timeout, retry_interval)
context, instance, migration.dest_host,
flavor, network_info,
block_device_info,
timeout, retry_interval)
self._terminate_volume_connections(context, instance, bdms)
@ -5672,13 +5675,13 @@ class ComputeManager(manager.Manager):
connector)
@staticmethod
def _set_instance_info(instance, instance_type):
instance.instance_type_id = instance_type.id
instance.memory_mb = instance_type.memory_mb
instance.vcpus = instance_type.vcpus
instance.root_gb = instance_type.root_gb
instance.ephemeral_gb = instance_type.ephemeral_gb
instance.flavor = instance_type
def _set_instance_info(instance, flavor):
instance.instance_type_id = flavor.id
instance.memory_mb = flavor.memory_mb
instance.vcpus = flavor.vcpus
instance.root_gb = flavor.root_gb
instance.ephemeral_gb = flavor.ephemeral_gb
instance.flavor = flavor
def _update_volume_attachments(self, context, instance, bdms):
"""Updates volume attachments using the virt driver host connector.
@ -9160,7 +9163,7 @@ class ComputeManager(manager.Manager):
LOG.debug('Dropping live migration resource claim on destination '
'node %s', nodename, instance=instance)
self.rt.drop_move_claim(
context, instance, nodename, instance_type=instance.flavor)
context, instance, nodename, flavor=instance.flavor)
@wrap_exception()
@wrap_instance_event(prefix='compute')
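A dict-based sketch of _set_instance_info above, with Nova objects replaced by plain dicts and assumed values:

def set_instance_info(instance, flavor):
    # Copy the sizing fields, mirroring the static method in the diff.
    for field in ('memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb'):
        instance[field] = flavor[field]
    instance['instance_type_id'] = flavor['id']
    instance['flavor'] = flavor

inst = {}
set_instance_info(inst, {'id': 2, 'memory_mb': 2048, 'vcpus': 2,
                         'root_gb': 20, 'ephemeral_gb': 0})
assert inst['instance_type_id'] == 2 and inst['vcpus'] == 2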

View File

@ -201,27 +201,29 @@ class ResourceTracker(object):
def rebuild_claim(self, context, instance, nodename, allocations,
limits=None, image_meta=None, migration=None):
"""Create a claim for a rebuild operation."""
instance_type = instance.flavor
return self._move_claim(
context, instance, instance_type, nodename, migration, allocations,
move_type=fields.MigrationType.EVACUATION,
context, instance, instance.flavor, nodename, migration,
allocations, move_type=fields.MigrationType.EVACUATION,
image_meta=image_meta, limits=limits)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def resize_claim(self, context, instance, instance_type, nodename,
migration, allocations, image_meta=None, limits=None):
def resize_claim(
self, context, instance, flavor, nodename, migration, allocations,
image_meta=None, limits=None,
):
"""Create a claim for a resize or cold-migration move.
Note that this code assumes ``instance.new_flavor`` is set when
resizing with a new flavor.
"""
return self._move_claim(context, instance, instance_type, nodename,
migration, allocations, image_meta=image_meta,
limits=limits)
return self._move_claim(
context, instance, flavor, nodename, migration,
allocations, image_meta=image_meta, limits=limits)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def live_migration_claim(self, context, instance, nodename, migration,
limits, allocs):
def live_migration_claim(
self, context, instance, nodename, migration, limits, allocs,
):
"""Builds a MoveClaim for a live migration.
:param context: The request context.
@ -235,17 +237,18 @@ class ResourceTracker(object):
:returns: A MoveClaim for this live migration.
"""
# Flavor and image cannot change during a live migration.
instance_type = instance.flavor
flavor = instance.flavor
image_meta = instance.image_meta
return self._move_claim(
context, instance, instance_type, nodename, migration, allocs,
context, instance, flavor, nodename, migration, allocs,
move_type=fields.MigrationType.LIVE_MIGRATION,
image_meta=image_meta, limits=limits,
)
def _move_claim(self, context, instance, new_instance_type, nodename,
migration, allocations, move_type=None,
image_meta=None, limits=None):
def _move_claim(
self, context, instance, new_flavor, nodename, migration, allocations,
move_type=None, image_meta=None, limits=None,
):
"""Indicate that resources are needed for a move to this host.
Move can be either a migrate/resize, live-migrate or an
@ -253,7 +256,7 @@ class ResourceTracker(object):
:param context: security context
:param instance: instance object to reserve resources for
:param new_instance_type: new instance_type being resized to
:param new_flavor: new flavor being resized to
:param nodename: The Ironic nodename selected by the scheduler
:param migration: A migration object if one was already created
elsewhere for this operation (otherwise None)
@ -271,9 +274,8 @@ class ResourceTracker(object):
if migration:
self._claim_existing_migration(migration, nodename)
else:
migration = self._create_migration(context, instance,
new_instance_type,
nodename, move_type)
migration = self._create_migration(
context, instance, new_flavor, nodename, move_type)
if self.disabled(nodename):
# compute_driver doesn't support resource tracking, just
@ -287,7 +289,7 @@ class ResourceTracker(object):
# the old/new pci device in the resize phase. In the future
# we would like to optimise this.
new_pci_requests = pci_request.get_pci_requests_from_flavor(
new_instance_type)
new_flavor)
new_pci_requests.instance_uuid = instance.uuid
# On resize merge the SR-IOV ports pci_requests
# with the new instance flavor pci_requests.
@ -296,7 +298,7 @@ class ResourceTracker(object):
if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
new_pci_requests.requests.append(request)
claim = claims.MoveClaim(context, instance, nodename,
new_instance_type, image_meta, self, cn,
new_flavor, image_meta, self, cn,
new_pci_requests, migration, limits=limits)
claimed_pci_devices_objs = []
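A rough sketch of the SR-IOV request merge described in the comments above, with PCI request objects replaced by plain dicts (field names assumed):

flavor_requests = [{'alias': 'a-pf', 'source': 'flavor'}]     # assumed
instance_requests = [{'port': 'p1', 'source': 'neutron_port'},
                     {'alias': 'old', 'source': 'flavor'}]
# Neutron-port requests survive the resize and are appended to the new
# flavor's requests; old flavor-sourced requests are dropped.
merged = flavor_requests + [r for r in instance_requests
                            if r['source'] == 'neutron_port']
assert len(merged) == 2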
@ -345,8 +347,9 @@ class ResourceTracker(object):
return claim
def _create_migration(self, context, instance, new_instance_type,
nodename, move_type=None):
def _create_migration(
self, context, instance, new_flavor, nodename, move_type=None,
):
"""Create a migration record for the upcoming resize. This should
be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
@ -356,7 +359,7 @@ class ResourceTracker(object):
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = new_instance_type.id
migration.new_instance_type_id = new_flavor.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
@ -587,38 +590,35 @@ class ResourceTracker(object):
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def drop_move_claim(self, context, instance, nodename,
instance_type=None, prefix='new_'):
flavor=None, prefix='new_'):
self._drop_move_claim(
context, instance, nodename, instance_type, prefix='new_')
context, instance, nodename, flavor, prefix='new_')
def _drop_move_claim(
self, context, instance, nodename, instance_type=None, prefix='new_',
self, context, instance, nodename, flavor=None, prefix='new_',
):
"""Remove usage for an incoming/outgoing migration.
:param context: Security context.
:param instance: The instance whose usage is to be removed.
:param nodename: Host on which to remove usage. If the migration
completed successfully, this is normally the source.
If it did not complete successfully (failed or
reverted), this is normally the destination.
:param instance_type: The flavor that determines the usage to remove.
If the migration completed successfully, this is
the old flavor to be removed from the source. If
the migration did not complete successfully, this
is the new flavor to be removed from the
destination.
completed successfully, this is normally the source. If it did not
complete successfully (failed or reverted), this is normally the
destination.
:param flavor: The flavor that determines the usage to remove. If the
migration completed successfully, this is the old flavor to be
removed from the source. If the migration did not complete
successfully, this is the new flavor to be removed from the
destination.
:param prefix: Prefix to use when accessing migration context
attributes. 'old_' or 'new_', with 'new_' being the
default.
attributes. 'old_' or 'new_', with 'new_' being the default.
"""
# Remove usage for an instance that is tracked in migrations, such as
# on the dest node during revert resize.
if instance['uuid'] in self.tracked_migrations:
migration = self.tracked_migrations.pop(instance['uuid'])
if not instance_type:
instance_type = self._get_instance_type(instance, prefix,
migration)
if not flavor:
flavor = self._get_flavor(instance, prefix, migration)
# Remove usage for an instance that is not tracked in migrations (such
# as on the source node after a migration).
# NOTE(lbeliveau): On resize on the same node, the instance is
@ -626,11 +626,11 @@ class ResourceTracker(object):
elif instance['uuid'] in self.tracked_instances:
self.tracked_instances.remove(instance['uuid'])
if instance_type is not None:
if flavor is not None:
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix=prefix)
usage = self._get_usage_dict(
instance_type, instance, numa_topology=numa_topology)
flavor, instance, numa_topology=numa_topology)
self._drop_pci_devices(instance, nodename, prefix)
resources = self._get_migration_context_resource(
'resources', instance, prefix=prefix)
@ -1298,9 +1298,8 @@ class ResourceTracker(object):
if same_node:
# Same node resize. Record usage for the 'new_' resources. This
# is executed on resize_claim().
if (instance['instance_type_id'] ==
migration.old_instance_type_id):
itype = self._get_instance_type(instance, 'new_', migration)
if instance['instance_type_id'] == migration.old_instance_type_id:
itype = self._get_flavor(instance, 'new_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
@ -1316,13 +1315,13 @@ class ResourceTracker(object):
# _update_usage_from_instances(). This method will then be
# called, and we need to account for the '_old' resources
# (just in case).
itype = self._get_instance_type(instance, 'old_', migration)
itype = self._get_flavor(instance, 'old_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
elif incoming and not tracked:
# instance has not yet migrated here:
itype = self._get_instance_type(instance, 'new_', migration)
itype = self._get_flavor(instance, 'new_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
@ -1332,7 +1331,7 @@ class ResourceTracker(object):
elif outbound and not tracked:
# instance migrated, but record usage for a possible revert:
itype = self._get_instance_type(instance, 'old_', migration)
itype = self._get_flavor(instance, 'old_', migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
# We could be racing with confirm_resize setting the
@ -1657,15 +1656,15 @@ class ResourceTracker(object):
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_instance_type(self, instance, prefix, migration):
"""Get the instance type from instance."""
def _get_flavor(self, instance, prefix, migration):
"""Get the flavor from instance."""
if migration.is_resize:
return getattr(instance, '%sflavor' % prefix)
else:
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
def _get_usage_dict(self, object_or_dict, instance, **updates):
"""Make a usage dict _update methods expect.

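The prefix logic of _get_flavor, restated as a standalone sketch with plain dicts: resizes stash old_/new_ flavors, while other move types keep the current flavor.

def get_flavor(instance, prefix, is_resize):
    if is_resize:
        return instance['%sflavor' % prefix]
    # Non-resize moves (live-migrate, evacuate) never change the flavor.
    return instance['flavor']

inst = {'flavor': 'm1.small', 'old_flavor': 'm1.small',
        'new_flavor': 'm1.large'}
assert get_flavor(inst, 'new_', is_resize=True) == 'm1.large'
assert get_flavor(inst, 'old_', is_resize=True) == 'm1.small'
assert get_flavor(inst, 'new_', is_resize=False) == 'm1.small'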
View File

@ -1083,9 +1083,10 @@ def get_headroom(quotas, usages, deltas):
return headroom
def check_num_instances_quota(context, instance_type, min_count,
max_count, project_id=None, user_id=None,
orig_num_req=None):
def check_num_instances_quota(
context, flavor, min_count, max_count, project_id=None, user_id=None,
orig_num_req=None,
):
"""Enforce quota limits on number of instances created."""
# project_id is also used for the TooManyInstances error message
if project_id is None:
@ -1100,8 +1101,8 @@ def check_num_instances_quota(context, instance_type, min_count,
if not any(r in user_quotas for r in ['instances', 'cores', 'ram']):
user_id = None
# Determine requested cores and ram
req_cores = max_count * instance_type.vcpus
req_ram = max_count * instance_type.memory_mb
req_cores = max_count * flavor.vcpus
req_ram = max_count * flavor.memory_mb
deltas = {'instances': max_count, 'cores': req_cores, 'ram': req_ram}
try:
@ -1117,8 +1118,8 @@ def check_num_instances_quota(context, instance_type, min_count,
if min_count == max_count == 0:
# orig_num_req is the original number of instances requested in the
# case of a recheck quota, for use in the over quota exception.
req_cores = orig_num_req * instance_type.vcpus
req_ram = orig_num_req * instance_type.memory_mb
req_cores = orig_num_req * flavor.vcpus
req_ram = orig_num_req * flavor.memory_mb
requested = {'instances': orig_num_req, 'cores': req_cores,
'ram': req_ram}
(overs, reqs, total_alloweds, useds) = get_over_quota_detail(
@ -1136,21 +1137,19 @@ def check_num_instances_quota(context, instance_type, min_count,
allowed = headroom.get('instances', 1)
# Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type.vcpus:
allowed = min(allowed,
headroom['cores'] // instance_type.vcpus)
if instance_type.memory_mb:
allowed = min(allowed,
headroom['ram'] // instance_type.memory_mb)
if flavor.vcpus:
allowed = min(allowed, headroom['cores'] // flavor.vcpus)
if flavor.memory_mb:
allowed = min(allowed, headroom['ram'] // flavor.memory_mb)
# Convert to the appropriate exception message
if allowed <= 0:
msg = "Cannot run any more instances of this type."
elif min_count <= allowed <= max_count:
# We're actually OK, but still need to check against allowed
return check_num_instances_quota(context, instance_type, min_count,
allowed, project_id=project_id,
user_id=user_id)
return check_num_instances_quota(
context, flavor, min_count, allowed, project_id=project_id,
user_id=user_id)
else:
msg = "Can only run %s more instances of this type." % allowed
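A worked example (numbers assumed) of the headroom clamp above: the instance allowance shrinks to whatever the remaining cores and ram headroom can fit.

headroom = {'instances': 10, 'cores': 7, 'ram': 8192}  # assumed quota left
flavor_vcpus, flavor_ram = 2, 2048

allowed = headroom['instances']                            # 10
allowed = min(allowed, headroom['cores'] // flavor_vcpus)  # min(10, 3) -> 3
allowed = min(allowed, headroom['ram'] // flavor_ram)      # min(3, 4) -> 3
assert allowed == 3  # a request with min_count <= 3 is retried, capped at 3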

View File

@ -1351,18 +1351,18 @@ class ComputeTaskManager(base.Base):
# TODO(avolkov): move method to bdm
@staticmethod
def _volume_size(instance_type, bdm):
def _volume_size(flavor, bdm):
size = bdm.get('volume_size')
# NOTE (ndipanov): inherit flavor size only for swap and ephemeral
if (size is None and bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local'):
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
size = flavor.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
size = flavor.get('ephemeral_gb', 0)
return size
def _create_block_device_mapping(self, cell, instance_type, instance_uuid,
def _create_block_device_mapping(self, cell, flavor, instance_uuid,
block_device_mapping):
"""Create the BlockDeviceMapping objects in the db.
@ -1373,7 +1373,7 @@ class ComputeTaskManager(base.Base):
instance_uuid=instance_uuid)
instance_block_device_mapping = copy.deepcopy(block_device_mapping)
for bdm in instance_block_device_mapping:
bdm.volume_size = self._volume_size(instance_type, bdm)
bdm.volume_size = self._volume_size(flavor, bdm)
bdm.instance_uuid = instance_uuid
with obj_target_cell(bdm, cell):
bdm.update_or_create()
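A dict-based sketch of _volume_size with assumed values: only blank, local block devices inherit a size from the flavor.

def volume_size(flavor, bdm):
    size = bdm.get('volume_size')
    if (size is None and bdm.get('source_type') == 'blank' and
            bdm.get('destination_type') == 'local'):
        if bdm.get('guest_format') == 'swap':
            return flavor.get('swap', 0)
        return flavor.get('ephemeral_gb', 0)
    return size

flavor = {'swap': 1, 'ephemeral_gb': 2}
swap_bdm = {'source_type': 'blank', 'destination_type': 'local',
            'guest_format': 'swap'}
assert volume_size(flavor, swap_bdm) == 1  # inherits the flavor's swap size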

View File

@ -560,6 +560,7 @@ def upgrade(migrate_engine):
mysql_charset='utf8'
)
# TODO(stephenfin): Remove this table since it has been moved to the API DB
instance_type_extra_specs = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
@ -578,6 +579,7 @@ def upgrade(migrate_engine):
mysql_charset='utf8'
)
# TODO(stephenfin): Remove this table since it has been moved to the API DB
instance_type_projects = Table('instance_type_projects', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
@ -594,6 +596,7 @@ def upgrade(migrate_engine):
mysql_charset='utf8'
)
# TODO(stephenfin): Remove this table since it has been moved to the API DB
instance_types = Table('instance_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),

View File

@ -321,9 +321,9 @@ def info_from_instance(context, instance, network_info,
image_ref_url = instance.image_ref
exc_ctx.reraise = False
instance_type = instance.get_flavor()
instance_type_name = instance_type.get('name', '')
instance_flavorid = instance_type.get('flavorid', '')
flavor = instance.get_flavor()
flavor_name = flavor.get('name', '')
instance_flavorid = flavor.get('flavorid', '')
instance_info = dict(
# Owner properties
@ -337,7 +337,7 @@ def info_from_instance(context, instance, network_info,
hostname=instance.hostname,
# Type properties
instance_type=instance_type_name,
instance_type=flavor_name,
instance_type_id=instance.instance_type_id,
instance_flavor_id=instance_flavorid,
architecture=instance.architecture,

View File

@ -148,6 +148,8 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
'host': fields.StringField(nullable=True),
'node': fields.StringField(nullable=True),
# TODO(stephenfin): Remove this in version 3.0 of the object as it has
# been replaced by 'flavor'
'instance_type_id': fields.IntegerField(nullable=True),
'user_data': fields.StringField(nullable=True),

View File

@ -53,6 +53,8 @@ class Migration(base.NovaPersistentObject, base.NovaObject,
'source_node': fields.StringField(nullable=True), # source nodename
'dest_node': fields.StringField(nullable=True), # dest nodename
'dest_host': fields.StringField(nullable=True), # dest host IP
# TODO(stephenfin): Rename these to old_flavor_id, new_flavor_id in
# v2.0
'old_instance_type_id': fields.IntegerField(nullable=True),
'new_instance_type_id': fields.IntegerField(nullable=True),
'instance_uuid': fields.StringField(nullable=True),

View File

@ -28,7 +28,7 @@ _SCOPE = 'aggregate_instance_extra_specs'
class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
"""AggregateInstanceExtraSpecsFilter works with InstanceType records."""
"""AggregateInstanceExtraSpecsFilter works with flavor records."""
# Aggregate data and instance type do not change within a request
run_filter_once_per_request = True
@ -36,21 +36,20 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
RUN_ON_REBUILD = False
def host_passes(self, host_state, spec_obj):
"""Return a list of hosts that can create instance_type
"""Return a list of hosts that can create flavor.
Check that the extra specs associated with the instance type match
the metadata provided by aggregates. If not present return False.
"""
instance_type = spec_obj.flavor
flavor = spec_obj.flavor
# If 'extra_specs' is not present or extra_specs are empty then we
# need not proceed further
if (not instance_type.obj_attr_is_set('extra_specs') or
not instance_type.extra_specs):
if 'extra_specs' not in flavor or not flavor.extra_specs:
return True
metadata = utils.aggregate_metadata_get_by_host(host_state)
for key, req in instance_type.extra_specs.items():
for key, req in flavor.extra_specs.items():
# Either not scope format, or aggregate_instance_extra_specs scope
scope = key.split(':', 1)
if len(scope) > 1:
@ -62,18 +61,20 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
aggregate_vals = metadata.get(key, None)
if not aggregate_vals:
LOG.debug(
"%(host_state)s fails instance_type extra_specs "
"requirements. Extra_spec %(key)s is not in aggregate.",
"%(host_state)s fails flavor extra_specs requirements. "
"Extra_spec %(key)s is not in aggregate.",
{'host_state': host_state, 'key': key})
return False
for aggregate_val in aggregate_vals:
if extra_specs_ops.match(aggregate_val, req):
break
else:
LOG.debug("%(host_state)s fails instance_type extra_specs "
"requirements. '%(aggregate_vals)s' do not "
"match '%(req)s'",
{'host_state': host_state, 'req': req,
'aggregate_vals': aggregate_vals})
LOG.debug(
"%(host_state)s fails flavor extra_specs requirements. "
"'%(aggregate_vals)s' do not match '%(req)s'",
{
'host_state': host_state, 'req': req,
'aggregate_vals': aggregate_vals,
})
return False
return True
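A sketch of the scope handling in this filter, with an assumed extra spec key: only unscoped keys or keys in the aggregate_instance_extra_specs namespace are matched against aggregate metadata.

_SCOPE = 'aggregate_instance_extra_specs'
key = 'aggregate_instance_extra_specs:ssd'   # assumed flavor extra spec
scope = key.split(':', 1)
if len(scope) > 1:
    if scope[0] != _SCOPE:
        key = None        # other namespace: this filter skips the spec
    else:
        key = scope[1]    # strip the scope before the metadata lookup
assert key == 'ssd'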

View File

@ -65,14 +65,14 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
return None
return cap
def _satisfies_extra_specs(self, host_state, instance_type):
def _satisfies_extra_specs(self, host_state, flavor):
"""Check that the host_state provided by the compute service
satisfies the extra specs associated with the instance type.
"""
if 'extra_specs' not in instance_type:
if 'extra_specs' not in flavor:
return True
for key, req in instance_type.extra_specs.items():
for key, req in flavor.extra_specs.items():
# Either not scope format, or in capabilities scope
scope = key.split(':')
# If key does not have a namespace, the scope's size is 1, check
@ -106,10 +106,10 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
return True
def host_passes(self, host_state, spec_obj):
"""Return a list of hosts that can create instance_type."""
instance_type = spec_obj.flavor
if not self._satisfies_extra_specs(host_state, instance_type):
LOG.debug("%(host_state)s fails instance_type extra_specs "
"requirements", {'host_state': host_state})
"""Return a list of hosts that can create flavor."""
if not self._satisfies_extra_specs(host_state, spec_obj.flavor):
LOG.debug(
"%(host_state)s fails flavor extra_specs requirements",
{'host_state': host_state})
return False
return True
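A hedged sketch of the key scoping referenced above (key assumed): a 'capabilities:' namespace targets host capability lookups.

key = 'capabilities:hypervisor_type'   # assumed extra spec key
scope = key.split(':')
assert scope == ['capabilities', 'hypervisor_type']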

View File

@ -19,9 +19,9 @@ from nova.scheduler.filters import utils
class AggregateTypeAffinityFilter(filters.BaseHostFilter):
"""AggregateTypeAffinityFilter limits instance_type by aggregate
"""AggregateTypeAffinityFilter limits flavors by aggregate
return True if no instance_type key is set or if the aggregate metadata
return True if no flavor key is set or if the aggregate metadata
key 'instance_type' has the instance_type name as a value
"""
@ -31,13 +31,11 @@ class AggregateTypeAffinityFilter(filters.BaseHostFilter):
RUN_ON_REBUILD = False
def host_passes(self, host_state, spec_obj):
instance_type = spec_obj.flavor
# TODO(stephenfin): Add support for 'flavor' key
aggregate_vals = utils.aggregate_values_from_key(
host_state, 'instance_type')
for val in aggregate_vals:
if (instance_type.name in
[x.strip() for x in val.split(',')]):
if spec_obj.flavor.name in [x.strip() for x in val.split(',')]:
return True
return not aggregate_vals
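The matching rule in host_passes as a standalone sketch: the aggregate's 'instance_type' metadata value is read as a comma-separated list of flavor names (values assumed).

aggregate_vals = ['m1.small, m1.medium']   # assumed aggregate metadata
flavor_name = 'm1.medium'

# A host passes if no values are set, or the flavor name is listed.
passes = not aggregate_vals or any(
    flavor_name in [x.strip() for x in val.split(',')]
    for val in aggregate_vals)
assert passes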

View File

@ -534,7 +534,7 @@ class ResourceRequest(object):
list(str(rg) for rg in list(self._rg_by_id.values()))))
def build_request_spec(image, instances, instance_type=None):
def build_request_spec(image, instances, flavor=None):
"""Build a request_spec (ahem, not a RequestSpec) for the scheduler.
The request_spec assumes that all instances to be scheduled are the same
@ -543,21 +543,21 @@ def build_request_spec(image, instances, instance_type=None):
:param image: optional primitive image meta dict
:param instances: list of instances; objects will be converted to
primitives
:param instance_type: optional flavor; objects will be converted to
:param flavor: optional flavor; objects will be converted to
primitives
:return: dict with the following keys::
'image': the image dict passed in or {}
'instance_properties': primitive version of the first instance passed
'instance_type': primitive version of the instance_type or None
'instance_type': primitive version of the flavor or None
'num_instances': the number of instances passed in
"""
instance = instances[0]
if instance_type is None:
if flavor is None:
if isinstance(instance, obj_instance.Instance):
instance_type = instance.get_flavor()
flavor = instance.get_flavor()
else:
instance_type = flavors.extract_flavor(instance)
flavor = flavors.extract_flavor(instance)
if isinstance(instance, obj_instance.Instance):
instance = obj_base.obj_to_primitive(instance)
@ -565,25 +565,26 @@ def build_request_spec(image, instances, instance_type=None):
# to detach our metadata blob because we modify it below.
instance['system_metadata'] = dict(instance.get('system_metadata', {}))
if isinstance(instance_type, objects.Flavor):
instance_type = obj_base.obj_to_primitive(instance_type)
if isinstance(flavor, objects.Flavor):
flavor = obj_base.obj_to_primitive(flavor)
# NOTE(danms): Replicate this old behavior because the
# scheduler RPC interface technically expects it to be
# there. Remove this when we bump the scheduler RPC API to
# v5.0
try:
flavors.save_flavor_info(instance.get('system_metadata', {}),
instance_type)
flavors.save_flavor_info(
instance.get('system_metadata', {}), flavor)
except KeyError:
# If the flavor isn't complete (which is legit with a
# flavor object), just don't put it in the request spec
pass
request_spec = {
'image': image or {},
'instance_properties': instance,
'instance_type': instance_type,
'num_instances': len(instances)}
'image': image or {},
'instance_properties': instance,
'instance_type': flavor,
'num_instances': len(instances),
}
# NOTE(mriedem): obj_to_primitive above does not serialize everything
# in an object, like datetime fields, so we need to still call to_primitive
# to recursively serialize the items in the request_spec dict.
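A sketch (values assumed) of the legacy dict built here; note the key stays 'instance_type' for scheduler RPC compatibility even though the local variable is now 'flavor'.

request_spec = {
    'image': {},                                   # or image meta primitive
    'instance_properties': {'uuid': 'fake-uuid',
                            'system_metadata': {}},
    'instance_type': {'flavorid': '1', 'vcpus': 1, 'memory_mb': 512},
    'num_instances': 1,
}
assert 'instance_type' in request_spec  # legacy key name is kept for RPC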
@ -898,11 +899,12 @@ def set_vm_state_and_notify(context, instance_uuid, service, method, updates,
context, method, instance_uuid, request_spec, vm_state, ex)
def build_filter_properties(scheduler_hints, forced_host,
forced_node, instance_type):
def build_filter_properties(
scheduler_hints, forced_host, forced_node, flavor,
):
"""Build the filter_properties dict from data in the boot request."""
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
filter_properties['instance_type'] = flavor
# TODO(alaski): It doesn't seem necessary that these are conditionally
# added. Let's just add empty lists if not forced_host/node.
if forced_host:

View File

@ -87,7 +87,7 @@ class BootFromVolumeOverQuotaRaceDeleteTest(
# but fails in conductor once the instance has been created in cell1.
original_quota_check = compute_utils.check_num_instances_quota
def stub_check_num_instances_quota(_self, context, instance_type,
def stub_check_num_instances_quota(_self, context, flavor,
min_count, *args, **kwargs):
# Determine where we are in the flow based on whether or not the
# min_count is 0 (API will pass 1, conductor will pass 0).
@ -96,7 +96,7 @@ class BootFromVolumeOverQuotaRaceDeleteTest(
'test_bfv_quota_race_local_delete')
# We're checking from the API so perform the original quota check.
return original_quota_check(
_self, context, instance_type, min_count, *args, **kwargs)
_self, context, flavor, min_count, *args, **kwargs)
self.stub_out('nova.compute.utils.check_num_instances_quota',
stub_check_num_instances_quota)

View File

@ -43,8 +43,9 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase):
claim_calls = []
def fake_orig_claim(
_self, context, instance, instance_type, nodename,
*args, **kwargs):
_self, context, instance, flavor, nodename,
*args, **kwargs,
):
if not claim_calls:
claim_calls.append(nodename)
raise exception.ComputeResourcesUnavailable(
@ -52,7 +53,7 @@ class PinnedComputeRpcTests(integrated_helpers.ProviderUsageBaseTestCase):
else:
claim_calls.append(nodename)
return orig_claim(
_self, context, instance, instance_type, nodename, *args,
_self, context, instance, flavor, nodename, *args,
**kwargs)
with mock.patch(

View File

@ -47,8 +47,9 @@ class ForcedHostMissingReScheduleTestCase(
claim_calls = []
def fake_orig_claim(
_self, context, instance, instance_type, nodename,
*args, **kwargs):
_self, context, instance, flavor, nodename,
*args, **kwargs,
):
if not claim_calls:
claim_calls.append(nodename)
raise exception.ComputeResourcesUnavailable(
@ -56,7 +57,7 @@ class ForcedHostMissingReScheduleTestCase(
else:
claim_calls.append(nodename)
return orig_claim(
_self, context, instance, instance_type, nodename, *args,
_self, context, instance, flavor, nodename, *args,
**kwargs)
with mock.patch(

View File

@ -73,19 +73,18 @@ class DiskConfigTestCaseV21(test.TestCase):
def fake_instance_create(context, inst_, session=None):
inst = fake_instance.fake_db_instance(**{
'id': 1,
'uuid': AUTO_INSTANCE_UUID,
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'updated_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'progress': 0,
'name': 'instance-1', # this is a property
'task_state': '',
'vm_state': '',
'auto_disk_config': inst_['auto_disk_config'],
'security_groups': inst_['security_groups'],
'instance_type': objects.Flavor.get_by_name(context,
'm1.small'),
})
'id': 1,
'uuid': AUTO_INSTANCE_UUID,
'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'updated_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
'progress': 0,
'name': 'instance-1', # this is a property
'task_state': '',
'vm_state': '',
'auto_disk_config': inst_['auto_disk_config'],
'security_groups': inst_['security_groups'],
'flavor': objects.Flavor.get_by_name(context, 'm1.small'),
})
return inst

View File

@ -49,16 +49,18 @@ def generate_flavor(flavorid, ispublic):
}
INSTANCE_TYPES = {
'0': generate_flavor(0, True),
'1': generate_flavor(1, True),
'2': generate_flavor(2, False),
'3': generate_flavor(3, False)}
FLAVORS = {
'0': generate_flavor(0, True),
'1': generate_flavor(1, True),
'2': generate_flavor(2, False),
'3': generate_flavor(3, False)}
ACCESS_LIST = [{'flavor_id': '2', 'project_id': 'proj2'},
{'flavor_id': '2', 'project_id': 'proj3'},
{'flavor_id': '3', 'project_id': 'proj3'}]
ACCESS_LIST = [
{'flavor_id': '2', 'project_id': 'proj2'},
{'flavor_id': '2', 'project_id': 'proj3'},
{'flavor_id': '3', 'project_id': 'proj3'},
]
def fake_get_flavor_access_by_flavor_id(context, flavorid):
@ -70,7 +72,7 @@ def fake_get_flavor_access_by_flavor_id(context, flavorid):
def fake_get_flavor_by_flavor_id(context, flavorid):
return INSTANCE_TYPES[flavorid]
return FLAVORS[flavorid]
def _has_flavor_access(flavorid, projectid):
@ -85,10 +87,10 @@ def fake_get_all_flavors_sorted_list(context, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
if filters is None or filters['is_public'] is None:
return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key])
return sorted(FLAVORS.values(), key=lambda item: item[sort_key])
res = {}
for k, v in INSTANCE_TYPES.items():
for k, v in FLAVORS.items():
if filters['is_public'] and _has_flavor_access(k, context.project_id):
res.update({k: v})
continue

View File

@ -914,12 +914,12 @@ class DisabledFlavorsWithRealDBTestV21(test.TestCase):
self.context = self.req.environ['nova.context']
self.admin_context = context.get_admin_context()
self.disabled_type = self._create_disabled_instance_type()
self.disabled_type = self._create_disabled_flavor()
self.addCleanup(self.disabled_type.destroy)
self.inst_types = objects.FlavorList.get_all(self.admin_context)
self.controller = self.Controller()
def _create_disabled_instance_type(self):
def _create_disabled_flavor(self):
flavor = objects.Flavor(context=self.admin_context,
name='foo.disabled', flavorid='10.disabled',
memory_mb=512, vcpus=2, root_gb=1,

View File

@ -4208,7 +4208,7 @@ class ServersControllerCreateTest(test.TestCase):
self.controller = servers.ServersController()
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
flavor = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/%s/images/%s' % (self.project_id,
image_uuid)
@ -4218,7 +4218,7 @@ class ServersControllerCreateTest(test.TestCase):
'display_name': inst['display_name'] or 'test',
'display_description': inst['display_description'] or '',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'flavor': flavor,
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': fakes.FAKE_PROJECT_ID,
@ -7194,7 +7194,7 @@ class ServersViewBuilderTest(test.TestCase):
'ips': [_ip(fixed_ipv4[2])]}]}}]
return nw_cache
def test_get_flavor_valid_instance_type(self):
def test_get_flavor_valid_flavor(self):
flavor_bookmark = "http://localhost/%s/flavors/1" % self.project_id
expected = {"id": "1",
"links": [{"rel": "bookmark",

View File

@ -78,7 +78,7 @@ def _fake_instance(start, end, instance_id, tenant_id,
project_id=tenant_id,
user_id='fakeuser',
display_name='name',
instance_type_id=FAKE_INST_TYPE['id'],
instance_type_id=flavor.id,
launched_at=start,
terminated_at=end,
vm_state=vm_state,

View File

@ -126,18 +126,18 @@ class BootFromVolumeTest(test.TestCase):
self._legacy_bdm_seen = True
def _get_fake_compute_api_create(self):
def _fake_compute_api_create(cls, context, instance_type,
def _fake_compute_api_create(cls, context, flavor,
image_href, **kwargs):
self._block_device_mapping_seen = kwargs.get(
'block_device_mapping')
self._legacy_bdm_seen = kwargs.get('legacy_bdm')
inst_type = flavors.get_flavor_by_flavor_id(2)
flavor = flavors.get_flavor_by_flavor_id(2)
resv_id = None
return ([{'id': 1,
'display_name': 'test_server',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'flavor': flavor,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': IMAGE_UUID,

View File

@ -441,7 +441,7 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
terminated_at=timeutils.utcnow(),
availability_zone='', locked_by=None, cleaned=False,
memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0,
instance_type=None, launch_index=0, kernel_id="",
flavor=None, launch_index=0, kernel_id="",
ramdisk_id="", user_data=None, system_metadata=None,
services=None, trusted_certs=None, hidden=False):
if user_id is None:
@ -481,11 +481,11 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
info_cache = create_info_cache(nw_cache)
if instance_type is None:
instance_type = objects.Flavor.get_by_name(
if flavor is None:
flavor = objects.Flavor.get_by_name(
context.get_admin_context(), 'm1.small')
flavorinfo = jsonutils.dumps({
'cur': instance_type.obj_to_primitive(),
'cur': flavor.obj_to_primitive(),
'old': None,
'new': None,
})
@ -501,62 +501,60 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
"image_ref": image_ref,
"kernel_id": kernel_id,
"ramdisk_id": ramdisk_id,
"hostname": display_name or server_name,
"launch_index": launch_index,
"key_name": key_name,
"key_data": key_data,
"config_drive": config_drive,
"power_state": power_state,
"vm_state": vm_state or vm_states.ACTIVE,
"task_state": task_state,
"power_state": power_state,
"services": services,
"memory_mb": memory_mb,
"vcpus": vcpus,
"root_gb": root_gb,
"ephemeral_gb": ephemeral_gb,
"ephemeral_key_uuid": None,
"hostname": display_name or server_name,
"host": host,
"node": node,
"instance_type_id": 1,
"instance_type": inst_type,
"instance_type_id": flavor.id,
"user_data": user_data,
"reservation_id": reservation_id,
"mac_address": "",
"launched_at": launched_at,
"terminated_at": terminated_at,
"availability_zone": availability_zone,
"display_name": display_name or server_name,
"display_description": display_description,
"launched_on": "",
"locked": locked_by is not None,
"locked_by": locked_by,
"metadata": metadata,
"os_type": "",
"architecture": "",
"vm_mode": "",
"uuid": uuid,
"root_device_name": root_device_name,
"default_ephemeral_device": "",
"default_swap_device": "",
"config_drive": config_drive,
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"uuid": uuid,
"progress": progress,
"auto_disk_config": auto_disk_config,
"name": "instance-%s" % id,
"progress": progress,
"shutdown_terminate": True,
"disable_terminate": False,
"security_groups": security_groups,
"root_device_name": root_device_name,
"system_metadata": utils.dict_to_metadata(sys_meta),
"pci_devices": [],
"vm_mode": "",
"default_swap_device": "",
"default_ephemeral_device": "",
"launched_on": "",
"cell_name": "",
"architecture": "",
"os_type": "",
"metadata": metadata,
"system_metadata": utils.dict_to_metadata(sys_meta),
"security_groups": security_groups,
"cleaned": cleaned,
"pci_devices": [],
"extra": {"numa_topology": None,
"pci_requests": None,
"flavor": flavorinfo,
"trusted_certs": trusted_certs,
},
"cleaned": cleaned,
"services": services,
"tags": [],
"hidden": hidden,
"name": "instance-%s" % id,
}
instance.update(info_cache)

View File

@ -153,6 +153,9 @@ class _ComputeAPIUnitTestMixIn(object):
instance.host = 'fake_host'
instance.node = NODENAME
instance.instance_type_id = flavor.id
instance.flavor = flavor
instance.old_flavor = None
instance.new_flavor = None
instance.ami_launch_index = 0
instance.memory_mb = 0
instance.vcpus = 0
@ -167,8 +170,6 @@ class _ComputeAPIUnitTestMixIn(object):
instance.disable_terminate = False
instance.info_cache = objects.InstanceInfoCache()
instance.info_cache.network_info = model.NetworkInfo()
instance.flavor = flavor
instance.old_flavor = instance.new_flavor = None
instance.numa_topology = None
if params:

View File

@ -79,7 +79,7 @@ class ClaimTestCase(test.NoDBTestCase):
def _claim(self, limits=None, requests=None, **kwargs):
numa_topology = kwargs.pop('numa_topology', None)
instance = self._fake_instance(**kwargs)
instance.flavor = self._fake_instance_type(**kwargs)
instance.flavor = self._fake_flavor(**kwargs)
if numa_topology:
db_numa_topology = {
'id': 1, 'created_at': None, 'updated_at': None,
@ -114,8 +114,8 @@ class ClaimTestCase(test.NoDBTestCase):
instance.update(**kwargs)
return fake_instance.fake_instance_obj(self.context, **instance)
def _fake_instance_type(self, **kwargs):
instance_type = {
def _fake_flavor(self, **kwargs):
flavor = {
'id': 1,
'name': 'fakeitype',
'memory_mb': 1024,
@ -123,8 +123,8 @@ class ClaimTestCase(test.NoDBTestCase):
'root_gb': 10,
'ephemeral_gb': 5
}
instance_type.update(**kwargs)
return objects.Flavor(**instance_type)
flavor.update(**kwargs)
return objects.Flavor(**flavor)
def _fake_compute_node(self, values=None):
compute_node = {
@ -323,7 +323,7 @@ class MoveClaimTestCase(ClaimTestCase):
def _claim(self, limits=None, requests=None,
image_meta=None, **kwargs):
instance_type = self._fake_instance_type(**kwargs)
flavor = self._fake_flavor(**kwargs)
numa_topology = kwargs.pop('numa_topology', None)
image_meta = image_meta or {}
self.instance = self._fake_instance(**kwargs)
@ -347,7 +347,7 @@ class MoveClaimTestCase(ClaimTestCase):
return_value=self.db_numa_topology)
def get_claim(mock_extra_get, mock_numa_get):
return claims.MoveClaim(
self.context, self.instance, _NODENAME, instance_type,
self.context, self.instance, _NODENAME, flavor,
image_meta, self.tracker, self.compute_node, requests,
objects.Migration(migration_type='migration'), limits=limits)
return get_claim()
@ -371,20 +371,20 @@ class MoveClaimTestCase(ClaimTestCase):
class LiveMigrationClaimTestCase(ClaimTestCase):
def test_live_migration_claim_bad_pci_request(self):
instance_type = self._fake_instance_type()
flavor = self._fake_flavor()
instance = self._fake_instance()
instance.numa_topology = None
self.assertRaisesRegex(
exception.ComputeResourcesUnavailable,
'PCI requests are not supported',
claims.MoveClaim, self.context, instance, _NODENAME, instance_type,
claims.MoveClaim, self.context, instance, _NODENAME, flavor,
{}, self.tracker, self.compute_node,
objects.InstancePCIRequests(requests=[
objects.InstancePCIRequest(alias_name='fake-alias')]),
objects.Migration(migration_type='live-migration'), None)
def test_live_migration_page_size(self):
instance_type = self._fake_instance_type()
flavor = self._fake_flavor()
instance = self._fake_instance()
instance.numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
@ -399,12 +399,12 @@ class LiveMigrationClaimTestCase(ClaimTestCase):
exception.ComputeResourcesUnavailable,
'Requested page size is different',
claims.MoveClaim, self.context, instance, _NODENAME,
instance_type, {}, self.tracker, self.compute_node,
flavor, {}, self.tracker, self.compute_node,
self.empty_requests,
objects.Migration(migration_type='live-migration'), None)
def test_claim_fails_page_size_not_called(self):
instance_type = self._fake_instance_type()
flavor = self._fake_flavor()
instance = self._fake_instance()
# This topology cannot fit in self.compute_node
# (see _fake_compute_node())
@ -422,16 +422,16 @@ class LiveMigrationClaimTestCase(ClaimTestCase):
exception.ComputeResourcesUnavailable,
'Requested instance NUMA topology',
claims.MoveClaim, self.context, instance, _NODENAME,
instance_type, {}, self.tracker, self.compute_node,
flavor, {}, self.tracker, self.compute_node,
self.empty_requests,
objects.Migration(migration_type='live-migration'), None)
mock_test_page_size.assert_not_called()
def test_live_migration_no_instance_numa_topology(self):
instance_type = self._fake_instance_type()
flavor = self._fake_flavor()
instance = self._fake_instance()
instance.numa_topology = None
claims.MoveClaim(
self.context, instance, _NODENAME, instance_type, {}, self.tracker,
self.context, instance, _NODENAME, flavor, {}, self.tracker,
self.compute_node, self.empty_requests,
objects.Migration(migration_type='live-migration'), None)

View File

@ -944,7 +944,7 @@ class ComputeVolumeTestCase(BaseTestCase):
def test_prepare_image_mapping(self):
swap_size = 1
ephemeral_size = 1
instance_type = {'swap': swap_size,
flavor = {'swap': swap_size,
'ephemeral_gb': ephemeral_size}
mappings = [
{'virtual': 'ami', 'device': 'sda1'},
@ -957,7 +957,7 @@ class ComputeVolumeTestCase(BaseTestCase):
]
preped_bdm = self.compute_api._prepare_image_mapping(
instance_type, mappings)
flavor, mappings)
expected_result = [
{
@ -1010,7 +1010,7 @@ class ComputeVolumeTestCase(BaseTestCase):
image_id = '77777777-aaaa-bbbb-cccc-555555555555'
instance = self._create_fake_instance_obj()
instance_type = {'swap': 1, 'ephemeral_gb': 2}
flavor = {'swap': 1, 'ephemeral_gb': 2}
mappings = [
fake_block_device.FakeDbBlockDeviceDict({
'device_name': '/dev/sdb4',
@ -1058,7 +1058,7 @@ class ComputeVolumeTestCase(BaseTestCase):
volume_id: fake_get(None, None, volume_id)
}
self.compute_api._validate_bdm(self.context, instance,
instance_type, mappings, {},
flavor, mappings, {},
volumes)
self.assertEqual(4, mappings[1].volume_size)
self.assertEqual(6, mappings[2].volume_size)
@ -1067,7 +1067,7 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings[2].boot_index = 2
self.assertRaises(exception.InvalidBDMBootSequence,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
self.context, instance, flavor,
mappings, {}, volumes)
mappings[2].boot_index = 0
@ -1075,7 +1075,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.flags(max_local_block_devices=1)
self.assertRaises(exception.InvalidBDMLocalsLimit,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
self.context, instance, flavor,
mappings, {}, volumes)
ephemerals = [
fake_block_device.FakeDbBlockDeviceDict({
@ -1105,7 +1105,7 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings_ = mappings[:]
mappings_.objects.extend(ephemerals)
self.compute_api._validate_bdm(self.context, instance,
instance_type, mappings_, {},
flavor, mappings_, {},
volumes)
# Ephemerals over the size limit
@ -1114,14 +1114,14 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings_.objects.extend(ephemerals)
self.assertRaises(exception.InvalidBDMEphemeralSize,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
self.context, instance, flavor,
mappings_, {}, volumes)
# Swap over the size limit
mappings[0].volume_size = 3
self.assertRaises(exception.InvalidBDMSwapSize,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
self.context, instance, flavor,
mappings, {}, volumes)
mappings[0].volume_size = 1
@ -1144,7 +1144,7 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings_.objects.extend(additional_swap)
self.assertRaises(exception.InvalidBDMFormat,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
self.context, instance, flavor,
mappings_, {}, volumes)
image_no_size = [
@ -1163,7 +1163,7 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings_.objects.extend(image_no_size)
self.assertRaises(exception.InvalidBDM,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
self.context, instance, flavor,
mappings_, {}, volumes)
# blank device without a specified size fails
@ -1182,11 +1182,11 @@ class ComputeVolumeTestCase(BaseTestCase):
mappings_.objects.extend(blank_no_size)
self.assertRaises(exception.InvalidBDM,
self.compute_api._validate_bdm,
self.context, instance, instance_type,
self.context, instance, flavor,
mappings_, {}, volumes)
def test_validate_bdm_with_more_than_one_default(self):
instance_type = {'swap': 1, 'ephemeral_gb': 1}
flavor = {'swap': 1, 'ephemeral_gb': 1}
all_mappings = [fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'no_device': None,
@ -1217,13 +1217,13 @@ class ComputeVolumeTestCase(BaseTestCase):
self.assertRaises(exception.InvalidBDMEphemeralSize,
self.compute_api._validate_bdm,
self.context, self.instance,
instance_type, all_mappings, image_cache, volumes)
flavor, all_mappings, image_cache, volumes)
@mock.patch.object(cinder.API, 'attachment_create',
side_effect=exception.InvalidVolume(reason='error'))
def test_validate_bdm_media_service_invalid_volume(self, mock_att_create):
volume_id = uuids.volume_id
instance_type = {'swap': 1, 'ephemeral_gb': 1}
flavor = {'swap': 1, 'ephemeral_gb': 1}
bdms = [fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'no_device': None,
@ -1263,7 +1263,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.assertRaises(exception.InvalidVolume,
self.compute_api._validate_bdm,
self.context, self.instance_object,
instance_type, bdms, {}, volumes)
flavor, bdms, {}, volumes)
@mock.patch.object(cinder.API, 'check_availability_zone')
@mock.patch.object(cinder.API, 'attachment_create',
@ -1271,7 +1271,7 @@ class ComputeVolumeTestCase(BaseTestCase):
def test_validate_bdm_media_service_valid(self, mock_att_create,
mock_check_av_zone):
volume_id = uuids.volume_id
instance_type = {'swap': 1, 'ephemeral_gb': 1}
flavor = {'swap': 1, 'ephemeral_gb': 1}
bdms = [fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'no_device': None,
@ -1292,7 +1292,7 @@ class ComputeVolumeTestCase(BaseTestCase):
image_cache = {}
volumes = {volume_id: volume}
self.compute_api._validate_bdm(self.context, self.instance_object,
instance_type, bdms, image_cache,
flavor, bdms, image_cache,
volumes)
mock_check_av_zone.assert_not_called()
mock_att_create.assert_called_once_with(
@ -2930,13 +2930,13 @@ class ComputeTestCase(BaseTestCase,
power_state=10003,
vm_state=vm_states.ACTIVE,
task_state=expected_task,
instance_type=self.default_flavor,
flavor=self.default_flavor,
launched_at=timeutils.utcnow()))
updated_dbinstance2 = fake_instance.fake_db_instance(
**dict(uuid=uuids.db_instance_2,
power_state=10003,
vm_state=vm_states.ACTIVE,
instance_type=self.default_flavor,
flavor=self.default_flavor,
task_state=expected_task,
launched_at=timeutils.utcnow()))
@ -4551,7 +4551,7 @@ class ComputeTestCase(BaseTestCase,
migration.instance_uuid = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
migration.uuid = uuids.migration_uuid
migration.new_instance_type_id = '1'
instance_type = objects.Flavor()
flavor = objects.Flavor()
actions = [
("reboot_instance", task_states.REBOOTING,
@ -4593,7 +4593,7 @@ class ComputeTestCase(BaseTestCase,
'request_spec': {}}),
("prep_resize", task_states.RESIZE_PREP,
{'image': {},
'flavor': instance_type,
'flavor': flavor,
'request_spec': {},
'filter_properties': {},
'node': None,
@ -4683,18 +4683,18 @@ class ComputeTestCase(BaseTestCase,
instance = self._create_fake_instance_obj(params)
image = {}
disk_info = 'fake-disk-info'
instance_type = self.default_flavor
flavor = self.default_flavor
if not resize_instance:
old_instance_type = self.tiny_flavor
instance_type['root_gb'] = old_instance_type['root_gb']
instance_type['swap'] = old_instance_type['swap']
instance_type['ephemeral_gb'] = old_instance_type['ephemeral_gb']
old_flavor = self.tiny_flavor
flavor['root_gb'] = old_flavor['root_gb']
flavor['swap'] = old_flavor['swap']
flavor['ephemeral_gb'] = old_flavor['ephemeral_gb']
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
flavor=instance_type,
flavor=flavor,
image={}, request_spec={},
filter_properties={}, node=None,
migration=None, clean_shutdown=True,
@ -4762,7 +4762,7 @@ class ComputeTestCase(BaseTestCase,
def _instance_save0(expected_task_state=None):
self.assertEqual(task_states.RESIZE_MIGRATED,
expected_task_state)
self.assertEqual(instance_type['id'],
self.assertEqual(flavor['id'],
instance.instance_type_id)
self.assertEqual(task_states.RESIZE_FINISH,
instance.task_state)
@ -4932,11 +4932,11 @@ class ComputeTestCase(BaseTestCase,
jsonutils.dumps(connection_info))
# begin resize
instance_type = self.default_flavor
flavor = self.default_flavor
instance.task_state = task_states.RESIZE_PREP
instance.save()
self.compute.prep_resize(self.context, instance=instance,
flavor=instance_type,
flavor=flavor,
image={}, request_spec=request_spec,
filter_properties={}, node=None,
clean_shutdown=True, migration=None,
@ -4953,7 +4953,8 @@ class ComputeTestCase(BaseTestCase,
instance.uuid, 'pre-migrating')
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={},
flavor=jsonutils.to_primitive(instance_type),
# TODO(stephenfin): Why a JSON string?
flavor=jsonutils.to_primitive(flavor),
clean_shutdown=True, request_spec=request_spec)
# assert bdm is unchanged
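# For orientation on the TODO above: to_primitive recursively reduces
# datetimes, sets and nested containers to JSON-safe builtins, which is all
# the legacy call path needs. A minimal sketch, assuming oslo.serialization
# is importable; the dict below is illustrative, not a fixture:
from oslo_serialization import jsonutils
assert jsonutils.to_primitive({'id': 2, 'swap': 0}) == {'id': 2, 'swap': 0}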
@ -5020,12 +5021,12 @@ class ComputeTestCase(BaseTestCase,
old_flavor_name = 'm1.tiny'
instance = self._create_fake_instance_obj(type_name=old_flavor_name)
instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
request_spec = objects.RequestSpec()
self.compute.prep_resize(self.context, instance=instance,
flavor=instance_type,
flavor=flavor,
image={},
request_spec=request_spec,
filter_properties={},
@ -5053,7 +5054,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual(old_flavor['root_gb'], instance.root_gb)
self.assertEqual(old_flavor['ephemeral_gb'], instance.ephemeral_gb)
self.assertEqual(old_flavor['id'], instance.instance_type_id)
self.assertNotEqual(instance_type['id'], instance.instance_type_id)
self.assertNotEqual(flavor['id'], instance.instance_type_id)
def test_set_instance_info(self):
old_flavor_name = 'm1.tiny'
@ -5482,10 +5483,10 @@ class ComputeTestCase(BaseTestCase,
instance.numa_topology = numa_topology
instance.save()
new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
new_flavor_ref = flavors.get_flavor_by_flavor_id(3)
self.compute.prep_resize(self.context,
instance=instance,
flavor=new_instance_type_ref,
flavor=new_flavor_ref,
image={}, request_spec=request_spec,
filter_properties={}, node=None, clean_shutdown=True,
migration=None, host_list=None)
@ -5493,7 +5494,7 @@ class ComputeTestCase(BaseTestCase,
# Memory usage should increase after the resize as well
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
memory_mb_used + flavor.memory_mb +
new_instance_type_ref.memory_mb)
new_flavor_ref.memory_mb)
migration = objects.Migration.get_by_instance_and_status(
self.context.elevated(),
@ -5512,7 +5513,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
flavor=new_instance_type_ref,
flavor=new_flavor_ref,
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
@ -5523,7 +5524,7 @@ class ComputeTestCase(BaseTestCase,
# Memory usage shouldn't have changed
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
memory_mb_used + flavor.memory_mb +
new_instance_type_ref.memory_mb)
new_flavor_ref.memory_mb)
# Prove that the instance size is now the new size
flavor = objects.Flavor.get_by_id(self.context,
@ -5548,7 +5549,7 @@ class ComputeTestCase(BaseTestCase,
# Resources from the migration (based on initial flavor) should
# be freed now
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
memory_mb_used + new_instance_type_ref.memory_mb)
memory_mb_used + new_flavor_ref.memory_mb)
mock_notify.assert_has_calls([
mock.call(self.context, instance,
@ -5819,10 +5820,10 @@ class ComputeTestCase(BaseTestCase,
instance.numa_topology = numa_topology
instance.save()
new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
new_flavor_ref = flavors.get_flavor_by_flavor_id(3)
self.compute.prep_resize(self.context,
instance=instance,
flavor=new_instance_type_ref,
flavor=new_flavor_ref,
image={}, request_spec=request_spec,
filter_properties={}, node=None,
migration=None, clean_shutdown=True, host_list=[])
@ -5830,7 +5831,7 @@ class ComputeTestCase(BaseTestCase,
# Memory usage should increase after the resize as well
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
memory_mb_used + flavor.memory_mb +
new_instance_type_ref.memory_mb)
new_flavor_ref.memory_mb)
migration = objects.Migration.get_by_instance_and_status(
self.context.elevated(),
@ -5848,7 +5849,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
flavor=new_instance_type_ref,
flavor=new_flavor_ref,
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
@ -5859,11 +5860,11 @@ class ComputeTestCase(BaseTestCase,
# Memory usage shouldn't have changed
self.assertEqual(self.rt.compute_nodes[NODENAME].memory_mb_used,
memory_mb_used + flavor.memory_mb +
new_instance_type_ref.memory_mb)
new_flavor_ref.memory_mb)
# Prove that the instance size is now the new size
instance_type_ref = flavors.get_flavor_by_flavor_id(3)
self.assertEqual(instance_type_ref['flavorid'], '3')
flavor_ref = flavors.get_flavor_by_flavor_id(3)
self.assertEqual(flavor_ref['flavorid'], '3')
# Prove that the NUMA topology has also been updated to that of the new
# flavor - meaning None
self.assertIsNone(instance.numa_topology)
@ -5955,10 +5956,10 @@ class ComputeTestCase(BaseTestCase,
request_spec, {},
[], block_device_mapping=[])
new_instance_type_ref = flavors.get_flavor_by_flavor_id(3)
new_flavor_ref = flavors.get_flavor_by_flavor_id(3)
self.compute.prep_resize(self.context,
instance=instance,
flavor=new_instance_type_ref,
flavor=new_flavor_ref,
image={}, request_spec=request_spec,
filter_properties={}, node=None,
clean_shutdown=True, migration=None,
@ -5976,7 +5977,7 @@ class ComputeTestCase(BaseTestCase,
self.compute.resize_instance(self.context, instance=instance,
migration=migration,
image={},
flavor=new_instance_type_ref,
flavor=new_flavor_ref,
clean_shutdown=True,
request_spec=request_spec)
self.compute.finish_resize(self.context,
@ -13084,13 +13085,13 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
self.compute_api = compute.API()
self.inst_type = objects.Flavor.get_by_name(self.context, 'm1.small')
def test_can_build_instance_from_visible_instance_type(self):
def test_can_build_instance_from_visible_flavor(self):
self.inst_type['disabled'] = False
# Assert that exception.FlavorNotFound is not raised
self.compute_api.create(self.context, self.inst_type,
image_href=uuids.image_instance)
def test_cannot_build_instance_from_disabled_instance_type(self):
def test_cannot_build_instance_from_disabled_flavor(self):
self.inst_type['disabled'] = True
self.assertRaises(exception.FlavorNotFound,
self.compute_api.create, self.context, self.inst_type, None)
@ -13099,7 +13100,7 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
new=mock.Mock(return_value=obj_fields.HostStatus.UP))
@mock.patch('nova.compute.api.API._validate_flavor_image_nostatus')
@mock.patch('nova.objects.RequestSpec')
def test_can_resize_to_visible_instance_type(self, mock_reqspec,
def test_can_resize_to_visible_flavor(self, mock_reqspec,
mock_validate):
instance = self._create_fake_instance_obj()
orig_get_flavor_by_flavor_id =\
@ -13107,11 +13108,11 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
read_deleted="yes"):
instance_type = orig_get_flavor_by_flavor_id(flavor_id,
flavor = orig_get_flavor_by_flavor_id(flavor_id,
ctxt,
read_deleted)
instance_type['disabled'] = False
return instance_type
flavor['disabled'] = False
return flavor
self.stub_out('nova.compute.flavors.get_flavor_by_flavor_id',
fake_get_flavor_by_flavor_id)
@ -13121,18 +13122,18 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
@mock.patch('nova.compute.api.API.get_instance_host_status',
new=mock.Mock(return_value=obj_fields.HostStatus.UP))
def test_cannot_resize_to_disabled_instance_type(self):
def test_cannot_resize_to_disabled_flavor(self):
instance = self._create_fake_instance_obj()
orig_get_flavor_by_flavor_id = \
flavors.get_flavor_by_flavor_id
def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
read_deleted="yes"):
instance_type = orig_get_flavor_by_flavor_id(flavor_id,
flavor = orig_get_flavor_by_flavor_id(flavor_id,
ctxt,
read_deleted)
instance_type['disabled'] = True
return instance_type
flavor['disabled'] = True
return flavor
self.stub_out('nova.compute.flavors.get_flavor_by_flavor_id',
fake_get_flavor_by_flavor_id)
@ -13153,7 +13154,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
super(ComputeRescheduleResizeOrReraiseTestCase, self).setUp()
self.instance = self._create_fake_instance_obj()
self.instance_uuid = self.instance['uuid']
self.instance_type = objects.Flavor.get_by_name(
self.flavor = objects.Flavor.get_by_name(
context.get_admin_context(), 'm1.tiny')
self.request_spec = objects.RequestSpec()
@ -13169,14 +13170,14 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
self.compute.prep_resize(self.context, image=None,
instance=inst_obj,
flavor=self.instance_type,
flavor=self.flavor,
request_spec=self.request_spec,
filter_properties={}, migration=mock.Mock(),
node=None,
clean_shutdown=True, host_list=None)
mock_res.assert_called_once_with(mock.ANY, inst_obj, mock.ANY,
self.instance_type,
self.flavor,
self.request_spec, {}, None)
def test_reschedule_resize_or_reraise_no_filter_properties(self):
@ -13195,7 +13196,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
# because we're not retrying, we should re-raise the exception
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
self.instance, exc_info, self.instance_type,
self.instance, exc_info, self.flavor,
self.request_spec, filter_properties, None)
def test_reschedule_resize_or_reraise_no_retry_info(self):
@ -13214,7 +13215,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
# because we're not retrying, we should re-raise the exception
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
self.instance, exc_info, self.instance_type,
self.instance, exc_info, self.flavor,
self.request_spec, filter_properties, None)
@mock.patch.object(compute_manager.ComputeManager, '_instance_update')
@ -13236,14 +13237,14 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
self.assertRaises(test.TestingException,
self.compute._reschedule_resize_or_reraise, self.context,
self.instance, exc_info, self.instance_type,
self.instance, exc_info, self.flavor,
self.request_spec, filter_properties, None)
mock_update.assert_called_once_with(
self.context, mock.ANY, task_state=task_states.RESIZE_PREP)
mock_resize.assert_called_once_with(
self.context, mock.ANY,
{'filter_properties': filter_properties}, self.instance_type,
{'filter_properties': filter_properties}, self.flavor,
request_spec=self.request_spec, host_list=None)
mock_notify.assert_called_once_with(
self.context, self.instance, 'fake-mini', action='resize',
@ -13266,14 +13267,14 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
exc_info = sys.exc_info()
self.compute._reschedule_resize_or_reraise(
self.context, self.instance, exc_info, self.instance_type,
self.context, self.instance, exc_info, self.flavor,
self.request_spec, filter_properties, None)
mock_update.assert_called_once_with(
self.context, mock.ANY, task_state=task_states.RESIZE_PREP)
mock_resize.assert_called_once_with(
self.context, mock.ANY,
{'filter_properties': filter_properties}, self.instance_type,
{'filter_properties': filter_properties}, self.flavor,
request_spec=self.request_spec, host_list=None)
mock_notify.assert_called_once_with(
self.context, self.instance, 'fake-mini', action='resize',
@ -13819,91 +13820,91 @@ class CheckRequestedImageTestCase(test.TestCase):
self.context = context.RequestContext(
'fake_user_id', 'fake_project_id')
self.instance_type = objects.Flavor.get_by_name(self.context,
self.flavor = objects.Flavor.get_by_name(self.context,
'm1.small')
self.instance_type['memory_mb'] = 64
self.instance_type['root_gb'] = 1
self.flavor['memory_mb'] = 64
self.flavor['root_gb'] = 1
def test_no_image_specified(self):
self.compute_api._validate_flavor_image(self.context, None, None,
self.instance_type, None)
self.flavor, None)
def test_image_status_must_be_active(self):
image = dict(id=uuids.image_id, status='foo')
self.assertRaises(exception.ImageNotActive,
self.compute_api._validate_flavor_image, self.context,
image['id'], image, self.instance_type, None)
image['id'], image, self.flavor, None)
image['status'] = 'active'
self.compute_api._validate_flavor_image(self.context, image['id'],
image, self.instance_type, None)
image, self.flavor, None)
def test_image_min_ram_check(self):
image = dict(id=uuids.image_id, status='active', min_ram='65')
self.assertRaises(exception.FlavorMemoryTooSmall,
self.compute_api._validate_flavor_image, self.context,
image['id'], image, self.instance_type, None)
image['id'], image, self.flavor, None)
image['min_ram'] = '64'
self.compute_api._validate_flavor_image(self.context, image['id'],
image, self.instance_type, None)
image, self.flavor, None)
def test_image_min_disk_check(self):
image = dict(id=uuids.image_id, status='active', min_disk='2')
self.assertRaises(exception.FlavorDiskSmallerThanMinDisk,
self.compute_api._validate_flavor_image, self.context,
image['id'], image, self.instance_type, None)
image['id'], image, self.flavor, None)
image['min_disk'] = '1'
self.compute_api._validate_flavor_image(self.context, image['id'],
image, self.instance_type, None)
image, self.flavor, None)
def test_image_too_large(self):
image = dict(id=uuids.image_id, status='active', size='1073741825')
self.assertRaises(exception.FlavorDiskSmallerThanImage,
self.compute_api._validate_flavor_image, self.context,
image['id'], image, self.instance_type, None)
image['id'], image, self.flavor, None)
image['size'] = '1073741824'
self.compute_api._validate_flavor_image(self.context, image['id'],
image, self.instance_type, None)
image, self.flavor, None)
def test_root_gb_zero_disables_size_check(self):
self.policy.set_rules({
servers_policy.ZERO_DISK_FLAVOR: base_policy.RULE_ADMIN_OR_OWNER
}, overwrite=False)
self.instance_type['root_gb'] = 0
self.flavor['root_gb'] = 0
image = dict(id=uuids.image_id, status='active', size='1073741825')
self.compute_api._validate_flavor_image(self.context, image['id'],
image, self.instance_type, None)
image, self.flavor, None)
def test_root_gb_zero_disables_min_disk(self):
self.policy.set_rules({
servers_policy.ZERO_DISK_FLAVOR: base_policy.RULE_ADMIN_OR_OWNER
}, overwrite=False)
self.instance_type['root_gb'] = 0
self.flavor['root_gb'] = 0
image = dict(id=uuids.image_id, status='active', min_disk='2')
self.compute_api._validate_flavor_image(self.context, image['id'],
image, self.instance_type, None)
image, self.flavor, None)
def test_config_drive_option(self):
image = {'id': uuids.image_id, 'status': 'active'}
image['properties'] = {'img_config_drive': 'optional'}
self.compute_api._validate_flavor_image(self.context, image['id'],
image, self.instance_type, None)
image, self.flavor, None)
image['properties'] = {'img_config_drive': 'mandatory'}
self.compute_api._validate_flavor_image(self.context, image['id'],
image, self.instance_type, None)
image, self.flavor, None)
image['properties'] = {'img_config_drive': 'bar'}
self.assertRaises(exception.InvalidImageConfigDrive,
self.compute_api._validate_flavor_image,
self.context, image['id'], image, self.instance_type,
self.context, image['id'], image, self.flavor,
None)
def test_volume_blockdevicemapping(self):
@ -13913,42 +13914,42 @@ class CheckRequestedImageTestCase(test.TestCase):
# larger than the flavor root disk.
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=self.instance_type.root_gb * units.Gi,
min_disk=self.instance_type.root_gb + 1)
size=self.flavor.root_gb * units.Gi,
min_disk=self.flavor.root_gb + 1)
volume_uuid = uuids.fake_2
root_bdm = block_device_obj.BlockDeviceMapping(
source_type='volume', destination_type='volume',
volume_id=volume_uuid, volume_size=self.instance_type.root_gb + 1)
volume_id=volume_uuid, volume_size=self.flavor.root_gb + 1)
self.compute_api._validate_flavor_image(self.context, image['id'],
image, self.instance_type, root_bdm)
image, self.flavor, root_bdm)
def test_volume_blockdevicemapping_min_disk(self):
# A bdm object volume smaller than the image's min_disk should not be
# allowed
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=self.instance_type.root_gb * units.Gi,
min_disk=self.instance_type.root_gb + 1)
size=self.flavor.root_gb * units.Gi,
min_disk=self.flavor.root_gb + 1)
volume_uuid = uuids.fake_2
root_bdm = block_device_obj.BlockDeviceMapping(
source_type='image', destination_type='volume',
image_id=image_uuid, volume_id=volume_uuid,
volume_size=self.instance_type.root_gb)
volume_size=self.flavor.root_gb)
self.assertRaises(exception.VolumeSmallerThanMinDisk,
self.compute_api._validate_flavor_image,
self.context, image_uuid, image, self.instance_type,
self.context, image_uuid, image, self.flavor,
root_bdm)
def test_volume_blockdevicemapping_min_disk_no_size(self):
# We should allow a root volume whose size is not given
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=self.instance_type.root_gb * units.Gi,
min_disk=self.instance_type.root_gb)
size=self.flavor.root_gb * units.Gi,
min_disk=self.flavor.root_gb)
volume_uuid = uuids.fake_2
root_bdm = block_device_obj.BlockDeviceMapping(
@ -13956,27 +13957,27 @@ class CheckRequestedImageTestCase(test.TestCase):
volume_id=volume_uuid, volume_size=None)
self.compute_api._validate_flavor_image(self.context, image['id'],
image, self.instance_type, root_bdm)
image, self.flavor, root_bdm)
def test_image_blockdevicemapping(self):
# Test that we can succeed when passing bdms, and the root bdm isn't a
# volume
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=self.instance_type.root_gb * units.Gi, min_disk=0)
size=self.flavor.root_gb * units.Gi, min_disk=0)
root_bdm = block_device_obj.BlockDeviceMapping(
source_type='image', destination_type='local', image_id=image_uuid)
self.compute_api._validate_flavor_image(self.context, image['id'],
image, self.instance_type, root_bdm)
image, self.flavor, root_bdm)
def test_image_blockdevicemapping_too_big(self):
# We should do a size check against flavor if we were passed bdms but
# the root bdm isn't a volume
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=(self.instance_type.root_gb + 1) * units.Gi,
size=(self.flavor.root_gb + 1) * units.Gi,
min_disk=0)
root_bdm = block_device_obj.BlockDeviceMapping(
@ -13985,14 +13986,14 @@ class CheckRequestedImageTestCase(test.TestCase):
self.assertRaises(exception.FlavorDiskSmallerThanImage,
self.compute_api._validate_flavor_image,
self.context, image['id'],
image, self.instance_type, root_bdm)
image, self.flavor, root_bdm)
def test_image_blockdevicemapping_min_disk(self):
# We should do a min_disk check against flavor if we were passed bdms
# but the root bdm isn't a volume
image_uuid = uuids.fake
image = dict(id=image_uuid, status='active',
size=0, min_disk=self.instance_type.root_gb + 1)
size=0, min_disk=self.flavor.root_gb + 1)
root_bdm = block_device_obj.BlockDeviceMapping(
source_type='image', destination_type='local', image_id=image_uuid)
@ -14000,7 +14001,7 @@ class CheckRequestedImageTestCase(test.TestCase):
self.assertRaises(exception.FlavorDiskSmallerThanMinDisk,
self.compute_api._validate_flavor_image,
self.context, image['id'],
image, self.instance_type, root_bdm)
image, self.flavor, root_bdm)
@mock.patch('nova.virt.hardware.get_dedicated_cpu_constraint')
def test_cpu_policy(self, dedicated_cpu_mock):
@ -14014,11 +14015,11 @@ class CheckRequestedImageTestCase(test.TestCase):
dedicated_cpu_mock.return_value = None
self.compute_api._validate_flavor_image(
self.context, image['id'], image, self.instance_type, None)
self.context, image['id'], image, self.flavor, None)
image['properties'] = {'hw_cpu_policy': 'bar'}
self.assertRaises(exception.InvalidRequest,
self.compute_api._validate_flavor_image,
self.context, image['id'], image, self.instance_type,
self.context, image['id'], image, self.flavor,
None)
def test_cpu_thread_policy(self):
@ -14028,11 +14029,11 @@ class CheckRequestedImageTestCase(test.TestCase):
for v in obj_fields.CPUThreadAllocationPolicy.ALL:
image['properties']['hw_cpu_thread_policy'] = v
self.compute_api._validate_flavor_image(
self.context, image['id'], image, self.instance_type, None)
self.context, image['id'], image, self.flavor, None)
image['properties']['hw_cpu_thread_policy'] = 'bar'
self.assertRaises(exception.InvalidRequest,
self.compute_api._validate_flavor_image,
self.context, image['id'], image, self.instance_type,
self.context, image['id'], image, self.flavor,
None)
image['properties'] = {
@ -14041,5 +14042,5 @@ class CheckRequestedImageTestCase(test.TestCase):
obj_fields.CPUThreadAllocationPolicy.ISOLATE}
self.assertRaises(exception.CPUThreadPolicyConfigurationInvalid,
self.compute_api._validate_flavor_image,
self.context, image['id'], image, self.instance_type,
self.context, image['id'], image, self.flavor,
None)
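# A rough sketch of the checks the tests above exercise; this is an
# illustration under stated assumptions, not Nova's _validate_flavor_image
# (exception names are stand-ins raised via ValueError):
GiB = 1024 ** 3

def sketch_validate_flavor_image(image, flavor, root_is_volume=False):
    if image['status'] != 'active':
        raise ValueError('ImageNotActive')
    if int(image.get('min_ram', 0)) > flavor['memory_mb']:
        raise ValueError('FlavorMemoryTooSmall')
    # a root_gb of 0 disables both disk checks, as does booting from volume
    if flavor['root_gb'] and not root_is_volume:
        if int(image.get('min_disk', 0)) > flavor['root_gb']:
            raise ValueError('FlavorDiskSmallerThanMinDisk')
        if int(image.get('size', 0)) > flavor['root_gb'] * GiB:
            raise ValueError('FlavorDiskSmallerThanImage')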

View File

@ -3248,7 +3248,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_get_nodename.assert_called_once_with(instance)
mock_drop_move_claim.assert_called_once_with(
self.context, instance, 'fake-node',
instance_type=instance.flavor)
flavor=instance.flavor)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(fake_driver.FakeDriver,

View File

@ -104,7 +104,7 @@ _COMPUTE_NODE_FIXTURES = [
),
]
_INSTANCE_TYPE_FIXTURES = {
_FLAVOR_FIXTURES = {
1: {
'id': 1,
'flavorid': 'fakeid-1',
@ -136,7 +136,7 @@ _INSTANCE_TYPE_FIXTURES = {
}
_INSTANCE_TYPE_OBJ_FIXTURES = {
_FLAVOR_OBJ_FIXTURES = {
1: objects.Flavor(id=1, flavorid='fakeid-1', name='fake1.small',
memory_mb=128, vcpus=1, root_gb=1,
ephemeral_gb=0, swap=0, rxtx_factor=0,
@ -199,50 +199,50 @@ _INSTANCE_FIXTURES = [
host=_HOSTNAME,
node=_NODENAME,
uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
memory_mb=_FLAVOR_FIXTURES[1]['memory_mb'],
vcpus=_FLAVOR_FIXTURES[1]['vcpus'],
root_gb=_FLAVOR_FIXTURES[1]['root_gb'],
ephemeral_gb=_FLAVOR_FIXTURES[1]['ephemeral_gb'],
numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
pci_requests=None,
pci_devices=None,
instance_type_id=1,
instance_type_id=_FLAVOR_OBJ_FIXTURES[1].id,
flavor=_FLAVOR_OBJ_FIXTURES[1],
old_flavor=_FLAVOR_OBJ_FIXTURES[1],
new_flavor=_FLAVOR_OBJ_FIXTURES[1],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=None,
os_type='fake-os', # Used by the stats collector.
project_id='fake-project', # Used by the stats collector.
user_id=uuids.user_id,
flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1],
deleted = False,
resources = None,
deleted=False,
resources=None,
),
objects.Instance(
id=2,
host=_HOSTNAME,
node=_NODENAME,
uuid='33805b54-dea6-47b8-acb2-22aeb1b57919',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
memory_mb=_FLAVOR_FIXTURES[2]['memory_mb'],
vcpus=_FLAVOR_FIXTURES[2]['vcpus'],
root_gb=_FLAVOR_FIXTURES[2]['root_gb'],
ephemeral_gb=_FLAVOR_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
instance_type_id=2,
instance_type_id=_FLAVOR_OBJ_FIXTURES[2].id,
flavor=_FLAVOR_OBJ_FIXTURES[2],
old_flavor=_FLAVOR_OBJ_FIXTURES[2],
new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.DELETED,
power_state=power_state.SHUTDOWN,
task_state=None,
os_type='fake-os',
project_id='fake-project-2',
user_id=uuids.user_id,
flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
deleted = False,
resources = None,
deleted=False,
resources=None,
),
]
@ -312,24 +312,24 @@ _MIGRATION_INSTANCE_FIXTURES = {
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
memory_mb=_FLAVOR_FIXTURES[1]['memory_mb'],
vcpus=_FLAVOR_FIXTURES[1]['vcpus'],
root_gb=_FLAVOR_FIXTURES[1]['root_gb'],
ephemeral_gb=_FLAVOR_FIXTURES[1]['ephemeral_gb'],
numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
pci_requests=None,
pci_devices=None,
instance_type_id=1,
instance_type_id=_FLAVOR_OBJ_FIXTURES[1].id,
flavor=_FLAVOR_OBJ_FIXTURES[1],
old_flavor=_FLAVOR_OBJ_FIXTURES[1],
new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
resources = None,
resources=None,
),
# dest-only
'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.Instance(
@ -337,23 +337,23 @@ _MIGRATION_INSTANCE_FIXTURES = {
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
memory_mb=_FLAVOR_FIXTURES[2]['memory_mb'],
vcpus=_FLAVOR_FIXTURES[2]['vcpus'],
root_gb=_FLAVOR_FIXTURES[2]['root_gb'],
ephemeral_gb=_FLAVOR_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
instance_type_id=2,
instance_type_id=_FLAVOR_OBJ_FIXTURES[2].id,
flavor=_FLAVOR_OBJ_FIXTURES[2],
old_flavor=_FLAVOR_OBJ_FIXTURES[1],
new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
resources=None,
),
# source-and-dest
@ -362,23 +362,23 @@ _MIGRATION_INSTANCE_FIXTURES = {
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
memory_mb=_FLAVOR_FIXTURES[2]['memory_mb'],
vcpus=_FLAVOR_FIXTURES[2]['vcpus'],
root_gb=_FLAVOR_FIXTURES[2]['root_gb'],
ephemeral_gb=_FLAVOR_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
instance_type_id=2,
instance_type_id=_FLAVOR_OBJ_FIXTURES[2].id,
flavor=_FLAVOR_OBJ_FIXTURES[2],
old_flavor=_FLAVOR_OBJ_FIXTURES[1],
new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
resources=None,
),
# dest-only-evac
@ -387,23 +387,23 @@ _MIGRATION_INSTANCE_FIXTURES = {
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
memory_mb=_FLAVOR_FIXTURES[2]['memory_mb'],
vcpus=_FLAVOR_FIXTURES[2]['vcpus'],
root_gb=_FLAVOR_FIXTURES[2]['root_gb'],
ephemeral_gb=_FLAVOR_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
instance_type_id=2,
instance_type_id=_FLAVOR_OBJ_FIXTURES[2].id,
flavor=_FLAVOR_OBJ_FIXTURES[2],
old_flavor=_FLAVOR_OBJ_FIXTURES[1],
new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.REBUILDING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
resources=None,
),
}
@ -2388,7 +2388,7 @@ class TestResize(BaseTestCase):
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
instance = _INSTANCE_FIXTURES[0].obj_clone()
instance.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
instance.new_flavor = _FLAVOR_OBJ_FIXTURES[2]
# This migration context is fine, it points to the first instance
# fixture and indicates a source-and-dest resize.
mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[instance.uuid]
@ -2409,7 +2409,7 @@ class TestResize(BaseTestCase):
status='migrating',
uuid=uuids.migration,
)
new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
new_flavor = _FLAVOR_OBJ_FIXTURES[2]
# not using mock.sentinel.ctx because resize_claim calls #elevated
ctx = mock.MagicMock()
@ -2499,7 +2499,7 @@ class TestResize(BaseTestCase):
instance = _INSTANCE_FIXTURES[0].obj_clone()
old_flavor = instance.flavor
instance.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
instance.new_flavor = _FLAVOR_OBJ_FIXTURES[2]
instance.pci_requests = objects.InstancePCIRequests(requests=[])
# allocations for create
@ -2559,7 +2559,7 @@ class TestResize(BaseTestCase):
status='migrating',
uuid=uuids.migration,
)
new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
new_flavor = _FLAVOR_OBJ_FIXTURES[2]
# Resize instance
with test.nested(
@ -2674,7 +2674,7 @@ class TestResize(BaseTestCase):
instance = _INSTANCE_FIXTURES[0].obj_clone()
instance.task_state = task_states.RESIZE_MIGRATING
instance.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
instance.new_flavor = _FLAVOR_OBJ_FIXTURES[2]
# A destination-only migration
migration = objects.Migration(
@ -2698,7 +2698,7 @@ class TestResize(BaseTestCase):
old_numa_topology=None,
)
instance.migration_context = mig_context_obj
new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
new_flavor = _FLAVOR_OBJ_FIXTURES[2]
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
@ -2761,7 +2761,7 @@ class TestResize(BaseTestCase):
instance = _INSTANCE_FIXTURES[0].obj_clone()
instance.task_state = task_states.RESIZE_MIGRATING
instance.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
instance.new_flavor = _FLAVOR_OBJ_FIXTURES[2]
instance.migration_context = objects.MigrationContext()
instance.migration_context.new_pci_devices = objects.PciDeviceList(
objects=pci_devs)
@ -2835,7 +2835,7 @@ class TestResize(BaseTestCase):
instance1.id = 1
instance1.uuid = uuids.instance1
instance1.task_state = task_states.RESIZE_MIGRATING
instance1.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
instance1.new_flavor = _FLAVOR_OBJ_FIXTURES[2]
migration1 = objects.Migration(
id=1,
@ -2858,7 +2858,7 @@ class TestResize(BaseTestCase):
old_numa_topology=None,
)
instance1.migration_context = mig_context_obj1
flavor1 = _INSTANCE_TYPE_OBJ_FIXTURES[2]
flavor1 = _FLAVOR_OBJ_FIXTURES[2]
# Instance #2 is resizing to instance type 1 which has 1 vCPU, 128MB
# RAM and 1GB root disk.
@ -2866,8 +2866,8 @@ class TestResize(BaseTestCase):
instance2.id = 2
instance2.uuid = uuids.instance2
instance2.task_state = task_states.RESIZE_MIGRATING
instance2.old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
instance2.new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1]
instance2.old_flavor = _FLAVOR_OBJ_FIXTURES[2]
instance2.new_flavor = _FLAVOR_OBJ_FIXTURES[1]
migration2 = objects.Migration(
id=2,
@ -2890,7 +2890,7 @@ class TestResize(BaseTestCase):
old_numa_topology=None,
)
instance2.migration_context = mig_context_obj2
flavor2 = _INSTANCE_TYPE_OBJ_FIXTURES[1]
flavor2 = _FLAVOR_OBJ_FIXTURES[1]
expected = self.rt.compute_nodes[_NODENAME].obj_clone()
expected.vcpus_used = (expected.vcpus_used +
@ -2990,23 +2990,23 @@ class TestRebuild(BaseTestCase):
host=None,
node=None,
uuid='abef5b54-dea6-47b8-acb2-22aeb1b57919',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
memory_mb=_FLAVOR_FIXTURES[2]['memory_mb'],
vcpus=_FLAVOR_FIXTURES[2]['vcpus'],
root_gb=_FLAVOR_FIXTURES[2]['root_gb'],
ephemeral_gb=_FLAVOR_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
instance_type_id=2,
instance_type_id=_FLAVOR_OBJ_FIXTURES[2].id,
flavor=_FLAVOR_OBJ_FIXTURES[2],
old_flavor=_FLAVOR_OBJ_FIXTURES[2],
new_flavor=_FLAVOR_OBJ_FIXTURES[2],
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.REBUILDING,
os_type='fake-os',
project_id='fake-project',
flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
new_flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2],
resources = None,
resources=None,
)
# not using mock.sentinel.ctx because resize_claim calls #elevated

View File

@ -40,10 +40,11 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
super(ComputeRpcAPITestCase, self).setUp()
self.context = context.get_admin_context()
self.fake_flavor_obj = fake_flavor.fake_flavor_obj(self.context)
self.fake_flavor = jsonutils.to_primitive(self.fake_flavor_obj)
instance_attr = {'host': 'fake_host',
'instance_type_id': self.fake_flavor_obj['id'],
'instance_type': self.fake_flavor_obj}
instance_attr = {
'host': 'fake_host',
'instance_type_id': self.fake_flavor_obj['id'],
'flavor': self.fake_flavor_obj,
}
self.fake_instance_obj = fake_instance.fake_instance_obj(self.context,
**instance_attr)
self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
@ -920,7 +921,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
rpcapi.resize_instance(
ctxt, instance=self.fake_instance_obj,
migration=mock.sentinel.migration, image='image',
flavor='instance_type', clean_shutdown=True,
flavor=self.fake_flavor_obj, clean_shutdown=True,
request_spec=self.fake_request_spec_obj)
mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
@ -930,7 +931,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
mock_cctx.cast.assert_called_with(
ctxt, 'resize_instance', instance=self.fake_instance_obj,
migration=mock.sentinel.migration, image='image',
instance_type='instance_type', clean_shutdown=True)
instance_type=self.fake_flavor_obj, clean_shutdown=True)
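# The version dance above, sketched: when the peer cannot handle the 6.0 RPC
# API, the client renames the new 'flavor' kwarg back to 'instance_type'
# before casting. A hypothetical wrapper, not the rpcapi code itself:
def cast_resize_instance(cctxt, ctxt, can_send_6_0, **kwargs):
    if not can_send_6_0:
        kwargs['instance_type'] = kwargs.pop('flavor')
    cctxt.cast(ctxt, 'resize_instance', **kwargs)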
def test_resume_instance(self):
self._test_compute_api('resume_instance', 'cast',

View File

@ -40,8 +40,8 @@ class MigrationTaskTestCase(test.NoDBTestCase):
self.context.cell_uuid = uuids.cell1
self.flavor = fake_flavor.fake_flavor_obj(self.context)
self.flavor.extra_specs = {'extra_specs': 'fake'}
inst = fake_instance.fake_db_instance(image_ref='image_ref',
instance_type=self.flavor)
inst = fake_instance.fake_db_instance(
image_ref='image_ref', flavor=self.flavor)
inst_object = objects.Instance(
flavor=self.flavor,
numa_topology=None,

View File

@ -457,21 +457,21 @@ class _BaseTaskTestCase(object):
"""
fake_spec = objects.RequestSpec()
mock_fp.return_value = fake_spec
instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
# NOTE(danms): Avoid datetime timezone issues with converted flavors
instance_type.created_at = None
flavor.created_at = None
instances = [objects.Instance(context=self.context,
id=i,
uuid=uuids.fake,
flavor=instance_type) for i in range(2)]
instance_type_p = obj_base.obj_to_primitive(instance_type)
flavor=flavor) for i in range(2)]
flavor_p = obj_base.obj_to_primitive(flavor)
instance_properties = obj_base.obj_to_primitive(instances[0])
instance_properties['system_metadata'] = flavors.save_flavor_info(
{}, instance_type)
{}, flavor)
spec = {'image': {'fake_data': 'should_pass_silently'},
'instance_properties': instance_properties,
'instance_type': instance_type_p,
'instance_type': flavor_p,
'num_instances': 2}
filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}}
sched_return = copy.deepcopy(fake_host_lists2)
@ -564,16 +564,16 @@ class _BaseTaskTestCase(object):
"""
fake_spec = objects.RequestSpec()
mock_fp.return_value = fake_spec
instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
# NOTE(danms): Avoid datetime timezone issues with converted flavors
instance_type.created_at = None
flavor.created_at = None
instances = [objects.Instance(context=self.context,
id=i,
uuid=uuids.fake,
flavor=instance_type) for i in range(2)]
flavor=flavor) for i in range(2)]
instance_properties = obj_base.obj_to_primitive(instances[0])
instance_properties['system_metadata'] = flavors.save_flavor_info(
{}, instance_type)
{}, flavor)
sched_return = copy.deepcopy(fake_host_lists2)
mock_schedule.return_value = sched_return

View File

@ -28,8 +28,8 @@ def stub_out(test, funcs):
def stub_out_db_instance_api(test, injected=True):
"""Stubs out the db API for creating Instances."""
def _create_instance_type(**updates):
instance_type = {'id': 2,
def _create_flavor(**updates):
flavor = {'id': 2,
'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
@ -49,11 +49,11 @@ def stub_out_db_instance_api(test, injected=True):
'description': None
}
if updates:
instance_type.update(updates)
return instance_type
flavor.update(updates)
return flavor
INSTANCE_TYPES = {
'm1.tiny': _create_instance_type(
FLAVORS = {
'm1.tiny': _create_flavor(
id=2,
name='m1.tiny',
memory_mb=512,
@ -64,7 +64,7 @@ def stub_out_db_instance_api(test, injected=True):
flavorid=1,
rxtx_factor=1.0,
swap=0),
'm1.small': _create_instance_type(
'm1.small': _create_flavor(
id=5,
name='m1.small',
memory_mb=2048,
@ -75,7 +75,7 @@ def stub_out_db_instance_api(test, injected=True):
flavorid=2,
rxtx_factor=1.0,
swap=1024),
'm1.medium': _create_instance_type(
'm1.medium': _create_flavor(
id=1,
name='m1.medium',
memory_mb=4096,
@ -86,7 +86,7 @@ def stub_out_db_instance_api(test, injected=True):
flavorid=3,
rxtx_factor=1.0,
swap=0),
'm1.large': _create_instance_type(
'm1.large': _create_flavor(
id=3,
name='m1.large',
memory_mb=8192,
@ -97,7 +97,7 @@ def stub_out_db_instance_api(test, injected=True):
flavorid=4,
rxtx_factor=1.0,
swap=0),
'm1.xlarge': _create_instance_type(
'm1.xlarge': _create_flavor(
id=4,
name='m1.xlarge',
memory_mb=16384,
@ -110,15 +110,15 @@ def stub_out_db_instance_api(test, injected=True):
swap=0)}
def fake_flavor_get_all(*a, **k):
return INSTANCE_TYPES.values()
return FLAVORS.values()
@classmethod
def fake_flavor_get_by_name(cls, context, name):
return INSTANCE_TYPES[name]
return FLAVORS[name]
@classmethod
def fake_flavor_get(cls, context, id):
for inst_type in INSTANCE_TYPES.values():
for inst_type in FLAVORS.values():
if str(inst_type['id']) == str(id):
return inst_type
return None

View File

@ -43,11 +43,11 @@ def fake_db_secgroups(instance, names):
def fake_db_instance(**updates):
if 'instance_type' in updates:
if isinstance(updates['instance_type'], objects.Flavor):
flavor = updates['instance_type']
if 'flavor' in updates:
if isinstance(updates['flavor'], objects.Flavor):
flavor = updates['flavor']
else:
flavor = objects.Flavor(**updates['instance_type'])
flavor = objects.Flavor(**updates['flavor'])
flavorinfo = jsonutils.dumps({
'cur': flavor.obj_to_primitive(),
'old': None,

View File

@ -40,21 +40,21 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
def test_build_request_spec_without_image(self):
instance = {'uuid': uuids.instance}
instance_type = objects.Flavor(**test_flavor.fake_flavor)
flavor = objects.Flavor(**test_flavor.fake_flavor)
with mock.patch.object(flavors, 'extract_flavor') as mock_extract:
mock_extract.return_value = instance_type
mock_extract.return_value = flavor
request_spec = scheduler_utils.build_request_spec(None,
[instance])
mock_extract.assert_called_once_with({'uuid': uuids.instance})
self.assertEqual({}, request_spec['image'])
def test_build_request_spec_with_object(self):
instance_type = objects.Flavor()
flavor = objects.Flavor()
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(instance, 'get_flavor') as mock_get:
mock_get.return_value = instance_type
mock_get.return_value = flavor
request_spec = scheduler_utils.build_request_spec(None,
[instance])
mock_get.assert_called_once_with()
@ -134,23 +134,23 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
sched_hints = {'hint': ['over-there']}
forced_host = 'forced-host1'
forced_node = 'forced-node1'
instance_type = objects.Flavor()
flavor = objects.Flavor()
filt_props = scheduler_utils.build_filter_properties(sched_hints,
forced_host, forced_node, instance_type)
forced_host, forced_node, flavor)
self.assertEqual(sched_hints, filt_props['scheduler_hints'])
self.assertEqual([forced_host], filt_props['force_hosts'])
self.assertEqual([forced_node], filt_props['force_nodes'])
self.assertEqual(instance_type, filt_props['instance_type'])
self.assertEqual(flavor, filt_props['instance_type'])
def test_build_filter_properties_no_forced_host_no_force_node(self):
sched_hints = {'hint': ['over-there']}
forced_host = None
forced_node = None
instance_type = objects.Flavor()
flavor = objects.Flavor()
filt_props = scheduler_utils.build_filter_properties(sched_hints,
forced_host, forced_node, instance_type)
forced_host, forced_node, flavor)
self.assertEqual(sched_hints, filt_props['scheduler_hints'])
self.assertEqual(instance_type, filt_props['instance_type'])
self.assertEqual(flavor, filt_props['instance_type'])
self.assertNotIn('forced_host', filt_props)
self.assertNotIn('forced_node', filt_props)

View File

@ -40,7 +40,6 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase):
flavor.extra_specs = self.specs
flavor.create()
self.flavor = flavor
self.instance_type_id = flavor.id
self.flavorid = flavor.flavorid
def tearDown(self):
@ -48,28 +47,25 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase):
self.flavor.destroy()
super(InstanceTypeExtraSpecsTestCase, self).tearDown()
def test_instance_type_specs_get(self):
flavor = objects.Flavor.get_by_flavor_id(self.context,
self.flavorid)
def test_flavor_extra_specs_get(self):
flavor = objects.Flavor.get_by_flavor_id(self.context, self.flavorid)
self.assertEqual(self.specs, flavor.extra_specs)
def test_flavor_extra_specs_delete(self):
del self.specs["xpu_model"]
del self.flavor.extra_specs['xpu_model']
self.flavor.save()
flavor = objects.Flavor.get_by_flavor_id(self.context,
self.flavorid)
flavor = objects.Flavor.get_by_flavor_id(self.context, self.flavorid)
self.assertEqual(self.specs, flavor.extra_specs)
def test_instance_type_extra_specs_update(self):
def test_flavor_extra_specs_update(self):
self.specs["cpu_model"] = "Sandy Bridge"
self.flavor.extra_specs["cpu_model"] = "Sandy Bridge"
self.flavor.save()
flavor = objects.Flavor.get_by_flavor_id(self.context,
self.flavorid)
flavor = objects.Flavor.get_by_flavor_id(self.context, self.flavorid)
self.assertEqual(self.specs, flavor.extra_specs)
def test_instance_type_extra_specs_create(self):
def test_flavor_extra_specs_create(self):
net_attrs = {
"net_arch": "ethernet",
"net_mbps": "10000"
@ -77,15 +73,14 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase):
self.specs.update(net_attrs)
self.flavor.extra_specs.update(net_attrs)
self.flavor.save()
flavor = objects.Flavor.get_by_flavor_id(self.context,
self.flavorid)
flavor = objects.Flavor.get_by_flavor_id(self.context, self.flavorid)
self.assertEqual(self.specs, flavor.extra_specs)
def test_instance_type_get_with_extra_specs(self):
def test_flavor_get_with_extra_specs(self):
flavor = objects.Flavor.get_by_id(self.context, 5)
self.assertEqual(flavor.extra_specs, {})
def test_instance_type_get_by_name_with_extra_specs(self):
def test_flavor_get_by_name_with_extra_specs(self):
flavor = objects.Flavor.get_by_name(self.context,
"cg1.4xlarge")
self.assertEqual(flavor.extra_specs, self.specs)
@ -93,13 +88,13 @@ class InstanceTypeExtraSpecsTestCase(test.TestCase):
"m1.small")
self.assertEqual(flavor.extra_specs, {})
def test_instance_type_get_by_flavor_id_with_extra_specs(self):
def test_flavor_get_by_flavor_id_with_extra_specs(self):
flavor = objects.Flavor.get_by_flavor_id(self.context, 105)
self.assertEqual(flavor.extra_specs, self.specs)
flavor = objects.Flavor.get_by_flavor_id(self.context, 2)
self.assertEqual(flavor.extra_specs, {})
def test_instance_type_get_all(self):
def test_flavor_get_all(self):
flavors = objects.FlavorList.get_all(self.context)
name2specs = {flavor.name: flavor.extra_specs

View File

@ -22,7 +22,7 @@ from nova.objects import base as obj_base
from nova import test
class InstanceTypeTestCase(test.TestCase):
class FlavorTestCase(test.TestCase):
"""Test cases for flavor code."""
def test_will_not_get_instance_by_unknown_flavor_id(self):
# Ensure get by flavor raises error with wrong flavorid.
@ -31,39 +31,39 @@ class InstanceTypeTestCase(test.TestCase):
'unknown_flavor')
def test_will_get_instance_by_flavor_id(self):
default_instance_type = objects.Flavor.get_by_name(
default_flavor = objects.Flavor.get_by_name(
context.get_admin_context(), 'm1.small')
flavorid = default_instance_type.flavorid
flavorid = default_flavor.flavorid
fetched = flavors.get_flavor_by_flavor_id(flavorid)
self.assertIsInstance(fetched, objects.Flavor)
self.assertEqual(default_instance_type.flavorid, fetched.flavorid)
self.assertEqual(default_flavor.flavorid, fetched.flavorid)
class InstanceTypeToolsTest(test.TestCase):
class FlavorToolsTest(test.TestCase):
def setUp(self):
super(InstanceTypeToolsTest, self).setUp()
super().setUp()
self.context = context.get_admin_context()
def _dict_to_metadata(self, data):
return [{'key': key, 'value': value} for key, value in data.items()]
def _test_extract_flavor(self, prefix):
instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
instance_type_p = obj_base.obj_to_primitive(instance_type)
flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
flavor_p = obj_base.obj_to_primitive(flavor)
metadata = {}
flavors.save_flavor_info(metadata, instance_type, prefix)
flavors.save_flavor_info(metadata, flavor, prefix)
instance = {'system_metadata': self._dict_to_metadata(metadata)}
_instance_type = flavors.extract_flavor(instance, prefix)
_instance_type_p = obj_base.obj_to_primitive(_instance_type)
_flavor = flavors.extract_flavor(instance, prefix)
_flavor_p = obj_base.obj_to_primitive(_flavor)
props = flavors.system_metadata_flavor_props.keys()
for key in list(instance_type_p.keys()):
for key in list(flavor_p.keys()):
if key not in props:
del instance_type_p[key]
del flavor_p[key]
self.assertEqual(instance_type_p, _instance_type_p)
self.assertEqual(flavor_p, _flavor_p)
def test_extract_flavor(self):
self._test_extract_flavor('')
@ -79,47 +79,47 @@ class InstanceTypeToolsTest(test.TestCase):
self._test_extract_flavor('foo_')
def test_save_flavor_info(self):
instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
example = {}
example_prefix = {}
for key in flavors.system_metadata_flavor_props.keys():
example['instance_type_%s' % key] = instance_type[key]
example_prefix['fooinstance_type_%s' % key] = instance_type[key]
example['instance_type_%s' % key] = flavor[key]
example_prefix['fooinstance_type_%s' % key] = flavor[key]
metadata = {}
flavors.save_flavor_info(metadata, instance_type)
flavors.save_flavor_info(metadata, flavor)
self.assertEqual(example, metadata)
metadata = {}
flavors.save_flavor_info(metadata, instance_type, 'foo')
flavors.save_flavor_info(metadata, flavor, 'foo')
self.assertEqual(example_prefix, metadata)
def test_flavor_numa_extras_are_saved(self):
instance_type = objects.Flavor.get_by_name(self.context, 'm1.small')
instance_type['extra_specs'] = {
flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
flavor['extra_specs'] = {
'hw:numa_mem.0': '123',
'hw:numa_cpus.0': '456',
'hw:numa_mem.1': '789',
'hw:numa_cpus.1': 'ABC',
'foo': 'bar',
}
sysmeta = flavors.save_flavor_info({}, instance_type)
_instance_type = flavors.extract_flavor({'system_metadata': sysmeta})
sysmeta = flavors.save_flavor_info({}, flavor)
_flavor = flavors.extract_flavor({'system_metadata': sysmeta})
expected_extra_specs = {
'hw:numa_mem.0': '123',
'hw:numa_cpus.0': '456',
'hw:numa_mem.1': '789',
'hw:numa_cpus.1': 'ABC',
}
self.assertEqual(expected_extra_specs, _instance_type['extra_specs'])
self.assertEqual(expected_extra_specs, _flavor['extra_specs'])
class InstanceTypeFilteringTest(test.TestCase):
"""Test cases for the filter option available for instance_type_get_all."""
class FlavorFilteringTest(test.TestCase):
"""Test cases for the filter option available for FlavorList.get_all."""
def setUp(self):
super(InstanceTypeFilteringTest, self).setUp()
super().setUp()
self.context = context.get_admin_context()
def assertFilterResults(self, filters, expected):
@ -153,7 +153,7 @@ class InstanceTypeFilteringTest(test.TestCase):
self.assertFilterResults(filters, expected)
class CreateInstanceTypeTest(test.TestCase):
class CreateFlavorTest(test.TestCase):
def assertInvalidInput(self, *create_args, **create_kwargs):
self.assertRaises(exception.InvalidInput, flavors.create,

View File

@ -66,11 +66,12 @@ class NotificationsTestCase(test.TestCase):
self.decorated_function_called = False
def _wrapped_create(self, params=None):
instance_type = objects.Flavor.get_by_name(self.context, 'm1.tiny')
flavor = objects.Flavor.get_by_name(self.context, 'm1.tiny')
inst = objects.Instance(image_ref=uuids.image_ref,
user_id=self.user_id,
project_id=self.project_id,
instance_type_id=instance_type['id'],
instance_type_id=flavor.id,
flavor=flavor,
root_gb=0,
ephemeral_gb=0,
access_ip_v4='1.2.3.4',
@ -82,7 +83,6 @@ class NotificationsTestCase(test.TestCase):
inst._context = self.context
if params:
inst.update(params)
inst.flavor = instance_type
inst.create()
return inst

View File

@ -2180,20 +2180,15 @@ class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(objects.Instance, 'save')
def _test_rebuild(self, mock_save, mock_add_instance_info, mock_set_pstate,
mock_looping, mock_wait_active, preserve=False):
node_id = uuidutils.generate_uuid()
node = _get_cached_node(id=node_id, instance_id=self.instance_id,
instance_type_id=5)
node_uuid = uuidutils.generate_uuid()
node = _get_cached_node(id=node_uuid, instance_id=self.instance_id)
self.mock_conn.get_node.return_value = node
image_meta = ironic_utils.get_test_image_meta()
flavor_id = 5
flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
flavor = objects.Flavor(flavor_id=5, name='baremetal')
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid,
node=node_id,
instance_type_id=flavor_id)
instance.flavor = flavor
instance = fake_instance.fake_instance_obj(
self.ctx, uuid=self.instance_uuid, node=node_uuid, flavor=flavor)
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
@ -2210,9 +2205,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
node, instance,
test.MatchType(objects.ImageMeta),
flavor, preserve)
mock_set_pstate.assert_called_once_with(node_id,
ironic_states.REBUILD,
configdrive=mock.ANY)
mock_set_pstate.assert_called_once_with(
node_uuid, ironic_states.REBUILD, configdrive=mock.ANY)
mock_looping.assert_called_once_with(mock_wait_active, instance)
fake_looping_call.start.assert_called_once_with(
interval=CONF.ironic.api_retry_interval)
@ -2256,21 +2250,16 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_configdrive):
node_uuid = uuidutils.generate_uuid()
node = _get_cached_node(
uuid=node_uuid, instance_uuid=self.instance_uuid,
instance_type_id=5)
uuid=node_uuid, instance_uuid=self.instance_uuid)
mock_get.return_value = node
mock_required_by.return_value = True
mock_configdrive.side_effect = exception.NovaException()
image_meta = ironic_utils.get_test_image_meta()
flavor_id = 5
flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
flavor = objects.Flavor(flavor_id=5, name='baremetal')
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid,
node=node_uuid,
instance_type_id=flavor_id)
instance.flavor = flavor
instance = fake_instance.fake_instance_obj(
self.ctx, uuid=self.instance_uuid, node=node_uuid, flavor=flavor)
self.assertRaises(exception.InstanceDeployFailure,
self.driver.rebuild,
@ -2291,20 +2280,15 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_required_by, mock_configdrive):
node_uuid = uuidutils.generate_uuid()
node = _get_cached_node(
uuid=node_uuid, instance_uuid=self.instance_uuid,
instance_type_id=5)
uuid=node_uuid, instance_uuid=self.instance_uuid)
mock_get.return_value = node
mock_required_by.return_value = False
image_meta = ironic_utils.get_test_image_meta()
flavor_id = 5
flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
flavor = objects.Flavor(flavor_id=5, name='baremetal')
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid,
node=node_uuid,
instance_type_id=flavor_id)
instance.flavor = flavor
instance = fake_instance.fake_instance_obj(
self.ctx, uuid=self.instance_uuid, node=node_uuid, flavor=flavor)
exceptions = [
exception.NovaException(),
@ -2329,7 +2313,6 @@ class IronicDriverTestCase(test.NoDBTestCase):
host=hostname)
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
instance_type_id=5,
network_interface='flat')
mock_get.return_value = node
host_id = self.driver.network_binding_host_id(self.ctx, instance)

View File

@ -42,6 +42,12 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
self.project_id = 'fake'
self.context = context.get_admin_context()
self.useFixture(nova_fixtures.GlanceFixture(self))
flavor = objects.Flavor(
id=2, name='m1.micro', vcpus=1, memory_mb=128, root_gb=0,
ephemeral_gb=0, swap=0, rxtx_factor=1.0, flavorid='1',
vcpu_weight=None,)
self.test_instance = {
'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
'memory_kb': '1024000',
@ -53,7 +59,10 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
'instance_type_id': 2, # m1.tiny
'instance_type_id': flavor.id,
'flavor': flavor,
'old_flavor': None,
'new_flavor': None,
'config_drive': None,
'launched_at': None,
'system_metadata': {},
@ -62,20 +71,6 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'disk_format': 'raw',
}
flavor = objects.Flavor(memory_mb=128,
root_gb=0,
name='m1.micro',
ephemeral_gb=0,
vcpus=1,
swap=0,
rxtx_factor=1.0,
flavorid='1',
vcpu_weight=None,
id=2)
self.test_instance['flavor'] = flavor
self.test_instance['old_flavor'] = None
self.test_instance['new_flavor'] = None
def _test_block_device_info(self, with_eph=True, with_swap=True,
with_bdms=True):
swap = {'device_name': '/dev/vdb', 'swap_size': 1}
@ -1361,30 +1356,29 @@ class DefaultDeviceNamesTestCase(test.NoDBTestCase):
def setUp(self):
super(DefaultDeviceNamesTestCase, self).setUp()
self.context = context.get_admin_context()
self.flavor = objects.Flavor(id=2, swap=4)
self.instance = objects.Instance(
uuid='32dfcb37-5af1-552b-357c-be8c3aa38310',
memory_kb='1024000',
basepath='/some/path',
bridge_name='br100',
vcpus=2,
project_id='fake',
bridge='br101',
image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
root_gb=10,
ephemeral_gb=20,
instance_type_id=2,
config_drive=False,
root_device_name = '/dev/vda',
system_metadata={})
uuid='32dfcb37-5af1-552b-357c-be8c3aa38310',
memory_kb='1024000',
basepath='/some/path',
bridge_name='br100',
vcpus=2,
project_id='fake',
bridge='br101',
image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
root_gb=10,
ephemeral_gb=20,
instance_type_id=self.flavor.id,
flavor=self.flavor,
config_drive=False,
root_device_name = '/dev/vda',
system_metadata={})
self.image_meta = objects.ImageMeta(
disk_format='raw',
properties=objects.ImageMetaProps())
self.virt_type = 'kvm'
self.flavor = objects.Flavor(swap=4)
self.patchers = []
self.patchers.append(mock.patch.object(self.instance, 'get_flavor',
return_value=self.flavor))
self.patchers.append(mock.patch(
'nova.objects.block_device.BlockDeviceMapping.save'))
for patcher in self.patchers:
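The get_flavor() stub above keeps the code under test away from the database. A hedged sketch of the same idea using addCleanup instead of a patcher list (a common unittest variant, not what this commit does):

    self.flavor = objects.Flavor(id=2, swap=4)
    patcher = mock.patch.object(self.instance, 'get_flavor',
                                return_value=self.flavor)
    patcher.start()
    self.addCleanup(patcher.stop)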

View File

@ -685,11 +685,10 @@ def _create_test_instance():
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
'instance_type_id': '5', # m1.small
'extra_specs': {},
'system_metadata': {
'image_disk_format': 'raw'
},
'instance_type_id': flavor.id,
'flavor': flavor,
'new_flavor': None,
'old_flavor': None,
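_create_test_instance now derives instance_type_id from the Flavor rather than hard-coding the string '5'. A hypothetical helper that captures the invariant (illustrative only, not part of the commit):

    def make_instance_values(flavor, **overrides):
        # Linking instance_type_id to flavor.id keeps the two fields
        # consistent; the old literal '5' allowed them to drift.
        values = {
            'instance_type_id': flavor.id,
            'flavor': flavor,
            'old_flavor': None,
            'new_flavor': None,
        }
        values.update(overrides)
        return values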
@ -2844,7 +2843,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
claim = mock.Mock(autospec=True)
claimed_numa_topology = objects.InstanceNUMATopology()
claim.claimed_numa_topology = claimed_numa_topology
claim.instance_type = instance.flavor
claim.flavor = instance.flavor
numa_info = objects.LibvirtLiveMigrateNUMAInfo()
with test.nested(
mock.patch.object(drvr, '_get_live_migrate_numa_info',
@ -6692,8 +6691,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
group='spice')
instance_ref = objects.Instance(**self.test_instance)
instance_type = instance_ref.get_flavor()
instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
flavor = instance_ref.get_flavor()
flavor.extra_specs = {'hw_video:ram_max_mb': "50"}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "qxl",
@ -21248,7 +21247,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type_id'] = 2
inst['instance_type_id'] = flavor.id
inst['flavor'] = flavor
inst['ami_launch_index'] = 0
inst['host'] = 'host1'
inst['root_gb'] = flavor.root_gb
@ -21265,9 +21265,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
inst.update(params)
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['metadata', 'system_metadata',
'pci_devices'],
flavor=flavor, **inst)
self.context,
expected_attrs=['metadata', 'system_metadata', 'pci_devices'],
**inst)
# Attributes which we need to be set so they don't touch the db,
# but it's not worth the effort to fake properly

View File

@ -954,12 +954,12 @@ class MigrationMonitorTestCase(test.NoDBTestCase):
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'root_gb': 10,
'ephemeral_gb': 20,
'instance_type_id': '5', # m1.small
'instance_type_id': flavor.id,
'flavor': flavor,
'extra_specs': {},
'system_metadata': {
'image_disk_format': 'raw',
},
'flavor': flavor,
'new_flavor': None,
'old_flavor': None,
'pci_devices': objects.PciDeviceList(),

View File

@ -343,10 +343,10 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
self.conn = driver.VMwareAPISession()
self.assertEqual(2, self.attempts)
def _get_instance_type_by_name(self, type):
for instance_type in DEFAULT_FLAVOR_OBJS:
if instance_type.name == type:
return instance_type
def _get_flavor_by_name(self, type):
for flavor in DEFAULT_FLAVOR_OBJS:
if flavor.name == type:
return flavor
if type == 'm1.micro':
return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
'name': 'm1.micro', 'deleted': 0, 'created_at': None,
@ -356,15 +356,15 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
'flavorid': '1', 'vcpu_weight': None, 'id': 2}
def _create_instance(self, node=None, set_image_ref=True,
uuid=None, instance_type='m1.large',
ephemeral=None, instance_type_updates=None):
uuid=None, flavor='m1.large',
ephemeral=None, flavor_updates=None):
if not node:
node = self.node_name
if not uuid:
uuid = uuidutils.generate_uuid()
self.type_data = dict(self._get_instance_type_by_name(instance_type))
if instance_type_updates:
self.type_data.update(instance_type_updates)
self.type_data = dict(self._get_flavor_by_name(flavor))
if flavor_updates:
self.type_data.update(flavor_updates)
if ephemeral is not None:
self.type_data['ephemeral_gb'] = ephemeral
values = {'name': 'fake_name',
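A hedged usage sketch for the renamed VMware helper: _get_flavor_by_name returns one of DEFAULT_FLAVOR_OBJS (or a plain dict for 'm1.micro'), and dict() normalises either result into mutable type_data, exactly as the hunk above does:

    type_data = dict(self._get_flavor_by_name('m1.small'))
    type_data.update({'ephemeral_gb': 10})  # what flavor_updates feeds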
@ -393,15 +393,15 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
self.context, **values)
def _create_vm(self, node=None, num_instances=1, uuid=None,
instance_type='m1.large', powered_on=True,
ephemeral=None, bdi=None, instance_type_updates=None):
flavor='m1.large', powered_on=True,
ephemeral=None, bdi=None, flavor_updates=None):
"""Create and spawn the VM."""
if not node:
node = self.node_name
self._create_instance(node=node, uuid=uuid,
instance_type=instance_type,
flavor=flavor,
ephemeral=ephemeral,
instance_type_updates=instance_type_updates)
flavor_updates=flavor_updates)
self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
self.conn.spawn(self.context, self.instance, self.image,
injected_files=[], admin_password=None, allocations={},
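Call sites then read as follows; both lines are adapted from hunks later in this file and shown together here for clarity:

    self._create_vm(flavor='m1.micro')  # was instance_type=
    self._create_vm(flavor_updates={    # was instance_type_updates=
        'extra_specs': {'vmware:hw_version': 'vmx-08'}})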
@ -550,9 +550,9 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
vmwareapi_fake.assertPathExists(self, str(path))
vmwareapi_fake.assertPathExists(self, str(root))
def _iso_disk_type_created(self, instance_type='m1.large'):
def _iso_disk_type_created(self, flavor='m1.large'):
self.image.disk_format = 'iso'
self._create_vm(instance_type=instance_type)
self._create_vm(flavor=flavor)
path = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
'%s.iso' % self.fake_image_uuid)
@ -564,7 +564,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
vmwareapi_fake.assertPathExists(self, str(path))
def test_iso_disk_type_created_with_root_gb_0(self):
self._iso_disk_type_created(instance_type='m1.micro')
self._iso_disk_type_created(flavor='m1.micro')
path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
vmwareapi_fake.assertPathNotExists(self, str(path))
@ -766,7 +766,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
self._check_vm_info(info, power_state.RUNNING)
def test_spawn_root_size_0(self):
self._create_vm(instance_type='m1.micro')
self._create_vm(flavor='m1.micro')
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
cache = ('[%s] vmware_base/%s/%s.vmdk' %
@ -1197,7 +1197,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
def test_spawn_hw_versions(self):
updates = {'extra_specs': {'vmware:hw_version': 'vmx-08'}}
self._create_vm(instance_type_updates=updates)
self._create_vm(flavor_updates=updates)
vm = self._get_vm_record()
version = vm.get("version")
self.assertEqual('vmx-08', version)
@ -1273,7 +1273,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
self._test_snapshot()
def test_snapshot_no_root_disk(self):
self._iso_disk_type_created(instance_type='m1.micro')
self._iso_disk_type_created(flavor='m1.micro')
self.assertRaises(error_util.NoRootDiskDefined, self.conn.snapshot,
self.context, self.instance, "Test-Snapshot",
lambda *args, **kwargs: None)
@ -2343,8 +2343,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
self.context, self.instance, vif)
def test_resize_to_smaller_disk(self):
self._create_vm(instance_type='m1.large')
flavor = self._get_instance_type_by_name('m1.small')
self._create_vm(flavor='m1.large')
flavor = self._get_flavor_by_name('m1.small')
self.assertRaises(exception.InstanceFaultRollback,
self.conn.migrate_disk_and_power_off, self.context,
self.instance, 'fake_dest', flavor, None)

View File

@ -1279,7 +1279,7 @@ def get_mem_encryption_constraint(
cannot be called since it relies on being run from the compute
node in order to retrieve CONF.libvirt.hw_machine_type.
:param instance_type: Flavor object
:param flavor: Flavor object
:param image: an ImageMeta object
:param machine_type: a string representing the machine type (optional)
:raises: nova.exception.FlavorImageConflict
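The docstring fix matters because callers pass the Flavor object straight in. A hedged sketch of the call, assuming the signature in nova.virt.hardware (machine_type stays optional):

    from nova.virt import hardware

    # Raises FlavorImageConflict when flavor and image disagree about
    # memory encryption; otherwise returns whether it is required.
    needs_mem_encryption = hardware.get_mem_encryption_constraint(
        flavor, image_meta)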

View File

@ -9032,8 +9032,7 @@ class LibvirtDriver(driver.ComputeDriver):
def post_claim_migrate_data(self, context, instance, migrate_data, claim):
migrate_data.dst_numa_info = self._get_live_migrate_numa_info(
claim.claimed_numa_topology, claim.instance_type,
claim.image_meta)
claim.claimed_numa_topology, claim.flavor, claim.image_meta)
return migrate_data
def _get_resources(self, instance, prefix=None):
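Finally, a hedged test sketch for the consumer side of the claim rename: post_claim_migrate_data should feed the claim's .flavor (not the removed .instance_type) into the NUMA helper. The mock wiring is illustrative, not lifted from the commit:

    claim = mock.Mock()
    claim.flavor = instance.flavor
    with mock.patch.object(drvr, '_get_live_migrate_numa_info') as m:
        drvr.post_claim_migrate_data(
            self.context, instance, migrate_data, claim)
        m.assert_called_once_with(
            claim.claimed_numa_topology, claim.flavor, claim.image_meta)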