Replace pci_request flavor storage with proper object usage

This makes the PCI code use the InstancePCIRequests object to store
and retrieve the pci_request information, instead of stashing it in
system_metadata.

Related to blueprint pci-passthrough-sriov

Co-Authored-By: Baodong (Robert) Li <baoli@cisco.com>
Change-Id: I012ee5c118265e044ff41fb58b732728946ee85a
This commit is contained in:
Dan Smith 2014-09-03 10:14:03 -04:00 committed by Robert Li
parent 2d998c8df2
commit d273e33b77
19 changed files with 328 additions and 348 deletions

View File

@@ -61,6 +61,7 @@ from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova.pci import pci_request
import nova.policy
from nova import quota
from nova import rpc
@@ -783,6 +784,9 @@ class API(base.Base):
system_metadata = flavors.save_flavor_info(
dict(), instance_type)
pci_request_info = pci_request.get_pci_requests_from_flavor(
instance_type)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
@@ -810,6 +814,7 @@
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'pci_request_info': pci_request_info,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
@@ -847,7 +852,9 @@
context, instance_type, boot_meta, instance,
security_groups, block_device_mapping,
num_instances, i, shutdown_terminate)
pci_requests = base_options['pci_request_info']
pci_requests.instance_uuid = instance.uuid
pci_requests.save(context)
instances.append(instance)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING

View File

@@ -19,10 +19,10 @@ Claim objects for use with resource tracking.
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.pci import pci_request
LOG = logging.getLogger(__name__)
@@ -68,7 +68,7 @@ class Claim(NopClaim):
correct decisions with respect to host selection.
"""
def __init__(self, instance, tracker, resources, overhead=None,
def __init__(self, context, instance, tracker, resources, overhead=None,
limits=None):
super(Claim, self).__init__()
# Stash a copy of the instance at the current point of time
@@ -85,6 +85,7 @@ class Claim(NopClaim):
overhead = {'memory_mb': 0}
self.overhead = overhead
self.context = context
# Check claim at constructor to avoid mess code
# Raise exception ComputeResourcesUnavailable if claim failed
@@ -103,7 +104,7 @@
been aborted.
"""
LOG.debug("Aborting claim: %s" % self, instance=self.instance)
self.tracker.abort_instance_claim(self.instance)
self.tracker.abort_instance_claim(self.context, self.instance)
def _claim_test(self, resources, limits=None):
"""Test if this claim can be satisfied given available resources and
@@ -158,11 +159,12 @@
return self._test(type_, unit, total, used, requested, limit)
def _test_pci(self):
pci_requests = pci_request.get_instance_pci_requests(self.instance)
pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
self.context, self.instance['uuid'])
if pci_requests:
if pci_requests.requests:
can_claim = self.tracker.pci_tracker.stats.support_requests(
pci_requests)
pci_requests.requests)
if not can_claim:
return _('Claim pci failed.')
@@ -204,11 +206,13 @@ class ResizeClaim(Claim):
"""Claim used for holding resources for an incoming resize/migration
operation.
"""
def __init__(self, instance, instance_type, tracker, resources,
def __init__(self, context, instance, instance_type, tracker, resources,
overhead=None, limits=None):
self.instance_type = instance_type
super(ResizeClaim, self).__init__(instance, tracker, resources,
overhead=overhead, limits=limits)
self.context = context
super(ResizeClaim, self).__init__(context, instance, tracker,
resources, overhead=overhead,
limits=limits)
self.migration = None
@property
@@ -221,11 +225,12 @@ class ResizeClaim(Claim):
return self.instance_type['memory_mb'] + self.overhead['memory_mb']
def _test_pci(self):
pci_requests = pci_request.get_instance_pci_requests(
self.instance, 'new_')
if pci_requests:
pci_requests = objects.InstancePCIRequests.\
get_by_instance_uuid_and_newness(
self.context, self.instance['uuid'], True)
if pci_requests.requests:
claim = self.tracker.pci_tracker.stats.support_requests(
pci_requests)
pci_requests.requests)
if not claim:
return _('Claim pci failed.')
@@ -238,4 +243,5 @@
been aborted.
"""
LOG.debug("Aborting claim: %s" % self, instance=self.instance)
self.tracker.drop_resize_claim(self.instance, self.instance_type)
self.tracker.drop_resize_claim(self.context, self.instance,
self.instance_type)

View File

@@ -32,7 +32,6 @@ from nova.i18n import _
from nova.i18n import _LE
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.pci import pci_request
from nova import utils
flavor_opts = [
@@ -310,7 +309,6 @@ def save_flavor_info(metadata, instance_type, prefix=''):
for key in system_metadata_flavor_props.keys():
to_key = '%sinstance_type_%s' % (prefix, key)
metadata[to_key] = instance_type[key]
pci_request.save_flavor_pci_info(metadata, instance_type, prefix)
return metadata
@@ -323,7 +321,6 @@ def delete_flavor_info(metadata, *prefixes):
for prefix in prefixes:
to_key = '%sinstance_type_%s' % (prefix, key)
del metadata[to_key]
pci_request.delete_flavor_pci_info(metadata, *prefixes)
return metadata

View File

@@ -3333,7 +3333,7 @@ class ComputeManager(manager.Manager):
migration.save(context.elevated())
rt = self._get_resource_tracker(migration.source_node)
rt.drop_resize_claim(instance, old_instance_type)
rt.drop_resize_claim(context, instance, old_instance_type)
# NOTE(mriedem): The old_vm_state could be STOPPED but the user
# might have manually powered up the instance to confirm the
@@ -3409,7 +3409,7 @@
migration.save(context.elevated())
rt = self._get_resource_tracker(instance.node)
rt.drop_resize_claim(instance)
rt.drop_resize_claim(context, instance)
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration.source_compute,

View File

@@ -29,7 +29,6 @@ from nova.compute import resources as ext_resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import conductor
from nova import context
from nova import exception
from nova.i18n import _
from nova import objects
@@ -126,13 +125,14 @@ class ResourceTracker(object):
"MB", {'flavor': instance_ref['memory_mb'],
'overhead': overhead['memory_mb']})
claim = claims.Claim(instance_ref, self, self.compute_node,
claim = claims.Claim(context, instance_ref, self, self.compute_node,
overhead=overhead, limits=limits)
self._set_instance_host_and_node(context, instance_ref)
# Mark resources in-use and update stats
self._update_usage_from_instance(self.compute_node, instance_ref)
self._update_usage_from_instance(context, self.compute_node,
instance_ref)
elevated = context.elevated()
# persist changes to the compute node:
@@ -167,7 +167,7 @@ class ResourceTracker(object):
'overhead': overhead['memory_mb']})
instance_ref = obj_base.obj_to_primitive(instance)
claim = claims.ResizeClaim(instance_ref, instance_type, self,
claim = claims.ResizeClaim(context, instance_ref, instance_type, self,
self.compute_node, overhead=overhead,
limits=limits)
@@ -217,33 +217,34 @@
instance_ref['node'] = self.nodename
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, instance):
def abort_instance_claim(self, context, instance):
"""Remove usage from the given instance."""
# flag the instance as deleted to revert the resource usage
# and associated stats:
instance['vm_state'] = vm_states.DELETED
self._update_usage_from_instance(self.compute_node, instance)
self._update_usage_from_instance(context, self.compute_node, instance)
ctxt = context.get_admin_context()
self._update(ctxt, self.compute_node)
self._update(context.elevated(), self.compute_node)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def drop_resize_claim(self, instance, instance_type=None, prefix='new_'):
def drop_resize_claim(self, context, instance, instance_type=None,
prefix='new_'):
"""Remove usage for an incoming/outgoing migration."""
if instance['uuid'] in self.tracked_migrations:
migration, itype = self.tracked_migrations.pop(instance['uuid'])
if not instance_type:
ctxt = context.get_admin_context()
ctxt = context.elevated()
instance_type = self._get_instance_type(ctxt, instance, prefix)
if instance_type['id'] == itype['id']:
if self.pci_tracker:
self.pci_tracker.update_pci_for_migration(instance,
self.pci_tracker.update_pci_for_migration(context,
instance,
sign=-1)
self._update_usage(self.compute_node, itype, sign=-1)
self._update_usage(context, self.compute_node, itype, sign=-1)
ctxt = context.get_admin_context()
ctxt = context.elevated()
self._update(ctxt, self.compute_node)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
@@ -259,7 +260,8 @@ class ResourceTracker(object):
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(self.compute_node, instance)
self._update_usage_from_instance(context, self.compute_node,
instance)
self._update(context.elevated(), self.compute_node)
@property
@@ -318,10 +320,11 @@ class ResourceTracker(object):
# Grab all instances assigned to this node:
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, self.nodename)
context, self.host, self.nodename,
expected_attrs=['system_metadata'])
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(resources, instances)
self._update_usage_from_instances(context, resources, instances)
# Grab all in-progress migrations:
capi = self.conductor_api
@@ -333,7 +336,7 @@
# Detect and account for orphaned instances that may exist on the
# hypervisor, but are not in the DB:
orphans = self._find_orphaned_instances()
self._update_usage_from_orphans(resources, orphans)
self._update_usage_from_orphans(context, resources, orphans)
# NOTE(yjiang5): Because pci device tracker status is not cleared in
# this periodic task, and also because the resource tracker is not
@@ -498,7 +501,7 @@ class ResourceTracker(object):
self.scheduler_client.update_resource_stats(
context, (self.host, self.nodename), stats)
def _update_usage(self, resources, usage, sign=1):
def _update_usage(self, context, resources, usage, sign=1):
mem_usage = usage['memory_mb']
overhead = self.driver.estimate_instance_overhead(usage)
@@ -559,8 +562,8 @@ class ResourceTracker(object):
if itype:
if self.pci_tracker:
self.pci_tracker.update_pci_for_migration(instance)
self._update_usage(resources, itype)
self.pci_tracker.update_pci_for_migration(context, instance)
self._update_usage(context, resources, itype)
if self.pci_tracker:
resources['pci_stats'] = jsonutils.dumps(
self.pci_tracker.stats)
@@ -607,7 +610,7 @@
"migration."), instance_uuid=uuid)
continue
def _update_usage_from_instance(self, resources, instance):
def _update_usage_from_instance(self, context, resources, instance):
"""Update usage for a single instance."""
uuid = instance['uuid']
@@ -625,12 +628,12 @@
self.stats.update_stats_for_instance(instance)
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(instance)
self.pci_tracker.update_pci_for_instance(context, instance)
# if it's a new or deleted instance:
if is_new_instance or is_deleted_instance:
# new instance, update compute node resource usage:
self._update_usage(resources, instance, sign=sign)
self._update_usage(context, resources, instance, sign=sign)
resources['current_workload'] = self.stats.calculate_workload()
if self.pci_tracker:
@@ -638,7 +641,7 @@
else:
resources['pci_stats'] = jsonutils.dumps([])
def _update_usage_from_instances(self, resources, instances):
def _update_usage_from_instances(self, context, resources, instances):
"""Calculate resource usage based on instance utilization. This is
different than the hypervisor's view as it will account for all
instances assigned to the local compute host, even if they are not
@@ -665,7 +668,7 @@
for instance in instances:
if instance['vm_state'] != vm_states.DELETED:
self._update_usage_from_instance(resources, instance)
self._update_usage_from_instance(context, resources, instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
@@ -688,7 +691,7 @@
return orphans
def _update_usage_from_orphans(self, resources, orphans):
def _update_usage_from_orphans(self, context, resources, orphans):
"""Include orphaned instances in usage."""
for orphan in orphans:
memory_mb = orphan['memory_mb']
@@ -699,7 +702,7 @@
# just record memory usage for the orphan
usage = {'memory_mb': memory_mb}
self._update_usage(resources, usage)
self._update_usage(context, resources, usage)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",

View File

@@ -24,7 +24,6 @@ from nova.i18n import _
from nova import objects
from nova.openstack.common import log as logging
from nova.pci import pci_device
from nova.pci import pci_request
from nova.pci import pci_stats
LOG = logging.getLogger(__name__)
@@ -150,12 +149,12 @@ class PciDevTracker(object):
self.pci_devs.append(dev_obj)
self.stats.add_device(dev_obj)
def _claim_instance(self, instance, prefix=''):
pci_requests = pci_request.get_instance_pci_requests(
instance, prefix)
if not pci_requests:
def _claim_instance(self, context, instance, prefix=''):
pci_requests = objects.InstancePCIRequests.get_by_instance(
context, instance)
if not pci_requests.requests:
return None
devs = self.stats.consume_requests(pci_requests)
devs = self.stats.consume_requests(pci_requests.requests)
if not devs:
raise exception.PciDeviceRequestFailed(pci_requests)
for dev in devs:
@@ -184,7 +183,7 @@
dev['instance_uuid'] == instance['uuid']):
self._free_device(dev)
def update_pci_for_instance(self, instance):
def update_pci_for_instance(self, context, instance):
"""Update instance's pci usage information.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
@@ -210,12 +209,12 @@
self.allocations[uuid] = devs
elif (uuid not in self.allocations and
uuid not in self.claims):
devs = self._claim_instance(instance)
devs = self._claim_instance(context, instance)
if devs:
self._allocate_instance(instance, devs)
self.allocations[uuid] = devs
def update_pci_for_migration(self, instance, sign=1):
def update_pci_for_migration(self, context, instance, sign=1):
"""Update instance's pci usage information when it is migrated.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock.
@@ -225,7 +224,7 @@
"""
uuid = instance['uuid']
if sign == 1 and uuid not in self.claims:
devs = self._claim_instance(instance, 'new_')
devs = self._claim_instance(context, instance, 'new_')
if devs:
self.claims[uuid] = devs
if sign == -1 and uuid in self.claims:

View File

@@ -43,10 +43,10 @@ from oslo.config import cfg
import six
from nova import exception
from nova import objects
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.pci import pci_utils
from nova import utils
pci_alias_opts = [
cfg.MultiStrOpt('pci_alias',
@@ -141,9 +141,10 @@ def _translate_alias_to_requests(alias_spec):
if name not in pci_aliases:
raise exception.PciRequestAliasNotDefined(alias=name)
else:
request = {'count': int(count),
'spec': copy.deepcopy(pci_aliases[name]),
'alias_name': name}
request = objects.InstancePCIRequest(
count=int(count),
spec=copy.deepcopy(pci_aliases[name]),
alias_name=name)
pci_requests.append(request)
return pci_requests
@@ -182,57 +183,9 @@ def get_pci_requests_from_flavor(flavor):
:param flavor: the flavor to be checked
:returns: a list of pci requests
"""
if 'extra_specs' not in flavor:
return []
pci_requests = []
if 'pci_passthrough:alias' in flavor['extra_specs']:
if ('extra_specs' in flavor and
'pci_passthrough:alias' in flavor['extra_specs']):
pci_requests = _translate_alias_to_requests(
flavor['extra_specs']['pci_passthrough:alias'])
return pci_requests
def get_instance_pci_requests(instance, prefix=""):
"""Get instance's pci allocation requirement.
After a flavor's pci requirement is translated into pci requests,
the requests are kept in instance's system metadata to avoid
future flavor access and translation. This function get the
pci requests from instance system metadata directly.
As save_flavor_pci_info(), the prefix can be used to stash
information about another flavor for later use, like in resize.
"""
if 'system_metadata' not in instance:
return []
system_metadata = utils.instance_sys_meta(instance)
pci_requests = system_metadata.get('%spci_requests' % prefix)
if not pci_requests:
return []
return jsonutils.loads(pci_requests)
def save_flavor_pci_info(metadata, instance_type, prefix=''):
"""Save flavor's pci information to metadata.
To reduce flavor access and pci request translation, the
translated pci requests are saved into instance's system
metadata.
As save_flavor_info(), the prefix can be used to stash information
about another flavor for later use, like in resize.
"""
pci_requests = get_pci_requests_from_flavor(instance_type)
if pci_requests:
to_key = '%spci_requests' % prefix
metadata[to_key] = jsonutils.dumps(pci_requests)
def delete_flavor_pci_info(metadata, *prefixes):
"""Delete pci requests information from instance's system_metadata."""
for prefix in prefixes:
to_key = '%spci_requests' % prefix
if to_key in metadata:
del metadata[to_key]
return objects.InstancePCIRequests(requests=pci_requests)

View File

@@ -302,7 +302,7 @@ class FilterScheduler(driver.Scheduler):
# Now consume the resources so the filter/weights
# will change for the next instance.
chosen_host.obj.consume_from_instance(instance_properties)
chosen_host.obj.consume_from_instance(context, instance_properties)
if update_group_hosts is True:
filter_properties['group_hosts'].add(chosen_host.obj.host)
return selected_hosts

View File

@@ -44,7 +44,7 @@ class PciPassthroughFilter(filters.BaseHostFilter):
pci_requests = filter_properties.get('pci_requests')
if not pci_requests:
return True
if not host_state.pci_stats.support_requests(pci_requests):
if not host_state.pci_stats.support_requests(pci_requests.requests):
LOG.debug("%(host_state)s doesn't have the required PCI devices"
" (%(requests)s)",
{'host_state': host_state, 'requests': pci_requests})

View File

@@ -27,10 +27,10 @@ from nova.compute import vm_states
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.pci import pci_request
from nova.pci import pci_stats
from nova.scheduler import filters
from nova.scheduler import weights
@@ -226,7 +226,7 @@ class HostState(object):
# update metrics
self._update_metrics_from_compute_node(compute)
def consume_from_instance(self, instance):
def consume_from_instance(self, context, instance):
"""Incrementally update host state from an instance."""
disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
ram_mb = instance['memory_mb']
@@ -239,9 +239,10 @@
# Track number of instances on host
self.num_instances += 1
pci_requests = pci_request.get_instance_pci_requests(instance)
if pci_requests and self.pci_stats:
self.pci_stats.apply_requests(pci_requests)
pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
context, instance['uuid'])
if pci_requests.requests and self.pci_stats:
self.pci_stats.apply_requests(pci_requests.requests)
vm_state = instance.get('vm_state', vm_states.BUILDING)
task_state = instance.get('task_state')

View File

@@ -18,11 +18,12 @@
import re
import uuid
import mock
import six
from nova.compute import claims
from nova import exception
from nova.openstack.common import jsonutils
from nova import objects
from nova.pci import pci_manager
from nova import test
@@ -53,6 +54,8 @@ class DummyTracker(object):
self.pci_tracker = pci_manager.PciDevTracker()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
class ClaimTestCase(test.NoDBTestCase):
def setUp(self):
@@ -64,7 +67,7 @@ class ClaimTestCase(test.NoDBTestCase):
instance = self._fake_instance(**kwargs)
if overhead is None:
overhead = {'memory_mb': 0}
return claims.Claim(instance, self.tracker, self.resources,
return claims.Claim('context', instance, self.tracker, self.resources,
overhead=overhead, limits=limits)
def _fake_instance(self, **kwargs):
@@ -114,22 +117,22 @@ class ClaimTestCase(test.NoDBTestCase):
except e as ee:
self.assertTrue(re.search(re_obj, six.text_type(ee)))
def test_memory_unlimited(self):
def test_memory_unlimited(self, mock_get):
self._claim(memory_mb=99999999)
def test_disk_unlimited_root(self):
def test_disk_unlimited_root(self, mock_get):
self._claim(root_gb=999999)
def test_disk_unlimited_ephemeral(self):
def test_disk_unlimited_ephemeral(self, mock_get):
self._claim(ephemeral_gb=999999)
def test_memory_with_overhead(self):
def test_memory_with_overhead(self, mock_get):
overhead = {'memory_mb': 8}
limits = {'memory_mb': 2048}
self._claim(memory_mb=2040, limits=limits,
overhead=overhead)
def test_memory_with_overhead_insufficient(self):
def test_memory_with_overhead_insufficient(self, mock_get):
overhead = {'memory_mb': 9}
limits = {'memory_mb': 2048}
@@ -137,33 +140,33 @@ class ClaimTestCase(test.NoDBTestCase):
self._claim, limits=limits, overhead=overhead,
memory_mb=2040)
def test_memory_oversubscription(self):
def test_memory_oversubscription(self, mock_get):
self._claim(memory_mb=4096)
def test_memory_insufficient(self):
def test_memory_insufficient(self, mock_get):
limits = {'memory_mb': 8192}
self.assertRaises(exception.ComputeResourcesUnavailable,
self._claim, limits=limits, memory_mb=16384)
def test_disk_oversubscription(self):
def test_disk_oversubscription(self, mock_get):
limits = {'disk_gb': 60}
self._claim(root_gb=10, ephemeral_gb=40,
limits=limits)
def test_disk_insufficient(self):
def test_disk_insufficient(self, mock_get):
limits = {'disk_gb': 45}
self.assertRaisesRegexp(re.compile("disk", re.IGNORECASE),
exception.ComputeResourcesUnavailable,
self._claim, limits=limits, root_gb=10, ephemeral_gb=40)
def test_disk_and_memory_insufficient(self):
def test_disk_and_memory_insufficient(self, mock_get):
limits = {'disk_gb': 45, 'memory_mb': 8192}
self.assertRaisesRegexp(re.compile("memory.*disk", re.IGNORECASE),
exception.ComputeResourcesUnavailable,
self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
memory_mb=16384)
def test_pci_pass(self):
def test_pci_pass(self, mock_get):
dev_dict = {
'compute_node_id': 1,
'address': 'a',
@@ -173,18 +176,13 @@ class ClaimTestCase(test.NoDBTestCase):
self.tracker.new_pci_tracker()
self.tracker.pci_tracker.set_hvdevs([dev_dict])
claim = self._claim()
self._set_pci_request(claim)
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
mock_get.return_value = objects.InstancePCIRequests(
requests=[request])
claim._test_pci()
def _set_pci_request(self, claim):
request = [{'count': 1,
'spec': [{'vendor_id': 'v', 'product_id': 'p'}],
}]
claim.instance.update(
system_metadata={'pci_requests': jsonutils.dumps(request)})
def test_pci_fail(self):
def test_pci_fail(self, mock_get):
dev_dict = {
'compute_node_id': 1,
'address': 'a',
@@ -194,10 +192,13 @@ class ClaimTestCase(test.NoDBTestCase):
self.tracker.new_pci_tracker()
self.tracker.pci_tracker.set_hvdevs([dev_dict])
claim = self._claim()
self._set_pci_request(claim)
self.assertEqual("Claim pci failed.", claim._test_pci())
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
mock_get.return_value = objects.InstancePCIRequests(
requests=[request])
claim._test_pci()
def test_pci_pass_no_requests(self):
def test_pci_pass_no_requests(self, mock_get):
dev_dict = {
'compute_node_id': 1,
'address': 'a',
@@ -207,15 +208,14 @@ class ClaimTestCase(test.NoDBTestCase):
self.tracker.new_pci_tracker()
self.tracker.pci_tracker.set_hvdevs([dev_dict])
claim = self._claim()
self._set_pci_request(claim)
claim._test_pci()
def test_ext_resources(self):
def test_ext_resources(self, mock_get):
self._claim()
self.assertTrue(self.tracker.ext_resources_handler.test_called)
self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype)
def test_abort(self):
def test_abort(self, mock_get):
claim = self._abort()
self.assertTrue(claim.tracker.icalled)
@@ -240,22 +240,19 @@ class ResizeClaimTestCase(ClaimTestCase):
instance_type = self._fake_instance_type(**kwargs)
if overhead is None:
overhead = {'memory_mb': 0}
return claims.ResizeClaim(self.instance, instance_type, self.tracker,
self.resources, overhead=overhead,
limits=limits)
return claims.ResizeClaim('context', self.instance, instance_type,
self.tracker, self.resources,
overhead=overhead, limits=limits)
def _set_pci_request(self, claim):
request = [{'count': 1,
'spec': [{'vendor_id': 'v', 'product_id': 'p'}],
}]
claim.instance.update(
system_metadata={'new_pci_requests': jsonutils.dumps(request)})
def test_ext_resources(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_ext_resources(self, mock_get):
self._claim()
self.assertTrue(self.tracker.ext_resources_handler.test_called)
self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype)
def test_abort(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
claim = self._abort()
self.assertTrue(claim.tracker.rcalled)

View File

@@ -648,7 +648,9 @@ class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
class InstanceClaimTestCase(BaseTrackerTestCase):
def test_update_usage_only_for_tracked(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
@@ -674,7 +676,9 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
self._assert(claim_gb, 'local_gb_used')
self._assert(1, 'current_workload')
def test_claim_and_audit(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_audit(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
@@ -714,7 +718,9 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute['free_disk_gb'])
def test_claim_and_abort(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
@@ -740,7 +746,9 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
def test_instance_claim_with_oversubscription(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
@@ -757,7 +765,9 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
def test_additive_claims(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
self.limits['vcpu'] = 2
flavor = self._fake_flavor_create(
@@ -776,7 +786,9 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node['vcpus_used'])
def test_context_claim_with_exception(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_context_claim_with_exception(self, mock_get):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
@@ -790,7 +802,9 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
def test_instance_context_claim(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_context_claim(self, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
instance = self._fake_instance(flavor=flavor)
@@ -817,7 +831,9 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
def test_update_load_stats_for_instance(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.instance_claim(self.context, instance):
pass
@@ -831,7 +847,9 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
def test_cpu_stats(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_cpu_stats(self, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
@@ -912,7 +930,9 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
# This hits the stub in setUp()
migration.create('fake')
def test_claim(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
@@ -920,7 +940,9 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
def test_abort(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
@@ -933,7 +955,9 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
def test_additive_claims(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
@@ -949,7 +973,9 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
def test_claim_and_audit(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_audit(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
@ -959,7 +985,9 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_same_host(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_same_host(self, mock_get):
self.limits['vcpu'] = 3
src_dict = {
@ -999,10 +1027,12 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
self.assertEqual(1, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
def test_revert(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self.tracker.drop_resize_claim(self.instance)
self.tracker.drop_resize_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
@ -1010,7 +1040,9 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
def test_revert_reserve_source(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert_reserve_source(self, mock_get):
# if a revert has started at the API and audit runs on
# the source compute before the instance flips back to source,
# resources should still be held at the source based on the
@ -1095,7 +1127,9 @@ class ResizeClaimTestCase(BaseTrackerTestCase):
self.tracker.update_available_resource(self.context)
self.assertEqual(1, len(self.tracker.tracked_migrations))
def test_set_instance_host_and_node(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_set_instance_host_and_node(self, mock_get):
instance = self._fake_instance()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])

View File

@ -206,7 +206,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
def fake_claim(context, instance, limits):
instance.host = self.compute.host
return claims.Claim(db_instance, self.rt, _fake_resources())
return claims.Claim(context, db_instance,
self.rt, _fake_resources())
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
@ -294,7 +295,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.context, instance, {'source_compute': '',
'dest_compute': self.compute.host})
self.rt.instance_claim(self.context, instance, limits).AndReturn(
claims.Claim(db_instance, self.rt, _fake_resources()))
claims.Claim(self.context, db_instance, self.rt,
_fake_resources()))
self.compute.driver.spawn(self.context, instance, None,
injected_files=[], admin_password=None,
network_info=[],

View File

@ -15,6 +15,8 @@
import copy
import mock
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
@ -23,7 +25,6 @@ from nova import exception
from nova import objects
from nova.pci import pci_device
from nova.pci import pci_manager
from nova.pci import pci_request
from nova import test
from nova.tests.api.openstack import fakes
@ -90,15 +91,18 @@ class PciDevTrackerTestCase(test.TestCase):
def _fake_pci_device_destroy(self, ctxt, node_id, address):
self.destroy_called += 1
def _fake_get_instance_pci_requests(self, instance, prefix=''):
return self.pci_requests
def _create_pci_requests_object(self, mock_get, requests):
pci_reqs = []
for request in requests:
pci_req_obj = objects.InstancePCIRequest(count=request['count'],
spec=request['spec'])
pci_reqs.append(pci_req_obj)
mock_get.return_value = objects.InstancePCIRequests(requests=pci_reqs)
def setUp(self):
super(PciDevTrackerTestCase, self).setUp()
self.stubs.Set(db, 'pci_device_get_all_by_node',
self._fake_get_pci_devices)
self.stubs.Set(pci_request, 'get_instance_pci_requests',
self._fake_get_instance_pci_requests)
self._create_fake_instance()
self.tracker = pci_manager.PciDevTracker(1)
@ -143,9 +147,11 @@ class PciDevTrackerTestCase(test.TestCase):
if dev['status'] == 'removed']),
2)
def test_set_hvdev_changed_stal(self):
self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}]
self.tracker._claim_instance(self.inst)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_set_hvdev_changed_stal(self, mock_get):
self._create_pci_requests_object(mock_get,
[{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
self.tracker._claim_instance(mock.sentinel.context, self.inst)
fake_pci_3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v2')
fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
copy.deepcopy(fake_pci_3)]
@ -153,66 +159,75 @@ class PciDevTrackerTestCase(test.TestCase):
self.assertEqual(len(self.tracker.stale), 1)
self.assertEqual(self.tracker.stale['0000:00:00.2']['vendor_id'], 'v2')
def test_update_pci_for_instance_active(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_instance(self.inst)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_update_pci_for_instance_active(self, mock_get):
self._create_pci_requests_object(mock_get, fake_pci_requests)
self.tracker.update_pci_for_instance(None, self.inst)
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(free_devs), 1)
self.assertEqual(free_devs[0]['vendor_id'], 'v')
def test_update_pci_for_instance_fail(self):
self.pci_requests = copy.deepcopy(fake_pci_requests)
self.pci_requests[0]['count'] = 4
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_update_pci_for_instance_fail(self, mock_get):
pci_requests = copy.deepcopy(fake_pci_requests)
pci_requests[0]['count'] = 4
self._create_pci_requests_object(mock_get, pci_requests)
self.assertRaises(exception.PciDeviceRequestFailed,
self.tracker.update_pci_for_instance,
None,
self.inst)
def test_update_pci_for_instance_deleted(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_instance(self.inst)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_update_pci_for_instance_deleted(self, mock_get):
self._create_pci_requests_object(mock_get, fake_pci_requests)
self.tracker.update_pci_for_instance(None, self.inst)
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(free_devs), 1)
self.inst.vm_state = vm_states.DELETED
self.tracker.update_pci_for_instance(self.inst)
self.tracker.update_pci_for_instance(None, self.inst)
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(free_devs), 3)
self.assertEqual(set([dev['vendor_id'] for
dev in self.tracker.pci_devs]),
set(['v', 'v1']))
def test_update_pci_for_instance_resize_source(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_instance(self.inst)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_update_pci_for_instance_resize_source(self, mock_get):
self._create_pci_requests_object(mock_get, fake_pci_requests)
self.tracker.update_pci_for_instance(None, self.inst)
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(free_devs), 1)
self.inst.task_state = task_states.RESIZE_MIGRATED
self.tracker.update_pci_for_instance(self.inst)
self.tracker.update_pci_for_instance(None, self.inst)
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(free_devs), 3)
def test_update_pci_for_instance_resize_dest(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_migration(self.inst)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_update_pci_for_instance_resize_dest(self, mock_get):
self._create_pci_requests_object(mock_get, fake_pci_requests)
self.tracker.update_pci_for_migration(None, self.inst)
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(free_devs), 1)
self.assertEqual(len(self.tracker.claims['fake-inst-uuid']), 2)
self.assertNotIn('fake-inst-uuid', self.tracker.allocations)
self.inst.task_state = task_states.RESIZE_FINISH
self.tracker.update_pci_for_instance(self.inst)
self.tracker.update_pci_for_instance(None, self.inst)
self.assertEqual(len(self.tracker.allocations['fake-inst-uuid']), 2)
self.assertNotIn('fake-inst-uuid', self.tracker.claims)
def test_update_pci_for_migration_in(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_migration(self.inst)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_update_pci_for_migration_in(self, mock_get):
self._create_pci_requests_object(mock_get, fake_pci_requests)
self.tracker.update_pci_for_migration(None, self.inst)
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(free_devs), 1)
self.assertEqual(free_devs[0]['vendor_id'], 'v')
def test_update_pci_for_migration_out(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_migration(self.inst)
self.tracker.update_pci_for_migration(self.inst, sign=-1)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_update_pci_for_migration_out(self, mock_get):
self._create_pci_requests_object(mock_get, fake_pci_requests)
self.tracker.update_pci_for_migration(None, self.inst)
self.tracker.update_pci_for_migration(None, self.inst, sign=-1)
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(free_devs), 3)
self.assertEqual(set([dev['vendor_id'] for
@ -258,16 +273,19 @@ class PciDevTrackerTestCase(test.TestCase):
for dev in self.tracker.pci_devs:
self.assertEqual(dev.compute_node_id, 1)
def test_clean_usage(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_clean_usage(self, mock_get):
inst_2 = copy.copy(self.inst)
inst_2.uuid = 'uuid5'
migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v'}]}]
self.tracker.update_pci_for_instance(self.inst)
self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}]
self.tracker.update_pci_for_instance(inst_2)
self._create_pci_requests_object(mock_get,
[{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
self.tracker.update_pci_for_instance(None, self.inst)
self._create_pci_requests_object(mock_get,
[{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
self.tracker.update_pci_for_instance(None, inst_2)
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(free_devs), 1)
self.assertEqual(free_devs[0]['vendor_id'], 'v')
@ -279,16 +297,19 @@ class PciDevTrackerTestCase(test.TestCase):
set([dev['vendor_id'] for dev in free_devs]),
set(['v', 'v1']))
def test_clean_usage_claims(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_clean_usage_claims(self, mock_get):
inst_2 = copy.copy(self.inst)
inst_2.uuid = 'uuid5'
migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v'}]}]
self.tracker.update_pci_for_instance(self.inst)
self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}]
self.tracker.update_pci_for_migration(inst_2)
self._create_pci_requests_object(mock_get,
[{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
self.tracker.update_pci_for_instance(None, self.inst)
self._create_pci_requests_object(mock_get,
[{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
self.tracker.update_pci_for_migration(None, inst_2)
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(free_devs), 1)
self.tracker.clean_usage([self.inst], [migr], [orph])
@ -298,12 +319,13 @@ class PciDevTrackerTestCase(test.TestCase):
set([dev['vendor_id'] for dev in free_devs]),
set(['v', 'v1']))
def test_clean_usage_no_request_match_no_claims(self):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
def test_clean_usage_no_request_match_no_claims(self, mock_get):
# Tests the case that there is no match for the request so the
# claims mapping is set to None for the instance when the tracker
# calls clean_usage.
self.pci_requests = None
self.tracker.update_pci_for_migration(instance=self.inst, sign=1)
self._create_pci_requests_object(mock_get, [])
self.tracker.update_pci_for_migration(None, instance=self.inst, sign=1)
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(3, len(free_devs))
self.tracker.clean_usage([], [], [])

View File

@ -16,7 +16,6 @@
"""Tests for PCI request."""
from nova import exception
from nova.openstack.common import jsonutils
from nova.pci import pci_request as pci_request
from nova import test
@ -152,6 +151,13 @@ class AliasTestCase(test.NoDBTestCase):
exception.PciInvalidAlias,
pci_request._get_alias_from_config)
def _verify_result(self, expected, real):
exp_real = zip(expected, real)
for exp, real in exp_real:
self.assertEqual(exp['count'], real.count)
self.assertEqual(exp['alias_name'], real.alias_name)
self.assertEqual(exp['spec'], real.spec)
def test_aliase_2_request(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [
@ -170,9 +176,7 @@ class AliasTestCase(test.NoDBTestCase):
requests = pci_request._translate_alias_to_requests(
"QuicAssist : 3, IntelNIC: 1")
self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
exp_real = zip(expect_request, requests)
for exp, real in exp_real:
self.assertEqual(real, exp)
self._verify_result(expect_request, requests)
def test_aliase_2_request_invalid(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
@ -198,97 +202,12 @@ class AliasTestCase(test.NoDBTestCase):
flavor = {'extra_specs': {"pci_passthrough:alias":
"QuicAssist:3, IntelNIC: 1"}}
requests = pci_request.get_pci_requests_from_flavor(flavor)
self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
exp_real = zip(expect_request, requests)
for exp, real in exp_real:
self.assertEqual(real, exp)
self.assertEqual(set([1, 3]),
set([p.count for p in requests.requests]))
self._verify_result(expect_request, requests.requests)
def test_get_pci_requests_from_flavor_no_extra_spec(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
flavor = {}
requests = pci_request.get_pci_requests_from_flavor(flavor)
self.assertEqual([], requests)
def test_get_instance_pci_requests_no_meta(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
instance = {}
requests = pci_request.get_instance_pci_requests(instance)
self.assertEqual([], requests)
def test_get_instance_pci_requests_no_request(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
instance = {'system_metadata': {'a': 'b'}}
requests = pci_request.get_instance_pci_requests(instance)
self.assertEqual([], requests)
def test_get_instance_pci_requests(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [{
'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'device_type': "ACCEL",
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'}]
instance = {"system_metadata": {"pci_requests":
jsonutils.dumps(expect_request)}}
requests = pci_request.get_instance_pci_requests(instance)
exp_real = zip(expect_request, requests)
for exp, real in exp_real:
self.assertEqual(real, exp)
def test_get_instance_pci_requests_prefix(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [{
'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'device_type': "ACCEL",
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'}]
instance = {"system_metadata": {"new_pci_requests":
jsonutils.dumps(expect_request)}}
requests = pci_request.get_instance_pci_requests(instance, 'new_')
exp_real = zip(expect_request, requests)
for exp, real in exp_real:
self.assertEqual(real, exp)
def test_save_flavor_pci_info(self):
self.flags(pci_alias=[_fake_alias1, _fake_alias3])
expect_request = [
{'count': 3,
'spec': [{'vendor_id': '8086', 'product_id': '4443',
'device_type': "ACCEL",
'capability_type': 'pci'}],
'alias_name': 'QuicAssist'},
{'count': 1,
'spec': [{'vendor_id': '8086', 'product_id': '1111',
'device_type': "NIC",
'capability_type': 'pci'}],
'alias_name': 'IntelNIC'}, ]
flavor = {'extra_specs': {"pci_passthrough:alias":
"QuicAssist:3, IntelNIC: 1"}}
meta = {}
pci_request.save_flavor_pci_info(meta, flavor)
real = jsonutils.loads(meta['pci_requests'])
exp_real = zip(expect_request, real)
for exp, real in exp_real:
self.assertEqual(real, exp)
meta = {}
pci_request.save_flavor_pci_info(meta, flavor, "old_")
real = jsonutils.loads(meta['old_pci_requests'])
exp_real = zip(expect_request, real)
for exp, real in exp_real:
self.assertEqual(real, exp)
def test_delete_flavor_pci_info(self):
meta = {"pci_requests": "fake", "old_pci_requests": "fake"}
pci_request.delete_flavor_pci_info(meta, '')
self.assertNotIn('pci_requests', meta)
pci_request.delete_flavor_pci_info(meta, 'old_')
self.assertNotIn('old_pci_requests', meta)
self.assertEqual([], requests.requests)

View File

@ -80,7 +80,10 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.driver.select_destinations,
self.context, fake_request_spec, {})
def test_select_destination_works(self):
@mock.patch.object(host_manager.objects.InstancePCIRequests,
'get_by_instance_uuid',
return_value=host_manager.objects.InstancePCIRequests(requests=[]))
def test_select_destination_works(self, mock_pci_req):
fake_request_spec = self._get_fake_request_spec()
fake_host = self._get_fake_host_state()
self.driver.all_host_states = [fake_host]
@ -109,6 +112,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
"root_gb": 1,
"ephemeral_gb": 1,
"vcpus": 1,
"uuid": 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
}
request_spec = {
"instance_type": flavor,
@ -129,7 +133,10 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
}
return host_state
def test_performance_check_select_destination(self):
@mock.patch.object(host_manager.objects.InstancePCIRequests,
'get_by_instance_uuid',
return_value=host_manager.objects.InstancePCIRequests(requests=[]))
def test_performance_check_select_destination(self, mock_pci_req):
hosts = 2
requests = 1

View File

@ -155,7 +155,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.driver.schedule_run_instance(fake_context, request_spec,
None, None, None, None, {}, False)
def test_schedule_happy_day(self):
@mock.patch.object(objects.InstancePCIRequests,
'get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_schedule_happy_day(self, mock_pci_req):
"""Make sure there's nothing glaringly wrong with _schedule()
by doing a happy day pass through.
"""
@ -186,7 +189,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}}
'os_type': 'Linux',
'uuid': 'fake-uuid'}}
self.mox.ReplayAll()
weighed_hosts = sched._schedule(fake_context, request_spec, {})
self.assertEqual(len(weighed_hosts), 10)
@ -456,7 +460,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self._group_details_in_filter_properties(group, 'get_by_name',
group.name, 'anti-affinity')
def test_schedule_host_pool(self):
@mock.patch.object(objects.InstancePCIRequests,
'get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_schedule_host_pool(self, mock_pci_req):
"""Make sure the scheduler_host_subset_size property works properly."""
self.flags(scheduler_host_subset_size=2)
@ -469,11 +476,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
fakes.mox_host_manager_db_calls(self.mox, fake_context)
instance_properties = {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux',
'uuid': 'fake-uuid'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
@ -485,7 +493,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# one host should be chosen
self.assertEqual(len(hosts), 1)
def test_schedule_large_host_pool(self):
@mock.patch.object(objects.InstancePCIRequests,
'get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_schedule_large_host_pool(self, mock_pci_req):
"""Hosts should still be chosen if pool size
is larger than number of filtered hosts.
"""
@ -500,11 +511,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
fakes.mox_host_manager_db_calls(self.mox, fake_context)
instance_properties = {'project_id': 1,
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}
'root_gb': 512,
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux',
'uuid': 'fake-uuid'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
filter_properties = {}
@ -515,7 +527,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# one host should be chosen
self.assertEqual(len(hosts), 1)
def test_schedule_chooses_best_host(self):
@mock.patch.object(objects.InstancePCIRequests,
'get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_schedule_chooses_best_host(self, mock_pci_req):
"""If scheduler_host_subset_size is 1, the largest host with greatest
weight should be returned.
"""
@ -543,7 +558,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'}
'os_type': 'Linux',
'uuid': 'fake-uuid'}
request_spec = dict(instance_properties=instance_properties,
instance_type={})
@ -561,7 +577,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual(50, hosts[0].weight)
def test_select_destinations(self):
@mock.patch.object(objects.InstancePCIRequests,
'get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_select_destinations(self, mock_pci_req):
"""select_destinations is basically a wrapper around _schedule().
Similar to the _schedule tests, this just does a happy path test to
@ -598,7 +617,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1,
'os_type': 'Linux'},
'os_type': 'Linux',
'uuid': 'fake-uuid'},
'num_instances': 1}
self.mox.ReplayAll()
dests = sched.select_destinations(fake_context, request_spec, {})

View File

@ -25,6 +25,7 @@ import stubout
from nova.compute import arch
from nova import context
from nova import db
from nova import objects
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.pci import pci_stats
@ -1755,7 +1756,9 @@ class HostFiltersTestCase(test.NoDBTestCase):
def test_pci_passthrough_pass(self):
filt_cls = self.class_map['PciPassthroughFilter']()
requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '8086'}])
requests = objects.InstancePCIRequests(requests=[request])
filter_properties = {'pci_requests': requests}
self.stubs.Set(pci_stats.PciDeviceStats, 'support_requests',
self._fake_pci_support_requests)
@ -1764,11 +1767,13 @@ class HostFiltersTestCase(test.NoDBTestCase):
attribute_dict={'pci_stats': pci_stats.PciDeviceStats()})
self.pci_request_result = True
self.assertTrue(filt_cls.host_passes(host, filter_properties))
self.assertEqual(self.pci_requests, requests)
self.assertEqual(self.pci_requests, requests.requests)
def test_pci_passthrough_fail(self):
filt_cls = self.class_map['PciPassthroughFilter']()
requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '8086'}])
requests = objects.InstancePCIRequests(requests=[request])
filter_properties = {'pci_requests': requests}
self.stubs.Set(pci_stats.PciDeviceStats, 'support_requests',
self._fake_pci_support_requests)
@ -1777,7 +1782,7 @@ class HostFiltersTestCase(test.NoDBTestCase):
attribute_dict={'pci_stats': pci_stats.PciDeviceStats()})
self.pci_request_result = False
self.assertFalse(filt_cls.host_passes(host, filter_properties))
self.assertEqual(self.pci_requests, requests)
self.assertEqual(self.pci_requests, requests.requests)
def test_pci_passthrough_no_pci_request(self):
filt_cls = self.class_map['PciPassthroughFilter']()

View File

@ -15,6 +15,9 @@
"""
Tests For HostManager
"""
import mock
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
@ -474,18 +477,23 @@ class HostStateTestCase(test.NoDBTestCase):
self.assertIsNone(host.pci_stats)
self.assertEqual(hyper_ver_int, host.hypervisor_version)
def test_stat_consumption_from_instance(self):
@mock.patch.object(host_manager.objects.InstancePCIRequests,
'get_by_instance_uuid',
return_value=host_manager.objects.InstancePCIRequests(requests=[]))
def test_stat_consumption_from_instance(self, mock_pci_req):
host = host_manager.HostState("fakehost", "fakenode")
instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
project_id='12345', vm_state=vm_states.BUILDING,
task_state=task_states.SCHEDULING, os_type='Linux')
host.consume_from_instance(instance)
task_state=task_states.SCHEDULING, os_type='Linux',
uuid='fake-uuid')
host.consume_from_instance('fake-context', instance)
instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
project_id='12345', vm_state=vm_states.PAUSED,
task_state=None, os_type='Linux')
host.consume_from_instance(instance)
task_state=None, os_type='Linux',
uuid='fake-uuid')
host.consume_from_instance('fake-context', instance)
self.assertEqual(2, host.num_instances)
self.assertEqual(1, host.num_io_ops)