Merge "Support move ops with extended resource request"
This commit is contained in:
commit
e81211318a
|
@ -45,9 +45,8 @@ Extended resource request
|
|||
Since neutron 19.0.0 (Xena), neutron implements an extended resource request
|
||||
format via the the ``port-resource-request-groups`` neutron API extension. As
|
||||
of nova 24.0.0 (Xena), nova does not fully support the new extension. If the
|
||||
extension is enabled in neutron, then nova will reject server move
|
||||
operations, as well as interface attach operation. Admins should not enable
|
||||
this API extension in neutron.
|
||||
extension is enabled in neutron, then nova will reject interface attach
|
||||
operation. Admins should not enable this API extension in neutron.
|
||||
|
||||
Please note that Nova only supports the server create operation if every
|
||||
nova-compute service also upgraded to Xena version and the
|
||||
|
|
|
@ -72,9 +72,8 @@ Extended resource request
|
|||
Since neutron 19.0.0 (Xena), neutron implements an extended resource request
|
||||
format via the the ``port-resource-request-groups`` neutron API extension. As
|
||||
of nova 24.0.0 (Xena), Nova does not fully support the new extension. If the
|
||||
extension is enabled in neutron, then nova will reject server move operations,
|
||||
as well as interface attach operation. Admins should not enable this API
|
||||
extension in neutron.
|
||||
extension is enabled in neutron, then nova will reject interface attach
|
||||
operation. Admins should not enable this API extension in neutron.
|
||||
|
||||
Please note that Nova only supports the server create operation if every
|
||||
nova-compute service also upgraded to Xena version and the
|
||||
|
|
|
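For context (not part of the change itself): the documentation above contrasts the legacy, flat port resource request with the grouped format added by the port-resource-request-groups extension. A hedged illustration of the two payload shapes, with purely illustrative values:

# Legacy format: one flat group of resources and required traits per port.
legacy_resource_request = {
    "resources": {"NET_BW_EGR_KILOBIT_PER_SEC": 1000},
    "required": ["CUSTOM_PHYSNET_PHYSNET0", "CUSTOM_VNIC_TYPE_NORMAL"],
}

# Extended format: multiple request groups (e.g. QoS minimum bandwidth plus
# minimum packet rate) and request-level parameters such as same_subtree.
extended_resource_request = {
    "request_groups": [
        {
            "id": "group-uuid-1",
            "resources": {"NET_BW_EGR_KILOBIT_PER_SEC": 1000},
            "required": ["CUSTOM_PHYSNET_PHYSNET0", "CUSTOM_VNIC_TYPE_NORMAL"],
        },
        {
            "id": "group-uuid-2",
            "resources": {"NET_PACKET_RATE_KILOPACKET_PER_SEC": 100},
            "required": ["CUSTOM_VNIC_TYPE_NORMAL"],
        },
    ],
    "same_subtree": ["group-uuid-1", "group-uuid-2"],
}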
@@ -26,7 +26,6 @@ from nova.compute import api as compute
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import neutron
from nova.policies import evacuate as evac_policies
from nova import utils

@@ -40,7 +39,6 @@ class EvacuateController(wsgi.Controller):
super(EvacuateController, self).__init__()
self.compute_api = compute.API()
self.host_api = compute.HostAPI()
self.network_api = neutron.API()

def _get_on_shared_storage(self, req, evacuate_body):
if api_version_request.is_supported(req, min_version='2.14'):

@@ -120,13 +118,6 @@ class EvacuateController(wsgi.Controller):
msg = _("The target host can't be the same one.")
raise exc.HTTPBadRequest(explanation=msg)

if self.network_api.instance_has_extended_resource_request(id):
msg = _(
"The evacuate server operation with port having extended "
"resource request, like a port with both QoS minimum "
"bandwidth and packet rate policies, is not yet supported.")
raise exc.HTTPBadRequest(explanation=msg)

try:
self.compute_api.evacuate(context, instance, host,
on_shared_storage, password, force)

@@ -136,6 +127,7 @@ class EvacuateController(wsgi.Controller):
except (
exception.ComputeServiceInUse,
exception.ForbiddenPortsWithAccelerator,
exception.ExtendedResourceRequestOldCompute,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.ForbiddenWithAccelerators as e:
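A minimal sketch of the pattern this change applies to each move API (evacuate above; migrate, resize and unshelve below): the up-front instance_has_extended_resource_request() rejection is removed, and the compute API's new ExtendedResourceRequestOldCompute exception is translated to HTTP 400 instead. Sketch only; the real handlers are the hunks shown here:

try:
    self.compute_api.evacuate(context, instance, host,
                              on_shared_storage, password, force)
except exception.ExtendedResourceRequestOldCompute as e:
    # Raised by the compute API when a port carries an extended resource
    # request but some nova-compute is older than the required service
    # version for move operations.
    raise exc.HTTPBadRequest(explanation=e.format_message())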
@@ -26,7 +26,6 @@ from nova.api import validation
from nova.compute import api as compute
from nova import exception
from nova.i18n import _
from nova.network import neutron
from nova.policies import migrate_server as ms_policies

LOG = logging.getLogger(__name__)

@@ -36,7 +35,6 @@ class MigrateServerController(wsgi.Controller):
def __init__(self):
super(MigrateServerController, self).__init__()
self.compute_api = compute.API()
self.network_api = neutron.API()

@wsgi.response(202)
@wsgi.expected_errors((400, 403, 404, 409))

@@ -56,13 +54,6 @@ class MigrateServerController(wsgi.Controller):
body['migrate'] is not None):
host_name = body['migrate'].get('host')

if self.network_api.instance_has_extended_resource_request(id):
msg = _(
"The migrate server operation with port having extended "
"resource request, like a port with both QoS minimum "
"bandwidth and packet rate policies, is not yet supported.")
raise exc.HTTPBadRequest(explanation=msg)

try:
self.compute_api.resize(req.environ['nova.context'], instance,
host_name=host_name)

@@ -81,9 +72,12 @@ class MigrateServerController(wsgi.Controller):
'migrate', id)
except exception.InstanceNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except (exception.ComputeHostNotFound,
exception.CannotMigrateToSameHost,
exception.ForbiddenPortsWithAccelerator) as e:
except (
exception.ComputeHostNotFound,
exception.CannotMigrateToSameHost,
exception.ForbiddenPortsWithAccelerator,
exception.ExtendedResourceRequestOldCompute,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())

@wsgi.response(202)

@@ -127,13 +121,6 @@ class MigrateServerController(wsgi.Controller):
disk_over_commit = strutils.bool_from_string(disk_over_commit,
strict=True)

if self.network_api.instance_has_extended_resource_request(id):
msg = _(
"The live migrate server operation with port having extended "
"resource request, like a port with both QoS minimum "
"bandwidth and packet rate policies, is not yet supported.")
raise exc.HTTPBadRequest(explanation=msg)

try:
self.compute_api.live_migrate(context, instance, block_migration,
disk_over_commit, host, force,

@@ -164,7 +151,10 @@ class MigrateServerController(wsgi.Controller):
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.ComputeHostNotFound as e:
except (
exception.ComputeHostNotFound,
exception.ExtendedResourceRequestOldCompute,
)as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
@@ -40,7 +40,6 @@ from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.image import glance
from nova.network import neutron
from nova import objects
from nova.policies import servers as server_policies
from nova import utils

@@ -114,7 +113,6 @@ class ServersController(wsgi.Controller):
def __init__(self):
super(ServersController, self).__init__()
self.compute_api = compute.API()
self.network_api = neutron.API()

@wsgi.expected_errors((400, 403))
@validation.query_schema(schema_servers.query_params_v275, '2.75')

@@ -1038,15 +1036,6 @@ class ServersController(wsgi.Controller):
target={'user_id': instance.user_id,
'project_id': instance.project_id})

if self.network_api.instance_has_extended_resource_request(
instance_id
):
msg = _(
"The resize server operation with port having extended "
"resource request, like a port with both QoS minimum "
"bandwidth and packet rate policies, is not yet supported.")
raise exc.HTTPBadRequest(explanation=msg)

try:
self.compute_api.resize(context, instance, flavor_id,
auto_disk_config=auto_disk_config)

@@ -1073,11 +1062,14 @@ class ServersController(wsgi.Controller):
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.AutoDiskConfigDisabledByImage,
exception.CannotResizeDisk,
exception.CannotResizeToSameFlavor,
exception.FlavorNotFound,
exception.ForbiddenPortsWithAccelerator) as e:
except (
exception.AutoDiskConfigDisabledByImage,
exception.CannotResizeDisk,
exception.CannotResizeToSameFlavor,
exception.FlavorNotFound,
exception.ForbiddenPortsWithAccelerator,
exception.ExtendedResourceRequestOldCompute,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except INVALID_FLAVOR_IMAGE_EXCEPTIONS as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
@@ -23,10 +23,7 @@ from nova.api.openstack.compute.schemas import shelve as shelve_schemas
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova.network import neutron
from nova.policies import shelve as shelve_policies

LOG = logging.getLogger(__name__)

@@ -36,7 +33,6 @@ class ShelveController(wsgi.Controller):
def __init__(self):
super(ShelveController, self).__init__()
self.compute_api = compute.API()
self.network_api = neutron.API()

@wsgi.response(202)
@wsgi.expected_errors((404, 403, 409, 400))

@@ -107,17 +103,6 @@ class ShelveController(wsgi.Controller):
if support_az and unshelve_dict:
new_az = unshelve_dict['availability_zone']

if (
instance.vm_state == vm_states.SHELVED_OFFLOADED and
self.network_api.instance_has_extended_resource_request(id)
):
msg = _(
"The unshelve server operation on a shelve offloaded server "
"with port having extended resource request, like a "
"port with both QoS minimum bandwidth and packet rate "
"policies, is not yet supported.")
raise exc.HTTPBadRequest(explanation=msg)

try:
self.compute_api.unshelve(context, instance, new_az=new_az)
except (exception.InstanceIsLocked,

@@ -128,5 +113,8 @@ class ShelveController(wsgi.Controller):
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unshelve',
id)
except exception.InvalidRequest as e:
except (
exception.InvalidRequest,
exception.ExtendedResourceRequestOldCompute,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
@@ -70,6 +70,7 @@ from nova.objects import fields as fields_obj
from nova.objects import image_meta as image_meta_obj
from nova.objects import keypair as keypair_obj
from nova.objects import quotas as quotas_obj
from nova.objects import service as service_obj
from nova.pci import request as pci_request
from nova.policies import servers as servers_policies
import nova.policy

@@ -111,6 +112,7 @@ SUPPORT_ACCELERATOR_SERVICE_FOR_REBUILD = 53
SUPPORT_VNIC_TYPE_ACCELERATOR = 57

MIN_COMPUTE_BOOT_WITH_EXTENDED_RESOURCE_REQUEST = 58
MIN_COMPUTE_MOVE_WITH_EXTENDED_RESOURCE_REQUEST = 59

# FIXME(danms): Keep a global cache of the cells we find the
# first time we look. This needs to be refreshed on a timer or

@@ -355,6 +357,20 @@ def block_port_accelerators():
return inner


def block_extended_resource_request(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if self.network_api.instance_has_extended_resource_request(
instance.uuid
):
version = service_obj.get_minimum_version_all_cells(
context, ["nova-compute"])
if version < MIN_COMPUTE_MOVE_WITH_EXTENDED_RESOURCE_REQUEST:
raise exception.ExtendedResourceRequestOldCompute()
return function(self, context, instance, *args, **kwargs)
return inner


@profiler.trace_cls("compute_api")
class API:
"""API for interacting with the compute manager."""
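A condensed illustration of how the new decorator is applied; the actual decorator stacks for resize, unshelve, live migrate and evacuate appear in later hunks of this file:

# Illustration only, based on the hunks that follow: the new guard is
# stacked on top of the existing guards of every move operation.
@block_extended_resource_request
@block_port_accelerators()
@check_instance_lock
def resize(self, context, instance, flavor_id=None, **kwargs):
    # Reaching this point means either no port of the instance carries an
    # extended resource request, or every nova-compute already reports at
    # least MIN_COMPUTE_MOVE_WITH_EXTENDED_RESOURCE_REQUEST (59).
    ...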
@@ -3806,16 +3822,18 @@ class API:
# during the resize.
if instance.get_network_info().has_port_with_allocation():
# TODO(gibi): do not directly overwrite the
# RequestSpec.requested_resources as others like cyborg might added
# RequestSpec.requested_resources and
# RequestSpec.request_level_params as others like cyborg might added
# to things there already
# NOTE(gibi): We need to collect the requested resource again as it
# is intentionally not persisted in nova. Note that this needs to
# be done here as the nova API code directly calls revert on the
# dest compute service skipping the conductor.
port_res_req = (
port_res_req, req_lvl_params = (
self.network_api.get_requested_resource_for_instance(
context, instance.uuid))
reqspec.requested_resources = port_res_req
reqspec.request_level_params = req_lvl_params

instance.task_state = task_states.RESIZE_REVERTING
instance.save(expected_task_state=[None])
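The request_level_params attribute set here carries the request-wide parts of the extended resource request (for example the same_subtree constraint), while requested_resources keeps the per-group resources and traits. A hedged sketch of where those values come from, with illustrative group ids:

# Hedged sketch: RequestLevelParams is built from the port's extended
# resource request; same_subtree groups must be satisfied from providers
# under a common subtree in placement.
rlp = objects.RequestLevelParams.from_port_request(
    port_resource_request={
        'request_groups': [...],                  # per-group part
        'same_subtree': ['group-1', 'group-2'],   # request-level part
    })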
@@ -3946,8 +3964,11 @@ class API:
min_compute_version, MIN_COMPUTE_CROSS_CELL_RESIZE)
return False

if self.network_api.get_requested_resource_for_instance(
context, instance.uuid):
res_req, req_lvl_params = (
self.network_api.get_requested_resource_for_instance(
context, instance.uuid)
)
if res_req:
LOG.info(
'Request is allowed by policy to perform cross-cell '
'resize but the instance has ports with resource request '

@@ -4003,9 +4024,10 @@ class API:

# TODO(stephenfin): This logic would be so much easier to grok if we
# finally split resize and cold migration into separate code paths
@block_extended_resource_request
@block_port_accelerators()
# FIXME(sean-k-mooney): Cold migrate and resize to different hosts
# probably works but they have not been tested so block them for now
@block_port_accelerators()
@reject_vdpa_instances(instance_actions.RESIZE)
@block_accelerators()
@check_instance_lock

@@ -4340,6 +4362,7 @@ class API:
"vol_zone": volume['availability_zone']}
raise exception.MismatchVolumeAZException(reason=msg)

@block_extended_resource_request
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])

@@ -5206,6 +5229,7 @@ class API:

return _metadata

@block_extended_resource_request
@block_port_accelerators()
@reject_vdpa_instances(instance_actions.LIVE_MIGRATION)
@block_accelerators()

@@ -5339,8 +5363,9 @@ class API:
self.compute_rpcapi.live_migration_abort(context,
instance, migration.id)

# FIXME(sean-k-mooney): rebuild works but we have not tested evacuate yet
@block_extended_resource_request
@block_port_accelerators()
# FIXME(sean-k-mooney): rebuild works but we have not tested evacuate yet
@reject_vdpa_instances(instance_actions.EVACUATE)
@reject_vtpm_instances(instance_actions.EVACUATE)
@block_accelerators(until_service=SUPPORT_ACCELERATOR_SERVICE_FOR_REBUILD)
@@ -987,20 +987,21 @@ class ComputeTaskManager:
# when populate_filter_properties accepts it
filter_properties = request_spec.\
to_legacy_filter_properties_dict()

external_resources = (
res_req, req_lvl_params = (
self.network_api.get_requested_resource_for_instance(
context, instance.uuid))
context, instance.uuid)
)
extra_specs = request_spec.flavor.extra_specs
device_profile = extra_specs.get('accel:device_profile')
external_resources.extend(
res_req.extend(
cyborg.get_device_profile_request_groups(
context, device_profile) if device_profile else [])
# NOTE(gibi): When other modules want to handle similar
# non-nova resources then here we have to collect all
# the external resource requests in a single list and
# add them to the RequestSpec.
request_spec.requested_resources = external_resources
request_spec.requested_resources = res_req
request_spec.request_level_params = req_lvl_params

# NOTE(cfriesen): Ensure that we restrict the scheduler to
# the cell specified by the instance mapping.

@@ -1184,14 +1185,13 @@ class ComputeTaskManager:
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()

external_resources = []
external_resources += (
res_req, req_lvl_params = (
self.network_api.get_requested_resource_for_instance(
context, instance.uuid))
context, instance.uuid)
)
extra_specs = request_spec.flavor.extra_specs
device_profile = extra_specs.get('accel:device_profile')
external_resources.extend(
res_req.extend(
cyborg.get_device_profile_request_groups(
context, device_profile)
if device_profile else [])

@@ -1199,7 +1199,8 @@ class ComputeTaskManager:
# non-nova resources then here we have to collect all
# the external resource requests in a single list and
# add them to the RequestSpec.
request_spec.requested_resources = external_resources
request_spec.requested_resources = res_req
request_spec.request_level_params = req_lvl_params

try:
# if this is a rebuild of instance on the same host with
@@ -372,16 +372,13 @@ class LiveMigrationTask(base.TaskBase):
self._update_migrate_vifs_from_bindings(self.migrate_data.vifs,
bindings)

@staticmethod
def _get_port_profile_from_provider_mapping(port_id, provider_mappings):
if port_id in provider_mappings:
# NOTE(gibi): In the resource provider mapping there can be
# more than one RP fulfilling a request group. But resource
# requests of a Neutron port is always mapped to a
# numbered request group that is always fulfilled by one
# resource provider. So we only pass that single RP UUID
# here.
return {'allocation': provider_mappings[port_id][0]}
def _get_port_profile_from_provider_mapping(
self, port_id, provider_mappings
):
allocation = self.network_api.get_binding_profile_allocation(
self.context, port_id, provider_mappings)
if allocation:
return {'allocation': allocation}
else:
return {}

@@ -476,7 +473,7 @@ class LiveMigrationTask(base.TaskBase):
# is not forced to be the original host
request_spec.reset_forced_destinations()

port_res_req = (
port_res_req, req_lvl_params = (
self.network_api.get_requested_resource_for_instance(
self.context, self.instance.uuid))
# NOTE(gibi): When cyborg or other module wants to handle

@@ -484,6 +481,7 @@ class LiveMigrationTask(base.TaskBase):
# all the external resource requests in a single list and
# add them to the RequestSpec.
request_spec.requested_resources = port_res_req
request_spec.request_level_params = req_lvl_params

scheduler_utils.setup_instance_group(self.context, request_spec)
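The rewritten helper above feeds the per-port binding profile when ports are bound on the destination host; its result is merged with any profile carried in migrate_data (see the live migration task unit tests later in this change). An abbreviated illustration, where profile_from_migrate_data stands in for the value decoded from VIFMigrateData.profile_json:

# Abbreviated, illustrative merge; names other than
# _get_port_profile_from_provider_mapping are stand-ins.
profile = dict(profile_from_migrate_data)
profile.update(
    self._get_port_profile_from_provider_mapping(port_id, provider_mappings))
# e.g. {'some-key': 'value', 'allocation': {group_uuid: rp_uuid, ...}}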
@@ -249,12 +249,15 @@ class MigrationTask(base.TaskBase):
scheduler_utils.populate_retry(legacy_props,
self.instance.uuid)

port_res_req = self.network_api.get_requested_resource_for_instance(
self.context, self.instance.uuid)
port_res_req, req_lvl_params = (
self.network_api.get_requested_resource_for_instance(
self.context, self.instance.uuid)
)
# NOTE(gibi): When cyborg or other module wants to handle similar
# non-nova resources then here we have to collect all the external
# resource requests in a single list and add them to the RequestSpec.
self.request_spec.requested_resources = port_res_req
self.request_spec.request_level_params = req_lvl_params

self._set_requested_destination_cell(legacy_props)
@@ -1027,6 +1027,33 @@ class API:
return True
return False

def get_binding_profile_allocation(
self,
context: nova_context.RequestContext,
port_id: str,
resource_provider_mapping: ty.Dict[str, ty.List[str]],
) -> ty.Union[None, str, ty.Dict[str, str]]:
"""Calculate the value of the allocation key of the binding:profile
based on the allocated resources.

:param context: the request context
:param port_id: the uuid of the neutron port
:param resource_provider_mapping: the mapping returned by the placement
defining which request group get allocated from which resource
providers
:returns: None if the port has no resource request. Returns a single
RP UUID if the port has a legacy resource request. Returns a dict
of request group id: resource provider UUID mapping if the port has
an extended resource request.
"""
neutron = get_client(context)
port = self._show_port(context, port_id, neutron_client=neutron)
if self._has_resource_request(context, port, neutron):
return self._get_binding_profile_allocation(
context, port, neutron, resource_provider_mapping)
else:
return None

def _get_binding_profile_allocation(
self, context, port, neutron, resource_provider_mapping
):
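A usage sketch for the new method above, following its docstring; the concrete values are illustrative only:

# None          -> the port has no resource request; nothing to set.
# 'rp-uuid'     -> legacy resource request; a single resource provider UUID.
# {group: rp}   -> extended resource request; one provider per request group.
allocation = network_api.get_binding_profile_allocation(
    context, port_id, provider_mappings)
if allocation:
    binding_profile['allocation'] = allocation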
@@ -1698,7 +1725,7 @@ class API:
neutron port only allocates from a single resource provider.
"""
neutron = get_client(context)
port_allocation = {}
port_allocation: ty.Dict = {}
try:
# NOTE(gibi): we need to read the port resource information from
# neutron here as we might delete the port below

@@ -2378,12 +2405,18 @@ class API:
raise exception.NetworkNotFound(network_id=id_str)
return ports_needed_per_instance

def get_requested_resource_for_instance(self, context, instance_uuid):
def get_requested_resource_for_instance(
self,
context: nova_context.RequestContext,
instance_uuid: str
) -> ty.Tuple[
ty.List['objects.RequestGroup'], 'objects.RequestLevelParams']:
"""Collect resource requests from the ports associated to the instance

:param context: nova request context
:param instance_uuid: The UUID of the instance
:return: A list of RequestGroup objects
:return: A two tuple with a list of RequestGroup objects and a
RequestLevelParams object.
"""

# NOTE(gibi): We need to use an admin client as otherwise a non admin
@@ -2393,19 +2426,33 @@ class API:
neutron = get_client(context, admin=True)
# get the ports associated to this instance
data = neutron.list_ports(
device_id=instance_uuid, fields=['id', 'resource_request'])
device_id=instance_uuid, fields=['id', constants.RESOURCE_REQUEST])
resource_requests = []
request_level_params = objects.RequestLevelParams()
extended_rr = self.has_extended_resource_request_extension(
context, neutron)

for port in data.get('ports', []):
if port.get('resource_request'):
# NOTE(gibi): explicitly orphan the RequestGroup by setting
# context=None as we never intended to save it to the DB.
resource_requests.append(
objects.RequestGroup.from_port_request(
context=None, port_uuid=port['id'],
resource_request = port.get(constants.RESOURCE_REQUEST)
if extended_rr and resource_request:
resource_requests.extend(
objects.RequestGroup.from_extended_port_request(
context=None,
port_resource_request=port['resource_request']))
request_level_params.extend_with(
objects.RequestLevelParams.from_port_request(
port_resource_request=resource_request))
else:
# keep supporting the old format of the resource_request
if resource_request:
# NOTE(gibi): explicitly orphan the RequestGroup by setting
# context=None as we never intended to save it to the DB.
resource_requests.append(
objects.RequestGroup.from_port_request(
context=None, port_uuid=port['id'],
port_resource_request=port['resource_request']))

return resource_requests
return resource_requests, request_level_params

def validate_networks(self, context, requested_networks, num_instances):
"""Validate that the tenant can use the requested networks.
@@ -3552,7 +3599,7 @@ class API:
# allocation key in the port binding. However during resize, cold
# migrate, evacuate and unshelve we have to set the binding here.
# Also note that during unshelve no migration object is created.
if p.get('resource_request') and (
if self._has_resource_request(context, p, neutron) and (
migration is None or not migration.is_live_migration
):
if not provider_mappings:

@@ -3579,13 +3626,9 @@ class API:
"compute service but are required for ports with "
"a resource request."))

# NOTE(gibi): In the resource provider mapping there can be
# more than one RP fulfilling a request group. But resource
# requests of a Neutron port is always mapped to a
# numbered request group that is always fulfilled by one
# resource provider. So we only pass that single RP UUID here.
binding_profile[constants.ALLOCATION] = \
provider_mappings[p['id']][0]
binding_profile[constants.ALLOCATION] = (
self._get_binding_profile_allocation(
context, p, neutron, provider_mappings))
updates[constants.BINDING_PROFILE] = binding_profile

port_updates.append((p['id'], updates))
@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)


# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 58
SERVICE_VERSION = 59


# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any

@@ -205,6 +205,10 @@ SERVICE_VERSION_HISTORY = (
# Version 58: Compute RPC v6.0:
# Add support for booting with neutron extended resource request
{'compute_rpc': '6.0'},
# Version 59: Compute RPC v6.0:
# Add support for server move operations with neutron extended resource
# request
{'compute_rpc': '6.0'},
)

# This is used to raise an error at service startup if older than N-1 computes
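A hedged note on how the bump above ties back to the compute API constants introduced earlier in this change: SERVICE_VERSION indexes SERVICE_VERSION_HISTORY, so moving it to 59 requires the matching history entry added here, and the move gate compares the deployment-wide minimum nova-compute version against that number.

# Illustrative check of the gate, not code from this change:
minimum = service_obj.get_minimum_version_all_cells(context, ['nova-compute'])
can_boot = minimum >= 58  # MIN_COMPUTE_BOOT_WITH_EXTENDED_RESOURCE_REQUEST
can_move = minimum >= 59  # MIN_COMPUTE_MOVE_WITH_EXTENDED_RESOURCE_REQUEST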
@@ -2567,8 +2567,6 @@ class ServerMoveWithPortResourceRequestTest(
server, qos_normal_port, qos_sriov_port)


# TODO(gibi): This would show a lot of failing tests. Disable it for now
@unittest.skip
class ServerMoveWithMultiGroupResourceRequestBasedSchedulingTest(
ExtendedPortResourceRequestBasedSchedulingTestBase,
ServerMoveWithPortResourceRequestTest,

@@ -2583,15 +2581,6 @@ class ServerMoveWithMultiGroupResourceRequestBasedSchedulingTest(
super().setUp()
self.neutron = self.useFixture(
MultiGroupResourceRequestNeutronFixture(self))
# Turn off the blanket rejections of the extended resource request.
# This test class wants to prove that the extended resource request is
# supported.
patcher = mock.patch(
'nova.network.neutron.API.instance_has_extended_resource_request',
return_value=False,
)
self.addCleanup(patcher.stop)
patcher.start()


class LiveMigrateAbortWithPortResourceRequestTest(

@@ -2881,14 +2870,16 @@ class ExtendedResourceRequestOldCompute(
hasn't been upgraded to a version that support extended resource request.
So nova rejects the operations due to the old compute.
"""
def setUp(self):
super().setUp()
self.neutron = self.useFixture(
ExtendedResourceRequestNeutronFixture(self))

@mock.patch.object(
objects.service, 'get_minimum_version_all_cells',
new=mock.Mock(return_value=57)
)
def test_boot(self):
self.neutron = self.useFixture(
ExtendedResourceRequestNeutronFixture(self))
ex = self.assertRaises(
client.OpenStackApiException,
self._create_server,
@@ -2903,6 +2894,63 @@ class ExtendedResourceRequestOldCompute(
str(ex)
)

@mock.patch.object(
objects.service, 'get_minimum_version_all_cells',
new=mock.Mock(return_value=58)
)
def _test_operation(self, op_callable):
# boot a server, service version 58 already supports that
server = self._create_server(
flavor=self.flavor,
networks=[{'port': self.neutron.port_with_resource_request['id']}],
)
self._wait_for_state_change(server, 'ACTIVE')

# still the move operations require service version 58 so they will
# fail
ex = self.assertRaises(
client.OpenStackApiException,
op_callable,
server,
)
self.assertEqual(400, ex.response.status_code)
self.assertIn(
'The port-resource-request-groups neutron API extension is not '
'supported by old nova compute service. Upgrade your compute '
'services to Xena (24.0.0) or later.',
str(ex)
)

def test_resize(self):
self._test_operation(
lambda server: self._resize_server(
server, self.flavor_with_group_policy['id']
)
)

def test_migrate(self):
self._test_operation(
lambda server: self._migrate_server(server),
)

def test_live_migrate(self):
self._test_operation(
lambda server: self._live_migrate(server),
)

def test_evacuate(self):
self._test_operation(
lambda server: self._evacuate_server(server),
)

def test_unshelve_after_shelve_offload(self):
def shelve_offload_then_unshelve(server):
self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
self._unshelve_server(server)
self._test_operation(
lambda server: shelve_offload_then_unshelve(server),
)


class ExtendedResourceRequestTempNegativeTest(
PortResourceRequestBasedSchedulingTestBase):
@@ -2943,32 +2991,6 @@ class ExtendedResourceRequestTempNegativeTest(
str(ex)
)

def test_resize(self):
self._test_operation(
'resize server operation',
lambda server: self._resize_server(
server, self.flavor_with_group_policy['id']
)
)

def test_migrate(self):
self._test_operation(
'migrate server operation',
lambda server: self._migrate_server(server),
)

def test_live_migrate(self):
self._test_operation(
'live migrate server operation',
lambda server: self._live_migrate(server),
)

def test_evacuate(self):
self._test_operation(
'evacuate server operation',
lambda server: self._evacuate_server(server),
)

def test_interface_attach(self):
self._test_operation(
'interface attach server operation',

@@ -2977,14 +2999,3 @@ class ExtendedResourceRequestTempNegativeTest(
self.neutron.port_with_sriov_resource_request['id'],
),
)

def test_unshelve_after_shelve_offload(self):

def shelve_offload_then_unshelve(server):
self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
self._unshelve_server(server)

self._test_operation(
'unshelve server operation on a shelve offloaded server',
lambda server: shelve_offload_then_unshelve(server),
)
@@ -86,6 +86,14 @@ class _ComputeAPIUnitTestMixIn(object):
self.context = context.RequestContext(self.user_id,
self.project_id)

self.useFixture(
fixtures.MonkeyPatch(
'nova.network.neutron.API.'
'instance_has_extended_resource_request',
mock.Mock(return_value=False),
)
)

def _get_vm_states(self, exclude_states=None):
vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED,

@@ -1762,7 +1770,7 @@ class _ComputeAPIUnitTestMixIn(object):

@mock.patch('nova.virt.hardware.numa_get_constraints')
@mock.patch('nova.network.neutron.API.get_requested_resource_for_instance',
return_value=[])
return_value=([], objects.RequestLevelParams()))
@mock.patch('nova.availability_zones.get_host_availability_zone',
return_value='nova')
@mock.patch('nova.objects.Quotas.check_deltas')

@@ -1854,6 +1862,9 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertEqual(
[],
mock_get_reqspec.return_value.requested_resources)
self.assertEqual(
mock_get_requested_resources.return_value[1],
mock_get_reqspec.return_value.request_level_params)

def test_revert_resize(self):
self._test_revert_resize(same_flavor=False)
@@ -2189,6 +2200,27 @@ class _ComputeAPIUnitTestMixIn(object):
self.compute_api.resize,
self.context, fake_inst, flavor_id=new_flavor.flavorid)

@mock.patch(
'nova.compute.api.API.get_instance_host_status',
new=mock.Mock(return_value=fields_obj.HostStatus.UP)
)
@mock.patch(
'nova.objects.service.get_minimum_version_all_cells',
new=mock.Mock(return_value=58),
)
@mock.patch(
'nova.network.neutron.API.instance_has_extended_resource_request',
new=mock.Mock(return_value=True),
)
def test_resize_with_extended_resource_request_old_compute(self):
fake_inst = self._create_instance_obj()

self.assertRaises(
exception.ExtendedResourceRequestOldCompute,
self.compute_api.resize,
self.context, fake_inst, flavor_id=uuids.new_falvor
)

def _test_migrate(self, *args, **kwargs):
self._test_resize(*args, flavor_id_passed=False, **kwargs)

@@ -2618,6 +2650,23 @@ class _ComputeAPIUnitTestMixIn(object):
disk_over_commit=False)
self.assertIsNone(instance.task_state)

@mock.patch(
'nova.objects.service.get_minimum_version_all_cells',
new=mock.Mock(return_value=58),
)
@mock.patch(
'nova.network.neutron.API.instance_has_extended_resource_request',
new=mock.Mock(return_value=True),
)
def test_live_migrate_with_extended_resource_request_old_compute(self):
instance = self._create_instance_obj()
self.assertRaises(
exception.ExtendedResourceRequestOldCompute,
self.compute_api.live_migrate, self.context, instance,
host_name='fake_host', block_migration='auto',
disk_over_commit=False
)

@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceAction, 'action_start')
@@ -7706,8 +7755,10 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
mock_get_min_ver.assert_called_once_with(
self.context, ['nova-compute'])

@mock.patch('nova.network.neutron.API.get_requested_resource_for_instance',
return_value=[objects.RequestGroup()])
@mock.patch(
'nova.network.neutron.API.get_requested_resource_for_instance',
return_value=([objects.RequestGroup()], objects.RequestLevelParams())
)
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
return_value=compute_api.MIN_COMPUTE_CROSS_CELL_RESIZE)
def test_allow_cross_cell_resize_false_port_with_resource_req(

@@ -7726,8 +7777,10 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
self.context, ['nova-compute'])
mock_get_res_req.assert_called_once_with(self.context, uuids.instance)

@mock.patch('nova.network.neutron.API.get_requested_resource_for_instance',
return_value=[])
@mock.patch(
'nova.network.neutron.API.get_requested_resource_for_instance',
return_value=([], objects.RequestLevelParams())
)
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
return_value=compute_api.MIN_COMPUTE_CROSS_CELL_RESIZE)
def test_allow_cross_cell_resize_true(
@@ -12035,6 +12035,21 @@ class ComputeAPITestCase(BaseTestCase):
host='fake_dest_host', on_shared_storage=True,
admin_password=None)

@mock.patch(
'nova.objects.service.get_minimum_version_all_cells',
new=mock.Mock(return_value=58),
)
@mock.patch(
'nova.network.neutron.API.instance_has_extended_resource_request',
new=mock.Mock(return_value=True),
)
def test_evacuate_with_extended_resource_request_old_compute(self):
instance = self._create_fake_instance_obj(services=True)
self.assertRaises(exception.ExtendedResourceRequestOldCompute,
self.compute_api.evacuate, self.context.elevated(), instance,
host='fake_dest_host', on_shared_storage=True,
admin_password=None)

@mock.patch('nova.objects.MigrationList.get_by_filters')
def test_get_migrations(self, mock_migration):
migration = test_migration.fake_db_migration()
@@ -1028,6 +1028,25 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
self.compute_api.unshelve,
self.context, instance, new_az=new_az)

@mock.patch(
'nova.objects.service.get_minimum_version_all_cells',
new=mock.Mock(return_value=58),
)
@mock.patch(
'nova.network.neutron.API.instance_has_extended_resource_request',
new=mock.Mock(return_value=True),
)
def test_unshelve_offloaded_with_extended_resource_request_old_compute(
self
):
instance = self._get_specify_state_instance(
vm_states.SHELVED_OFFLOADED)

self.assertRaises(
exception.ExtendedResourceRequestOldCompute,
self.compute_api.unshelve, self.context, instance, new_az="az1"
)

@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid',
new_callable=mock.NonCallableMock)
@mock.patch('nova.availability_zones.get_availability_zones')
@@ -78,7 +78,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
_p = mock.patch(
'nova.network.neutron.API.'
'get_requested_resource_for_instance',
return_value=[])
return_value=([], objects.RequestLevelParams()))
self.mock_get_res_req = _p.start()
self.addCleanup(_p.stop)

@@ -600,8 +600,11 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.assertRaises(exception.MigrationPreCheckError,
self.task._call_livem_checks_on_host, {}, {})

@mock.patch('nova.network.neutron.API.get_binding_profile_allocation')
@mock.patch('nova.network.neutron.API.bind_ports_to_host')
def test_bind_ports_on_destination_merges_profiles(self, mock_bind_ports):
def test_bind_ports_on_destination_merges_profiles(
self, mock_bind_ports, mock_get_binding_profile_alloc
):
"""Assert that if both the migration_data and the provider mapping
contains binding profile related information then such information is
merged in the resulting profile.

@@ -615,6 +618,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
{'some-key': 'value'}))
])
provider_mappings = {uuids.port1: [uuids.dest_bw_rp]}
mock_get_binding_profile_alloc.return_value = uuids.dest_bw_rp

self.task._bind_ports_on_destination('dest-host', provider_mappings)

@@ -623,7 +627,59 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
vnic_types=None,
port_profiles={uuids.port1: {'allocation': uuids.dest_bw_rp,
'some-key': 'value'}})
mock_get_binding_profile_alloc.assert_called_once_with(
self.context, uuids.port1, provider_mappings)

@mock.patch('nova.network.neutron.API.get_binding_profile_allocation')
@mock.patch('nova.network.neutron.API.bind_ports_to_host')
def test_bind_ports_on_destination_merges_profiles_extended_res_req(
self, mock_bind_ports, mock_get_binding_profile_alloc
):
"""Assert that if both the migration_data and the provider mapping
contains binding profile related information and the port has extended
resource request then such information is merged in the resulting
profile.
"""

self.task.migrate_data = objects.LibvirtLiveMigrateData(
vifs=[
objects.VIFMigrateData(
port_id=uuids.port1,
profile_json=jsonutils.dumps(
{'some-key': 'value'}))
])
provider_mappings = {
uuids.bw_group: [uuids.dest_bw_rp],
uuids.pps_group: [uuids.dest_pps_rp],
uuids.accel_group: [uuids.cyborg_rp],
}
mock_get_binding_profile_alloc.return_value = {
uuids.bw_group: uuids.dest_bw_rp,
uuids.pps_group: uuids.dest_pps_rp,
}

self.task._bind_ports_on_destination('dest-host', provider_mappings)

mock_bind_ports.assert_called_once_with(
context=self.context, instance=self.instance, host='dest-host',
vnic_types=None,
port_profiles={
uuids.port1: {
'allocation': {
uuids.bw_group: uuids.dest_bw_rp,
uuids.pps_group: uuids.dest_pps_rp,
},
'some-key': 'value'
}
}
)
mock_get_binding_profile_alloc.assert_called_once_with(
self.context, uuids.port1, provider_mappings)

@mock.patch(
'nova.network.neutron.API.get_binding_profile_allocation',
new=mock.Mock(return_value=None)
)
@mock.patch('nova.network.neutron.API.bind_ports_to_host')
def test_bind_ports_on_destination_migration_data(self, mock_bind_ports):
"""Assert that if only the migration_data contains binding profile
@@ -646,8 +702,11 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
vnic_types=None,
port_profiles={uuids.port1: {'some-key': 'value'}})

@mock.patch('nova.network.neutron.API.get_binding_profile_allocation')
@mock.patch('nova.network.neutron.API.bind_ports_to_host')
def test_bind_ports_on_destination_provider_mapping(self, mock_bind_ports):
def test_bind_ports_on_destination_provider_mapping(
self, mock_bind_ports, mock_get_binding_profile_alloc
):
"""Assert that if only the provider mapping contains binding
profile related information then that is sent to neutron.
"""

@@ -658,6 +717,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
port_id=uuids.port1)
])
provider_mappings = {uuids.port1: [uuids.dest_bw_rp]}
mock_get_binding_profile_alloc.return_value = uuids.dest_bw_rp

self.task._bind_ports_on_destination('dest-host', provider_mappings)

@@ -665,6 +725,8 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
context=self.context, instance=self.instance, host='dest-host',
vnic_types=None,
port_profiles={uuids.port1: {'allocation': uuids.dest_bw_rp}})
mock_get_binding_profile_alloc.assert_called_once_with(
self.context, uuids.port1, provider_mappings)

@mock.patch(
'nova.compute.utils.'

@@ -682,7 +744,8 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self, mock_setup, mock_reset, mock_select, mock_check, mock_call,
mock_fill_provider_mapping, mock_update_pci_req):
resource_req = [objects.RequestGroup(requester_id=uuids.port_id)]
self.mock_get_res_req.return_value = resource_req
self.mock_get_res_req.return_value = (
resource_req, objects.RequestLevelParams())
self.instance.pci_requests = objects.InstancePCIRequests(requests=[])

self.assertEqual(("host1", "node1", fake_limits1),
@@ -98,7 +98,7 @@ class MigrationTaskTestCase(test.NoDBTestCase):
gbf_mock.return_value = objects.MigrationList()
mock_get_resources = \
self.mock_network_api.get_requested_resource_for_instance
mock_get_resources.return_value = []
mock_get_resources.return_value = ([], objects.RequestLevelParams())

if requested_destination:
self.request_spec.requested_destination = objects.Destination(

@@ -182,6 +182,10 @@ class MigrationTaskTestCase(test.NoDBTestCase):
mock_get_resources.assert_called_once_with(
self.context, self.instance.uuid)
self.assertEqual([], self.request_spec.requested_resources)
self.assertEqual(
mock_get_resources.return_value[1],
self.request_spec.request_level_params,
)

def test_execute(self):
self._test_execute()

@@ -222,7 +226,7 @@ class MigrationTaskTestCase(test.NoDBTestCase):
mock_gbf.return_value = objects.MigrationList()
mock_get_resources = \
self.mock_network_api.get_requested_resource_for_instance
mock_get_resources.return_value = []
mock_get_resources.return_value = ([], objects.RequestLevelParams())

# We just need this hook point to set a uuid on the
# migration before we use it for teardown
@@ -1635,12 +1635,16 @@ class _BaseTaskTestCase(object):
mock_schedule.return_value = [[selection]]

res_req = [objects.RequestGroup()]
mock_get_res_req.return_value = res_req
mock_get_res_req.return_value = (res_req, objects.RequestLevelParams())

self.conductor_manager.unshelve_instance(
self.context, instance, request_spec)

self.assertEqual(res_req, request_spec.requested_resources)
self.assertEqual(
mock_get_res_req.return_value[1],
request_spec.request_level_params,
)

mock_get_res_req.assert_called_once_with(self.context, instance.uuid)
mock_schedule.assert_called_once_with(

@@ -1673,7 +1677,7 @@ class _BaseTaskTestCase(object):
limits=None)
mock_schedule.return_value = [[selection]]

mock_get_res_req.return_value = []
mock_get_res_req.return_value = ([], objects.RequestLevelParams())

dp_groups = [objects.RequestGroup(requester_id='deviceprofile2'),
objects.RequestGroup(requester_id='deviceprofile3')]

@@ -1684,6 +1688,10 @@ class _BaseTaskTestCase(object):
self.context, instance, request_spec)

self.assertEqual(dp_groups, request_spec.requested_resources)
self.assertEqual(
mock_get_res_req.return_value[1],
request_spec.request_level_params,
)

mock_get_res_req.assert_called_once_with(self.context, instance.uuid)
mock_schedule.assert_called_once_with(

@@ -1719,7 +1727,7 @@ class _BaseTaskTestCase(object):
dp_groups = [objects.RequestGroup(requester_id='deviceprofile2'),
objects.RequestGroup(requester_id='deviceprofile3')]
mock_get_dp.return_value = dp_groups
mock_get_res_req.return_value = []
mock_get_res_req.return_value = ([], objects.RequestLevelParams())
arqs = ["fake-arq-uuid"]
mock_get_arqs.side_effect = exc.AcceleratorRequestBindingFailed(
arqs=arqs, msg='')
@@ -2016,7 +2024,7 @@ class _BaseTaskTestCase(object):
mock.patch('nova.scheduler.utils.fill_provider_mapping'),
mock.patch('nova.network.neutron.API.'
'get_requested_resource_for_instance',
return_value=[])
return_value=([], objects.RequestLevelParams()))
) as (rebuild_mock, sig_mock, select_dest_mock, reset_fd,
fill_rp_mapping_mock, get_req_res_mock):
self.conductor_manager.rebuild_instance(context=self.context,

@@ -2086,7 +2094,7 @@ class _BaseTaskTestCase(object):
mock.patch('nova.scheduler.utils.fill_provider_mapping'),
mock.patch('nova.network.neutron.API.'
'get_requested_resource_for_instance',
return_value=[])
return_value=([], objects.RequestLevelParams()))
) as (rebuild_mock, sig_mock, select_dest_mock, reset_fd,
fill_rp_mapping_mock, get_req_res_mock):
self.conductor_manager.rebuild_instance(context=self.context,

@@ -2160,7 +2168,7 @@ class _BaseTaskTestCase(object):
mock.patch('nova.scheduler.utils.fill_provider_mapping'),
mock.patch('nova.network.neutron.API.'
'get_requested_resource_for_instance',
return_value=[])
return_value=([], objects.RequestLevelParams()))
) as (sig_mock, select_dest_mock, reset_fd,
fill_rp_mapping_mock, get_req_res_mock):
ex = self.assertRaises(exc.AcceleratorRequestBindingFailed,
@@ -4370,9 +4370,14 @@ class TestAPI(TestAPIBase):
mock_client.update_port.assert_called_once_with('fake-port-id',
port_req_body)

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_migration_profile(
self, get_client_mock):
self, get_client_mock
):
instance = fake_instance.fake_instance_obj(self.context)
self.api._has_port_binding_extension = mock.Mock(return_value=True)

@@ -4406,9 +4411,14 @@ class TestAPI(TestAPIBase):
constants.BINDING_PROFILE: {
'fake_profile': 'fake_data'}}})

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_binding_profile_none(
self, get_client_mock):
self, get_client_mock
):
"""Tests _update_port_binding_for_instance when the binding:profile
value is None.
"""

@@ -4435,9 +4445,14 @@ class TestAPI(TestAPIBase):
'compute:%s' %
instance.availability_zone}})

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_same_host(self,
get_client_mock):
def test_update_port_bindings_for_instance_same_host(
self, get_client_mock
):
instance = fake_instance.fake_instance_obj(self.context)

# We test two ports, one with the same host as the host passed in and

@@ -4460,6 +4475,10 @@ class TestAPI(TestAPIBase):
'device_owner': 'compute:%s' %
instance.availability_zone}})

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_pci(self,

@@ -4567,6 +4586,10 @@ class TestAPI(TestAPIBase):
instance.host,
migration)

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_pci_no_migration(self,

@@ -4614,6 +4637,10 @@ class TestAPI(TestAPIBase):
# No ports should be updated if the port's pci binding did not change.
update_port_mock.assert_not_called()

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_same_host_failed_vif_type(
self, get_client_mock):

@@ -4655,6 +4682,10 @@ class TestAPI(TestAPIBase):
instance.availability_zone
}})

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_diff_host_unbound_vif_type(
self, get_client_mock):

@@ -4688,6 +4719,10 @@ class TestAPI(TestAPIBase):
instance.availability_zone
}})

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(neutronapi.API, '_get_pci_mapping_for_migration')
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())

@@ -4734,6 +4769,10 @@ class TestAPI(TestAPIBase):
constants.BINDING_HOST_ID],
'new-host')

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_resource_req(
self, get_client_mock):
@@ -4760,6 +4799,68 @@ class TestAPI(TestAPIBase):
'binding:profile': {'allocation': uuids.dest_compute_rp},
'binding:host_id': 'new-host'}})

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=True),
)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_extended_resource_req(
self, get_client_mock
):
instance = fake_instance.fake_instance_obj(self.context)
fake_ports = {
'ports': [
{
'id': 'fake-port-1',
'binding:vnic_type': 'normal',
constants.BINDING_HOST_ID: 'old-host',
constants.BINDING_PROFILE: {
'allocation': {
uuids.group1: uuids.source_compute_bw_rp,
uuids.group2: uuids.source_compute_pps_rp,
},
},
'resource_request': {
'request_groups': [
{
"id": uuids.group1
},
{
"id": uuids.group2
},
],
},
}
]
}
migration = objects.Migration(
status='confirmed', migration_type='migration')
list_ports_mock = mock.Mock(return_value=fake_ports)
get_client_mock.return_value.list_ports = list_ports_mock

self.api._update_port_binding_for_instance(
self.context, instance, 'new-host', migration,
{
uuids.group1: [uuids.dest_compute_bw_rp],
uuids.group2: [uuids.dest_compute_pps_rp],
}
)
get_client_mock.return_value.update_port.assert_called_once_with(
'fake-port-1',
{
'port': {
'device_owner': 'compute:None',
'binding:profile': {
'allocation': {
uuids.group1: uuids.dest_compute_bw_rp,
uuids.group2: uuids.dest_compute_pps_rp,
}
},
'binding:host_id': 'new-host'
}
}
)

@mock.patch.object(neutronapi, 'get_client')
def test_update_port_bindings_for_instance_with_resource_req_unshelve(
self, get_client_mock):

@@ -4785,6 +4886,10 @@ class TestAPI(TestAPIBase):
'binding:profile': {'allocation': uuids.dest_compute_rp},
'binding:host_id': 'new-host'}})

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_resource_req_no_mapping(
self, get_client_mock):

@@ -4811,6 +4916,10 @@ class TestAPI(TestAPIBase):
"are required for ports with a resource request.",
str(ex))

@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_resource_req_live_mig(
self, get_client_mock):
@ -6620,27 +6729,34 @@ class TestAPI(TestAPIBase):
        ]}
        mock_client.list_ports.return_value = ports

        request_groups = self.api.get_requested_resource_for_instance(
            self.context, uuids.inst1)
        request_groups, req_lvl_params = (
            self.api.get_requested_resource_for_instance(
                self.context, uuids.inst1)
        )

        mock_client.list_ports.assert_called_with(
            device_id=uuids.inst1, fields=['id', 'resource_request'])
        self.assertEqual([], request_groups)
        self.assertEqual([], req_lvl_params.same_subtree)

    @mock.patch('nova.network.neutron.get_client')
    def test_get_requested_resource_for_instance_no_ports(
            self, mock_get_client):
        self, mock_get_client
    ):
        mock_client = mock_get_client.return_value

        ports = {'ports': []}
        mock_client.list_ports.return_value = ports

        request_groups = self.api.get_requested_resource_for_instance(
            self.context, uuids.inst1)
        request_groups, req_lvl_params = (
            self.api.get_requested_resource_for_instance(
                self.context, uuids.inst1)
        )

        mock_client.list_ports.assert_called_with(
            device_id=uuids.inst1, fields=['id', 'resource_request'])
        self.assertEqual([], request_groups)
        self.assertEqual([], req_lvl_params.same_subtree)

    @mock.patch('nova.network.neutron.get_client')
    def test_get_requested_resource_for_instance_with_multiple_ports(
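In the hunk above, get_requested_resource_for_instance goes from returning a bare list of request groups to returning a (request_groups, req_lvl_params) tuple, and the updated tests read req_lvl_params.same_subtree. For orientation only, a hypothetical container with that shape (the attribute name is taken from the assertions above; the class itself is illustrative, not nova's):

import dataclasses

@dataclasses.dataclass
class RequestLevelParams:
    # Request-level parameters collected across all ports of the instance;
    # each entry of same_subtree is a list of request group ids that must be
    # satisfied from the same provider subtree.
    same_subtree: list = dataclasses.field(default_factory=list)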
@ -6662,8 +6778,10 @@ class TestAPI(TestAPIBase):
        ]}
        mock_client.list_ports.return_value = ports

        request_groups = self.api.get_requested_resource_for_instance(
            self.context, uuids.inst1)
        request_groups, req_lvl_params = (
            self.api.get_requested_resource_for_instance(
                self.context, uuids.inst1)
        )

        mock_client.list_ports.assert_called_with(
            device_id=uuids.inst1, fields=['id', 'resource_request'])
@ -6674,9 +6792,97 @@ class TestAPI(TestAPIBase):
        self.assertEqual(
            uuids.port1,
            request_groups[0].requester_id)
        self.assertEqual([], req_lvl_params.same_subtree)

        mock_get_client.assert_called_once_with(self.context, admin=True)

    @mock.patch(
        "nova.network.neutron.API.has_extended_resource_request_extension",
        new=mock.Mock(return_value=True),
    )
    @mock.patch('nova.network.neutron.get_client')
    def test_get_requested_resource_for_instance_with_multiple_ports_extended(
            self, mock_get_client):
        mock_client = mock_get_client.return_value

        ports = {'ports': [
            {
                'id': uuids.port1,
                'device_id': uuids.isnt1,
                'resource_request': {
                    'request_groups': [
                        {
                            'id': uuids.group1,
                            'resources': {
                                'NET_BW_EGR_KILOBIT_PER_SEC': 10000
                            }
                        },
                        {
                            'id': uuids.group2,
                            'resources': {
                                'NET_KILOPACKET_PER_SEC': 100
                            }
                        }
                    ],
                    'same_subtree': [uuids.group1, uuids.group2],
                }
            },
            {
                'id': uuids.port2,
                'device_id': uuids.isnt1,
                'resource_request': {
                    'request_groups': [
                        {
                            'id': uuids.group3,
                            'resources': {
                                'NET_BW_EGR_KILOBIT_PER_SEC': 20000
                            }
                        },
                    ],
                    'same_subtree': [uuids.group3],
                }
            },
            {
                'id': uuids.port3,
                'device_id': uuids.isnt1,
                'resource_request': {}
            },
        ]}
        mock_client.list_ports.return_value = ports

        request_groups, req_lvl_params = (
            self.api.get_requested_resource_for_instance(
                self.context, uuids.inst1)
        )

        mock_client.list_ports.assert_called_with(
            device_id=uuids.inst1, fields=['id', 'resource_request'])
        self.assertEqual(3, len(request_groups))
        self.assertEqual(
            {'NET_BW_EGR_KILOBIT_PER_SEC': 10000},
            request_groups[0].resources)
        self.assertEqual(
            uuids.group1,
            request_groups[0].requester_id)
        self.assertEqual(
            {'NET_KILOPACKET_PER_SEC': 100},
            request_groups[1].resources)
        self.assertEqual(
            uuids.group2,
            request_groups[1].requester_id)
        self.assertEqual(
            {'NET_BW_EGR_KILOBIT_PER_SEC': 20000},
            request_groups[2].resources)
        self.assertEqual(
            uuids.group3,
            request_groups[2].requester_id)

        mock_get_client.assert_called_once_with(self.context, admin=True)
        self.assertEqual(
            [[uuids.group1, uuids.group2], [uuids.group3]],
            req_lvl_params.same_subtree,
        )

    def test_get_segment_ids_for_network_no_segment_ext(self):
        with mock.patch.object(
            self.api, '_has_segment_extension', return_value=False
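The extended test above shows the new per-port resource_request payload: each port carries a list of request_groups (each with its own id and resources) plus a port-level same_subtree list, and the expected outcome is one request group per neutron group with the per-port same_subtree lists collected into the request-level parameters. A rough illustrative pass over ports of that shape (not nova's code, names are hypothetical):

def collect_requested_resources(ports):
    # Flatten extended resource_request payloads into (group id, resources)
    # pairs and gather the per-port same_subtree constraints, mirroring the
    # assertions made in the test above.
    request_groups = []
    same_subtree = []
    for port in ports:
        resource_request = port.get('resource_request') or {}
        for group in resource_request.get('request_groups', []):
            request_groups.append((group['id'], group['resources']))
        if resource_request.get('same_subtree'):
            same_subtree.append(resource_request['same_subtree'])
    return request_groups, same_subtree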
@ -6876,6 +7082,92 @@ class TestAPI(TestAPIBase):
                self.context, port_new_res_req, neutron=None)
        )

    @mock.patch(
        "nova.network.neutron.API.has_extended_resource_request_extension",
        new=mock.Mock(return_value=False),
    )
    @mock.patch("neutronclient.v2_0.client.Client.show_port")
    def test_get_binding_profile_allocation_no_request(self, mock_show_port):
        mock_show_port.return_value = {
            "port": {
            }
        }
        self.assertIsNone(
            self.api.get_binding_profile_allocation(
                self.context, uuids.port_uuid, {}))

    @mock.patch(
        "nova.network.neutron.API.has_extended_resource_request_extension",
        new=mock.Mock(return_value=False),
    )
    @mock.patch("neutronclient.v2_0.client.Client.show_port")
    def test_get_binding_profile_allocation_legacy_request(
        self, mock_show_port
    ):
        mock_show_port.return_value = {
            "port": {
                "id": uuids.port_uuid,
                "resource_request": {
                    "resources": {
                        "CUSTOM_FOO": 123,
                    }
                },
            }
        }
        self.assertEqual(
            uuids.rp,
            self.api.get_binding_profile_allocation(
                self.context, uuids.port_uuid,
                {
                    uuids.port_uuid: [uuids.rp]
                }
            )
        )

    @mock.patch(
        "nova.network.neutron.API.has_extended_resource_request_extension",
        new=mock.Mock(return_value=True),
    )
    @mock.patch("neutronclient.v2_0.client.Client.show_port")
    def test_get_binding_profile_allocation_extended_request(
        self, mock_show_port
    ):
        mock_show_port.return_value = {
            "port": {
                "id": uuids.port_uuid,
                "resource_request": {
                    "request_groups": [
                        {
                            "id": uuids.group1,
                            "resources": {
                                "CUSTOM_FOO": 123,
                            }
                        },
                        {
                            "id": uuids.group2,
                            "resources": {
                                "CUSTOM_BAR": 321,
                            }
                        },
                    ],
                },
            }
        }
        self.assertEqual(
            {
                uuids.group1: uuids.rp1,
                uuids.group2: uuids.rp2,
            },
            self.api.get_binding_profile_allocation(
                self.context, uuids.port_uuid,
                {
                    uuids.group1: [uuids.rp1],
                    uuids.group2: [uuids.rp2],
                    uuids.non_port_related_group: [uuids.rp3],
                }
            )
        )


class TestInstanceHasExtendedResourceRequest(TestAPIBase):
    def setUp(self):
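The three get_binding_profile_allocation tests above pin down the two result shapes: with no resource_request the result is None, with the legacy format the result is the single destination provider keyed by the port UUID, and with the extended format it is a dict mapping each of the port's group UUIDs to its destination provider while ignoring mapping entries that belong to other requesters. A condensed illustrative version of that selection logic (the function name and the explicit 'extended' flag are assumptions, not nova's API):

def binding_profile_allocation(port, provider_mappings, extended):
    # Illustrative only; mirrors the expectations asserted in the tests above.
    resource_request = port.get('resource_request') or {}
    if not resource_request:
        return None
    if extended:
        # Extended format: one destination provider per port-level group.
        return {
            group['id']: provider_mappings[group['id']][0]
            for group in resource_request['request_groups']
        }
    # Legacy format: the whole port maps to a single destination provider.
    return provider_mappings[port['id']][0]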