smartnic support - reject server move and suspend
Server with ARQ in the port does not support move and suspend; reject
these operations at the API stage:
- resize
- shelve
- live_migrate
- evacuate
- suspend
- attach/detach a smartnic port

Reject create server with a smartnic in the port if the minimal compute
service version is less than 57.

Reject create server with a port which has a malformed device profile
that requests multiple devices, like:
{
    "resources:CUSTOM_ACCELERATOR_FPGA": "2",
    "trait:CUSTOM_INTEL_PAC_ARRIA10": "required",
}

Implements: blueprint sriov-smartnic-support
Change-Id: Ia705a0341fb067e746a3b91ec4fc6d149bcaffb8
This commit is contained in:
parent
e19fa1a199
commit
1f53176d2f
@ -89,7 +89,26 @@ def get_arq_pci_device_profile(arq):
|
||||
|
||||
def get_device_profile_request_groups(context, dp_name, owner=None):
    """Return placement request groups for a device profile.

    :param context: nova request context.
    :param dp_name: string: device profile name.
    :param owner: string: the port UUID if the device profile is
        requested by a port, otherwise None.
    :returns: list of request groups translated from the profile's
        device groups.
    """
    cyclient = get_client(context)
    # Fetch the raw device groups first, then translate them into
    # placement request groups. The old single-call form
    # (get_device_profile_groups(dp_name, owner)) was left in as
    # unreachable dead code by a bad merge and has been removed.
    dp_groups = cyclient.get_device_profile_groups(dp_name)
    return cyclient.get_device_request_groups(dp_groups, owner)
|
||||
|
||||
|
||||
def get_device_amount_of_dp_groups(dp_groups):
    """Get the total requested device amount for the groups of
    a device_profile.

    Sums the integer values of all ``resources:*`` keys across the
    groups; keys that do not match the resource-request pattern
    (e.g. ``trait:*`` or ``accel:foo=bar``) are ignored.

    :param dp_groups: list of request groups in a device profile.
    :returns: int: total number of devices requested.
    """
    devices_amount = 0
    # The group index was never used, so iterate the groups directly
    # instead of the original enumerate() with an ignored index.
    for dp_group in dp_groups:
        for key, val in dp_group.items():
            match = schedutils.ResourceRequest.XS_KEYPAT.match(key)
            if not match:
                continue  # could be 'accel:foo=bar', skip it
            prefix, _, _ = match.groups()
            if prefix == schedutils.ResourceRequest.XS_RES_PREFIX:
                devices_amount += int(val)
    return devices_amount
|
||||
|
||||
|
||||
class _CyborgClient(object):
|
||||
@ -125,18 +144,14 @@ class _CyborgClient(object):
|
||||
|
||||
return resp.json().get('device_profiles')
|
||||
|
||||
def get_device_profile_groups(self, dp_name, owner):
|
||||
"""Get list of profile group objects from the device profile.
|
||||
|
||||
Cyborg API returns: {"device_profiles": [<device_profile>]}
|
||||
See module notes above for further details.
|
||||
def get_device_profile_groups(self, dp_name):
|
||||
"""Get device groups from a device profile.
|
||||
|
||||
:param dp_name: string: device profile name
|
||||
Expected to be valid, not None or ''.
|
||||
:param owner: string: Port UUID that create the arq
|
||||
Expected to be valid or None.
|
||||
:returns: [objects.RequestGroup]
|
||||
:returns: [device profile group dict]
|
||||
:raises: DeviceProfileError
|
||||
Expected to be valid, not None or ''.
|
||||
"""
|
||||
dp_list = self._get_device_profile_list(dp_name)
|
||||
if not dp_list:
|
||||
@ -145,8 +160,16 @@ class _CyborgClient(object):
|
||||
if len(dp_list) != 1:
|
||||
err = _('Expected 1 device profile but got %s.') % len(dp_list)
|
||||
raise exception.DeviceProfileError(name=dp_name, msg=err)
|
||||
return dp_list[0]['groups']
|
||||
|
||||
dp_groups = dp_list[0]['groups']
|
||||
def get_device_request_groups(self, dp_groups, owner):
|
||||
"""Get list of profile group objects from the device profile.
|
||||
|
||||
:param dp_groups: device groups of a device profile.
|
||||
:param owner: The port UUID if the dp requested by port.
|
||||
:returns: [objects.RequestGroup]
|
||||
:raises: DeviceProfileError
|
||||
"""
|
||||
request_groups = []
|
||||
for dp_group_id, dp_group in enumerate(dp_groups):
|
||||
req_id = get_device_profile_group_requester_id(dp_group_id, owner)
|
||||
|
@ -176,7 +176,8 @@ class InterfaceAttachmentController(wsgi.Controller):
|
||||
exception.NetworkInterfaceTaggedAttachNotSupported,
|
||||
exception.NetworksWithQoSPolicyNotSupported,
|
||||
exception.InterfaceAttachPciClaimFailed,
|
||||
exception.InterfaceAttachResourceAllocationFailed) as e:
|
||||
exception.InterfaceAttachResourceAllocationFailed,
|
||||
exception.ForbiddenPortsWithAccelerator) as e:
|
||||
raise exc.HTTPBadRequest(explanation=e.format_message())
|
||||
except (
|
||||
exception.OperationNotSupportedForVDPAInterface,
|
||||
@ -200,7 +201,7 @@ class InterfaceAttachmentController(wsgi.Controller):
|
||||
return self.show(req, server_id, vif['id'])
|
||||
|
||||
@wsgi.response(202)
|
||||
@wsgi.expected_errors((404, 409, 501))
|
||||
@wsgi.expected_errors((400, 404, 409, 501))
|
||||
def delete(self, req, server_id, id):
|
||||
"""Detach an interface from an instance."""
|
||||
context = req.environ['nova.context']
|
||||
@ -227,3 +228,5 @@ class InterfaceAttachmentController(wsgi.Controller):
|
||||
except exception.InstanceInvalidState as state_error:
|
||||
common.raise_http_conflict_for_instance_invalid_state(state_error,
|
||||
'detach_interface', server_id)
|
||||
except exception.ForbiddenPortsWithAccelerator as e:
|
||||
raise exc.HTTPBadRequest(explanation=e.format_message())
|
||||
|
@ -126,7 +126,10 @@ class EvacuateController(wsgi.Controller):
|
||||
except exception.InstanceInvalidState as state_error:
|
||||
common.raise_http_conflict_for_instance_invalid_state(state_error,
|
||||
'evacuate', id)
|
||||
except exception.ComputeServiceInUse as e:
|
||||
except (
|
||||
exception.ComputeServiceInUse,
|
||||
exception.ForbiddenPortsWithAccelerator,
|
||||
) as e:
|
||||
raise exc.HTTPBadRequest(explanation=e.format_message())
|
||||
except exception.ForbiddenWithAccelerators as e:
|
||||
raise exc.HTTPForbidden(explanation=e.format_message())
|
||||
|
@ -75,7 +75,8 @@ class MigrateServerController(wsgi.Controller):
|
||||
except exception.InstanceNotFound as e:
|
||||
raise exc.HTTPNotFound(explanation=e.format_message())
|
||||
except (exception.ComputeHostNotFound,
|
||||
exception.CannotMigrateToSameHost) as e:
|
||||
exception.CannotMigrateToSameHost,
|
||||
exception.ForbiddenPortsWithAccelerator) as e:
|
||||
raise exc.HTTPBadRequest(explanation=e.format_message())
|
||||
|
||||
@wsgi.response(202)
|
||||
@ -132,7 +133,8 @@ class MigrateServerController(wsgi.Controller):
|
||||
exception.InvalidLocalStorage,
|
||||
exception.InvalidSharedStorage,
|
||||
exception.HypervisorUnavailable,
|
||||
exception.MigrationPreCheckError) as ex:
|
||||
exception.MigrationPreCheckError,
|
||||
exception.ForbiddenPortsWithAccelerator) as ex:
|
||||
if async_:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error("Unexpected exception received from "
|
||||
|
@ -762,7 +762,8 @@ class ServersController(wsgi.Controller):
|
||||
exception.CertificateValidationFailed,
|
||||
exception.CreateWithPortResourceRequestOldVersion,
|
||||
exception.DeviceProfileError,
|
||||
exception.ComputeHostNotFound) as error:
|
||||
exception.ComputeHostNotFound,
|
||||
exception.ForbiddenPortsWithAccelerator) as error:
|
||||
raise exc.HTTPBadRequest(explanation=error.format_message())
|
||||
except INVALID_FLAVOR_IMAGE_EXCEPTIONS as error:
|
||||
raise exc.HTTPBadRequest(explanation=error.format_message())
|
||||
@ -974,7 +975,8 @@ class ServersController(wsgi.Controller):
|
||||
except (exception.AutoDiskConfigDisabledByImage,
|
||||
exception.CannotResizeDisk,
|
||||
exception.CannotResizeToSameFlavor,
|
||||
exception.FlavorNotFound) as e:
|
||||
exception.FlavorNotFound,
|
||||
exception.ForbiddenPortsWithAccelerator) as e:
|
||||
raise exc.HTTPBadRequest(explanation=e.format_message())
|
||||
except INVALID_FLAVOR_IMAGE_EXCEPTIONS as e:
|
||||
raise exc.HTTPBadRequest(explanation=e.format_message())
|
||||
|
@ -37,7 +37,7 @@ class ShelveController(wsgi.Controller):
|
||||
self.network_api = neutron.API()
|
||||
|
||||
@wsgi.response(202)
|
||||
@wsgi.expected_errors((404, 403, 409))
|
||||
@wsgi.expected_errors((404, 403, 409, 400))
|
||||
@wsgi.action('shelve')
|
||||
def _shelve(self, req, id, body):
|
||||
"""Move an instance into shelved mode."""
|
||||
@ -61,9 +61,11 @@ class ShelveController(wsgi.Controller):
|
||||
except exception.InstanceInvalidState as state_error:
|
||||
common.raise_http_conflict_for_instance_invalid_state(state_error,
|
||||
'shelve', id)
|
||||
except exception.ForbiddenPortsWithAccelerator as e:
|
||||
raise exc.HTTPBadRequest(explanation=e.format_message())
|
||||
|
||||
@wsgi.response(202)
|
||||
@wsgi.expected_errors((404, 409))
|
||||
@wsgi.expected_errors((400, 404, 409))
|
||||
@wsgi.action('shelveOffload')
|
||||
def _shelve_offload(self, req, id, body):
|
||||
"""Force removal of a shelved instance from the compute node."""
|
||||
@ -80,6 +82,9 @@ class ShelveController(wsgi.Controller):
|
||||
'shelveOffload',
|
||||
id)
|
||||
|
||||
except exception.ForbiddenPortsWithAccelerator as e:
|
||||
raise exc.HTTPBadRequest(explanation=e.format_message())
|
||||
|
||||
@wsgi.response(202)
|
||||
@wsgi.expected_errors((400, 404, 409))
|
||||
@wsgi.action('unshelve')
|
||||
|
@ -27,7 +27,7 @@ class SuspendServerController(wsgi.Controller):
|
||||
self.compute_api = compute.API()
|
||||
|
||||
@wsgi.response(202)
|
||||
@wsgi.expected_errors((403, 404, 409))
|
||||
@wsgi.expected_errors((403, 404, 409, 400))
|
||||
@wsgi.action('suspend')
|
||||
def _suspend(self, req, id, body):
|
||||
"""Permit admins to suspend the server."""
|
||||
@ -49,6 +49,8 @@ class SuspendServerController(wsgi.Controller):
|
||||
'suspend', id)
|
||||
except exception.ForbiddenWithAccelerators as e:
|
||||
raise exc.HTTPForbidden(explanation=e.format_message())
|
||||
except exception.ForbiddenPortsWithAccelerator as e:
|
||||
raise exc.HTTPBadRequest(explanation=e.format_message())
|
||||
|
||||
@wsgi.response(202)
|
||||
@wsgi.expected_errors((404, 409))
|
||||
|
@ -108,6 +108,8 @@ MIN_VER_NOVA_COMPUTE_MIXED_POLICY = 52
|
||||
|
||||
SUPPORT_ACCELERATOR_SERVICE_FOR_REBUILD = 53
|
||||
|
||||
SUPPORT_VNIC_TYPE_ACCELERATOR = 57
|
||||
|
||||
# FIXME(danms): Keep a global cache of the cells we find the
|
||||
# first time we look. This needs to be refreshed on a timer or
|
||||
# trigger.
|
||||
@ -333,6 +335,23 @@ def block_accelerators(until_service=None):
|
||||
return inner
|
||||
|
||||
|
||||
def block_port_accelerators():
    """Decorator factory rejecting operations on instances that have
    accelerator-backed ports.

    The wrapped method is only invoked when none of the instance's
    VIFs uses an accelerator vnic type; otherwise
    ForbiddenPortsWithAccelerator is raised.
    """
    def inner(func):
        @functools.wraps(func)
        def wrapper(self, context, instance, *args, **kwargs):
            # Reject the request when any port of this instance is
            # backed by a Cyborg-managed accelerator.
            forbidden = (
                network_model.VNIC_TYPE_ACCELERATOR_DIRECT,
                network_model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL)
            if any(vif['vnic_type'] in forbidden
                   for vif in instance.get_network_info()):
                raise exception.ForbiddenPortsWithAccelerator()
            return func(self, context, instance, *args, **kwargs)
        return wrapper
    return inner
|
||||
|
||||
|
||||
@profiler.trace_cls("compute_api")
|
||||
class API:
|
||||
"""API for interacting with the compute manager."""
|
||||
@ -964,6 +983,19 @@ class API:
|
||||
flavor, root_bdm,
|
||||
validate_numa=validate_numa)
|
||||
|
||||
def _check_support_vnic_accelerator(self, context, requested_networks):
    """Reject server creates using Cyborg device-profile ports until
    all nova-compute services are new enough to support them.

    :param context: nova request context.
    :param requested_networks: NetworkRequestList from the boot
        request; may be None or empty.
    :raises: ForbiddenPortsWithAccelerator when any requested network
        carries a device profile while the minimum nova-compute
        service version is below SUPPORT_VNIC_TYPE_ACCELERATOR.
    """
    if not requested_networks:
        return
    for request_net in requested_networks:
        if not request_net.device_profile:
            continue
        min_version = objects.service.get_minimum_version_all_cells(
            context, ['nova-compute'])
        if min_version < SUPPORT_VNIC_TYPE_ACCELERATOR:
            # NOTE(review): message text (including the 'avaliable'
            # misspelling) is asserted verbatim by unit tests, so it
            # is reproduced unchanged here.
            msg = ("Port with cyborg profile is not avaliable"
                   " until upgrade finished.")
            raise exception.ForbiddenPortsWithAccelerator(msg)
|
||||
|
||||
def _validate_and_build_base_options(
|
||||
self, context, flavor, boot_meta, image_href, image_id, kernel_id,
|
||||
ramdisk_id, display_name, display_description, key_name,
|
||||
@ -1033,6 +1065,8 @@ class API:
|
||||
affinity_policy=pci_numa_affinity_policy)
|
||||
network_metadata, port_resource_requests = result
|
||||
|
||||
self._check_support_vnic_accelerator(context, requested_networks)
|
||||
|
||||
# Creating servers with ports that have resource requests, like QoS
|
||||
# minimum bandwidth rules, is only supported in a requested minimum
|
||||
# microversion.
|
||||
@ -3949,6 +3983,7 @@ class API:
|
||||
# finally split resize and cold migration into separate code paths
|
||||
# FIXME(sean-k-mooney): Cold migrate and resize to different hosts
|
||||
# probably works but they have not been tested so block them for now
|
||||
@block_port_accelerators()
|
||||
@reject_vdpa_instances(instance_actions.RESIZE)
|
||||
@block_accelerators()
|
||||
@check_instance_lock
|
||||
@ -4170,6 +4205,7 @@ class API:
|
||||
|
||||
# FIXME(sean-k-mooney): Shelve works but unshelve does not due to bug
|
||||
# #1851545, so block it for now
|
||||
@block_port_accelerators()
|
||||
@reject_vdpa_instances(instance_actions.SHELVE)
|
||||
@reject_vtpm_instances(instance_actions.SHELVE)
|
||||
@block_accelerators(until_service=54)
|
||||
@ -4212,6 +4248,7 @@ class API:
|
||||
context, instance=instance, clean_shutdown=clean_shutdown,
|
||||
accel_uuids=accel_uuids)
|
||||
|
||||
@block_port_accelerators()
|
||||
@check_instance_lock
|
||||
@check_instance_state(vm_state=[vm_states.SHELVED])
|
||||
def shelve_offload(self, context, instance, clean_shutdown=True):
|
||||
@ -4360,6 +4397,7 @@ class API:
|
||||
# FIXME(sean-k-mooney): Suspend does not work because we do not unplug
|
||||
# the vDPA devices before calling managed save as we do with SR-IOV
|
||||
# devices
|
||||
@block_port_accelerators()
|
||||
@reject_vdpa_instances(instance_actions.SUSPEND)
|
||||
@block_accelerators()
|
||||
@reject_sev_instances(instance_actions.SUSPEND)
|
||||
@ -5045,6 +5083,11 @@ class API:
|
||||
instance_uuid=instance.uuid,
|
||||
operation=instance_actions.ATTACH_INTERFACE)
|
||||
|
||||
if port.get('binding:vnic_type', 'normal') in (
|
||||
network_model.VNIC_TYPE_ACCELERATOR_DIRECT,
|
||||
network_model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL):
|
||||
raise exception.ForbiddenPortsWithAccelerator()
|
||||
|
||||
return self.compute_rpcapi.attach_interface(context,
|
||||
instance=instance, network_id=network_id, port_id=port_id,
|
||||
requested_ip=requested_ip, tag=tag)
|
||||
@ -5067,6 +5110,10 @@ class API:
|
||||
raise exception.OperationNotSupportedForVDPAInterface(
|
||||
instance_uuid=instance.uuid,
|
||||
operation=instance_actions.DETACH_INTERFACE)
|
||||
if vif['vnic_type'] in (
|
||||
network_model.VNIC_TYPE_ACCELERATOR_DIRECT,
|
||||
network_model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL):
|
||||
raise exception.ForbiddenPortsWithAccelerator()
|
||||
break
|
||||
else:
|
||||
# NOTE(sean-k-mooney) This should never happen but just in case the
|
||||
@ -5119,6 +5166,7 @@ class API:
|
||||
|
||||
return _metadata
|
||||
|
||||
@block_port_accelerators()
|
||||
@reject_vdpa_instances(instance_actions.LIVE_MIGRATION)
|
||||
@block_accelerators()
|
||||
@reject_vtpm_instances(instance_actions.LIVE_MIGRATION)
|
||||
@ -5252,6 +5300,7 @@ class API:
|
||||
instance, migration.id)
|
||||
|
||||
# FIXME(sean-k-mooney): rebuild works but we have not tested evacuate yet
|
||||
@block_port_accelerators()
|
||||
@reject_vdpa_instances(instance_actions.EVACUATE)
|
||||
@reject_vtpm_instances(instance_actions.EVACUATE)
|
||||
@block_accelerators(until_service=SUPPORT_ACCELERATOR_SERVICE_FOR_REBUILD)
|
||||
|
@ -159,6 +159,10 @@ class ForbiddenWithAccelerators(Forbidden):
|
||||
msg_fmt = _("Forbidden with instances that have accelerators.")
|
||||
|
||||
|
||||
# Raised when an operation (resize, shelve, live-migrate, evacuate,
# suspend, interface attach/detach, or create on too-old computes) is
# attempted on a server with accelerator-backed (smartnic) ports.
class ForbiddenPortsWithAccelerator(Forbidden):
    msg_fmt = _("Forbidden with Ports that have accelerators.")
|
||||
|
||||
|
||||
class AdminRequired(Forbidden):
|
||||
msg_fmt = _("User does not have admin privileges")
|
||||
|
||||
|
@ -2041,9 +2041,17 @@ class API:
|
||||
% (request_net.port_id))
|
||||
raise exception.DeviceProfileError(
|
||||
name=device_profile, msg=err)
|
||||
dp_request_groups = (
|
||||
cyborg.get_device_profile_request_groups(
|
||||
context, device_profile, request_net.port_id))
|
||||
cyclient = cyborg.get_client(context)
|
||||
dp_groups = cyclient.get_device_profile_groups(
|
||||
device_profile)
|
||||
dev_num = cyborg.get_device_amount_of_dp_groups(dp_groups)
|
||||
if dev_num > 1:
|
||||
err_msg = 'request multiple devices for single port.'
|
||||
raise exception.DeviceProfileError(name=device_profile,
|
||||
msg=err_msg)
|
||||
|
||||
dp_request_groups = (cyclient.get_device_request_groups(
|
||||
dp_groups, owner=request_net.port_id))
|
||||
LOG.debug("device_profile request group(ARQ): %s",
|
||||
dp_request_groups)
|
||||
# keep device_profile to avoid get vnic info again
|
||||
|
@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# NOTE(danms): This is the global service version counter
|
||||
SERVICE_VERSION = 56
|
||||
SERVICE_VERSION = 57
|
||||
|
||||
|
||||
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
|
||||
@ -199,6 +199,9 @@ SERVICE_VERSION_HISTORY = (
|
||||
{'compute_rpc': '5.13'},
|
||||
# Version 56: Compute RPC v6.0:
|
||||
{'compute_rpc': '6.0'},
|
||||
# Version 57: Compute RPC v6.0:
|
||||
# Add support for vnic 'accelerator-direct'.
|
||||
{'compute_rpc': '6.0'},
|
||||
)
|
||||
|
||||
# This is used to raise an error at service startup if older than N-1 computes
|
||||
|
@ -116,8 +116,9 @@ class CyborgTestCase(test.NoDBTestCase):
|
||||
rg.add_trait(trait_name='CUSTOM_FPGA_CARD', trait_type='required')
|
||||
expected_groups = [rg]
|
||||
|
||||
actual_groups = self.client.get_device_profile_groups('mydp',
|
||||
owner=owner)
|
||||
dp_groups = self.client.get_device_profile_groups('mydp')
|
||||
actual_groups = self.client.get_device_request_groups(dp_groups,
|
||||
owner=owner)
|
||||
self.assertEqual(len(expected_groups), len(actual_groups))
|
||||
self.assertEqual(expected_groups[0].__dict__,
|
||||
actual_groups[0].__dict__)
|
||||
@ -135,8 +136,7 @@ class CyborgTestCase(test.NoDBTestCase):
|
||||
mock_get_dp_list.return_value = None
|
||||
self.assertRaises(exception.DeviceProfileError,
|
||||
self.client.get_device_profile_groups,
|
||||
dp_name='mydp',
|
||||
owner=None)
|
||||
dp_name='mydp')
|
||||
|
||||
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
|
||||
'_get_device_profile_list')
|
||||
@ -145,8 +145,7 @@ class CyborgTestCase(test.NoDBTestCase):
|
||||
mock_get_dp_list.return_value = [1, 2]
|
||||
self.assertRaises(exception.DeviceProfileError,
|
||||
self.client.get_device_profile_groups,
|
||||
dp_name='mydp',
|
||||
owner=None)
|
||||
dp_name='mydp')
|
||||
|
||||
def _get_arqs_and_request_groups(self):
|
||||
arq_common = {
|
||||
@ -510,3 +509,21 @@ class CyborgTestCase(test.NoDBTestCase):
|
||||
}
|
||||
bind_info = cyborg.get_arq_pci_device_profile(arq)
|
||||
self.assertEqual(expect_info, bind_info)
|
||||
|
||||
def test_get_device_amount_of_dp_groups(self):
    """Resource amounts are summed across device profile groups."""
    group1 = {
        "resources:FPGA": "1",
        "trait:CUSTOM_FPGA_CARD": "required"
    }
    group2 = {
        "resources:FPGA": "2",
        "trait:CUSTOM_FPGA_CARD": "required"
    }
    # Single groups and their combination, table-driven.
    cases = (
        ([group1], 1),
        ([group2], 2),
        ([group1, group2], 3),
    )
    for groups, expected in cases:
        num = cyborg.get_device_amount_of_dp_groups(groups)
        self.assertEqual(expected, num)
|
||||
|
@ -40,7 +40,8 @@ class CommonMixin(object):
|
||||
uuid = uuidutils.generate_uuid()
|
||||
instance = fake_instance.fake_instance_obj(self.context,
|
||||
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
|
||||
task_state=None, launched_at=timeutils.utcnow())
|
||||
task_state=None, launched_at=timeutils.utcnow(),
|
||||
expected_attrs=['info_cache'])
|
||||
self.mock_get.return_value = instance
|
||||
return instance
|
||||
|
||||
|
@ -169,7 +169,7 @@ class _ComputeAPIUnitTestMixIn(object):
|
||||
instance.launched_at = now
|
||||
instance.disable_terminate = False
|
||||
instance.info_cache = objects.InstanceInfoCache()
|
||||
instance.info_cache.network_info = model.NetworkInfo()
|
||||
instance.info_cache.network_info = model.NetworkInfo([])
|
||||
instance.numa_topology = None
|
||||
|
||||
if params:
|
||||
@ -7114,6 +7114,28 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
|
||||
self.context, image, flavor, root_bdm=None, validate_pci=True)
|
||||
mock_request.assert_called_once_with(flavor)
|
||||
|
||||
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
|
||||
return_value=56)
|
||||
def test_check_support_vnic_accelerator_version_before_57(self, mock_get):
|
||||
requested_networks = objects.NetworkRequestList(
|
||||
objects=[objects.NetworkRequest(device_profile='smartnic1')])
|
||||
self.assertRaisesRegex(exception.ForbiddenPortsWithAccelerator,
|
||||
'Port with cyborg profile is not avaliable until upgrade'
|
||||
' finished.',
|
||||
self.compute_api._check_support_vnic_accelerator,
|
||||
self.context,
|
||||
requested_networks)
|
||||
mock_get.assert_called_once_with(self.context, ['nova-compute'])
|
||||
|
||||
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
|
||||
return_value=57)
|
||||
def test_check_support_vnic_accelerator_version_after_57(self, mock_get):
|
||||
requested_networks = objects.NetworkRequestList(
|
||||
objects=[objects.NetworkRequest(device_profile='smartnic1')])
|
||||
self.compute_api._check_support_vnic_accelerator(self.context,
|
||||
requested_networks)
|
||||
mock_get.assert_called_once_with(self.context, ['nova-compute'])
|
||||
|
||||
def test_validate_and_build_base_options_translate_neutron_secgroup(self):
|
||||
"""Tests that _check_requested_secgroups will return a uuid for a
|
||||
requested Neutron security group and that will be returned from
|
||||
@ -7757,6 +7779,50 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
|
||||
# myfunc was not called
|
||||
self.assertEqual({}, args_info)
|
||||
|
||||
def _test_block_port_accelerators(self, instance, args_info):
|
||||
@compute_api.block_port_accelerators()
|
||||
def myfunc(self, context, instance, *args, **kwargs):
|
||||
args_info['args'] = (context, instance, *args)
|
||||
args_info['kwargs'] = dict(**kwargs)
|
||||
|
||||
args = ('arg1', 'arg2')
|
||||
kwargs = {'arg3': 'dummy3', 'arg4': 'dummy4'}
|
||||
myfunc(mock.ANY, self.context, instance, *args, **kwargs)
|
||||
expected_args = (self.context, instance, *args)
|
||||
return expected_args, kwargs
|
||||
|
||||
def test_block_port_accelerators_no_ACCELERATOR_DIRECT(self):
|
||||
instance = self._create_instance_obj()
|
||||
nw_info = model.NetworkInfo.hydrate([])
|
||||
instance.info_cache.network_info = nw_info
|
||||
args_info = {}
|
||||
expected_args, kwargs = self._test_block_port_accelerators(
|
||||
instance, args_info)
|
||||
self.assertEqual(expected_args, args_info['args'])
|
||||
self.assertEqual(kwargs, args_info['kwargs'])
|
||||
|
||||
def test_block_port_accelerators_with_ACCELERATOR_DIRECT(self):
|
||||
instance = self._create_instance_obj()
|
||||
nw_info = model.NetworkInfo.hydrate(
|
||||
[{'vnic_type': model.VNIC_TYPE_ACCELERATOR_DIRECT}])
|
||||
instance.info_cache.network_info = nw_info
|
||||
args_info = {}
|
||||
self.assertRaisesRegex(exception.ForbiddenPortsWithAccelerator,
|
||||
'Forbidden with Ports that have accelerators.',
|
||||
self._test_block_port_accelerators, instance, args_info)
|
||||
self.assertEqual({}, args_info)
|
||||
|
||||
def test_block_port_accelerators_with_ACCELERATOR_DIRECT_PHYSICAL(self):
|
||||
instance = self._create_instance_obj()
|
||||
nw_info = model.NetworkInfo.hydrate(
|
||||
[{'vnic_type': model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL}])
|
||||
instance.info_cache.network_info = nw_info
|
||||
args_info = {}
|
||||
self.assertRaisesRegex(exception.ForbiddenPortsWithAccelerator,
|
||||
'Forbidden with Ports that have accelerators.',
|
||||
self._test_block_port_accelerators, instance, args_info)
|
||||
self.assertEqual({}, args_info)
|
||||
|
||||
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
|
||||
'get_arq_uuids_for_instance')
|
||||
@mock.patch.object(compute_utils, 'create_image')
|
||||
@ -7828,6 +7894,31 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
|
||||
self.context,
|
||||
instance)
|
||||
|
||||
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
|
||||
'get_arq_uuids_for_instance')
|
||||
@mock.patch.object(compute_utils, 'create_image')
|
||||
def test_shelve_with_unsupport_port_accelerators(
|
||||
self, mock_create_img, mock_get_arq_uuids):
|
||||
instance = self._create_instance_obj()
|
||||
nw_info = model.NetworkInfo.hydrate(
|
||||
[{'vnic_type': model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL}])
|
||||
instance.info_cache.network_info = nw_info
|
||||
|
||||
with test.nested(
|
||||
mock.patch('nova.compute.utils.is_volume_backed_instance',
|
||||
return_value=False),
|
||||
mock.patch.object(objects.Instance, 'save'),
|
||||
mock.patch.object(self.compute_api, '_record_action_start'),
|
||||
) as (
|
||||
mock_volume_backend,
|
||||
mock_instance_save, mock_record_action
|
||||
):
|
||||
self.assertRaises(
|
||||
exception.ForbiddenPortsWithAccelerator,
|
||||
self.compute_api.shelve,
|
||||
self.context,
|
||||
instance)
|
||||
|
||||
@mock.patch('nova.accelerator.cyborg._CyborgClient.'
|
||||
'get_arq_uuids_for_instance')
|
||||
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
|
||||
|
@ -273,6 +273,9 @@ class BaseTestCase(test.TestCase):
|
||||
services=False, ctxt=None):
|
||||
ctxt = ctxt or self.context
|
||||
flavor = objects.Flavor.get_by_name(ctxt, type_name)
|
||||
info_cache = objects.InstanceInfoCache(
|
||||
network_info=network_model.NetworkInfo([]))
|
||||
|
||||
inst = objects.Instance(context=ctxt)
|
||||
inst.vm_state = vm_states.ACTIVE
|
||||
inst.task_state = None
|
||||
@ -304,6 +307,7 @@ class BaseTestCase(test.TestCase):
|
||||
inst.flavor = flavor
|
||||
inst.old_flavor = None
|
||||
inst.new_flavor = None
|
||||
inst.info_cache = info_cache
|
||||
if params:
|
||||
inst.flavor.update(params.pop('flavor', {}))
|
||||
inst.update(params)
|
||||
@ -6313,9 +6317,7 @@ class ComputeTestCase(BaseTestCase,
|
||||
# Confirm live_migration() works as expected correctly.
|
||||
# creating instance testdata
|
||||
c = context.get_admin_context()
|
||||
params = {'info_cache': objects.InstanceInfoCache(
|
||||
network_info=network_model.NetworkInfo([]))}
|
||||
instance = self._create_fake_instance_obj(params=params, ctxt=c)
|
||||
instance = self._create_fake_instance_obj(ctxt=c)
|
||||
instance.host = self.compute.host
|
||||
dest = 'desthost'
|
||||
|
||||
|
@ -814,7 +814,8 @@ class _BaseTaskTestCase(object):
|
||||
def test_build_instances_exhaust_host_list(self, _mock_save, mock_notify):
|
||||
# A list of three alternate hosts for one instance
|
||||
host_lists = copy.deepcopy(fake_host_lists_alt)
|
||||
instance = fake_instance.fake_instance_obj(self.context)
|
||||
instance = fake_instance.fake_instance_obj(
|
||||
self.context, expected_attrs='system_metadata')
|
||||
image = {'fake-data': 'should_pass_silently'}
|
||||
|
||||
# build_instances() is a cast, we need to wait for it to complete
|
||||
@ -4028,7 +4029,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
|
||||
"""Tests that when populate_retry raises MaxRetriesExceeded in
|
||||
build_instances, we don't attempt to cleanup the build request.
|
||||
"""
|
||||
instance = fake_instance.fake_instance_obj(self.context)
|
||||
instance = fake_instance.fake_instance_obj(
|
||||
self.context, expected_attrs=['system_metadata'])
|
||||
image = {'id': uuids.image_id}
|
||||
filter_props = {
|
||||
'retry': {
|
||||
|
@ -5910,15 +5910,21 @@ class TestAPI(TestAPIBase):
|
||||
port_resource_request=mock.sentinel.resource_request2),
|
||||
])
|
||||
|
||||
@mock.patch(
|
||||
'nova.accelerator.cyborg._CyborgClient.get_device_request_groups')
|
||||
@mock.patch(
|
||||
'nova.accelerator.cyborg._CyborgClient.get_device_profile_groups')
|
||||
@mock.patch.object(neutronapi.API, '_get_physnet_tunneled_info')
|
||||
@mock.patch('nova.accelerator.cyborg.get_device_profile_request_groups')
|
||||
@mock.patch('nova.accelerator.cyborg.get_device_amount_of_dp_groups')
|
||||
@mock.patch('nova.objects.request_spec.RequestGroup.from_port_request')
|
||||
@mock.patch.object(neutronapi.API, "_get_port_vnic_info")
|
||||
@mock.patch.object(neutronapi, 'get_client')
|
||||
def test_create_resource_requests_with_arq(self, getclient,
|
||||
mock_get_port_vnic_info, mock_from_port_request,
|
||||
mock_get_device_profile,
|
||||
mock_get_physnet_tunneled_info):
|
||||
mock_get_device_num,
|
||||
mock_get_physnet_tunneled_info,
|
||||
mock_get_dp_group,
|
||||
mock_get_rg):
|
||||
requested_networks = objects.NetworkRequestList(
|
||||
objects = [
|
||||
objects.NetworkRequest(port_id=uuids.portid_1)
|
||||
@ -5935,11 +5941,15 @@ class TestAPI(TestAPIBase):
|
||||
]
|
||||
rg = objects.RequestGroup(requester_id='request_group_1')
|
||||
rg.add_resource(rclass='CUSTOM_NIC_TRAIT', amount=1)
|
||||
mock_get_device_profile.return_value = [rg]
|
||||
mock_get_rg.return_value = [rg]
|
||||
mock_get_device_num.return_value = 1
|
||||
result = self.api.create_resource_requests(
|
||||
self.context, requested_networks, pci_requests=None)
|
||||
|
||||
network_metadata, port_resource_requests = result
|
||||
mock_get_dp_group.assert_called_once_with('smat_nic')
|
||||
mock_get_physnet_tunneled_info.assert_called_once_with(
|
||||
self.context, mock.ANY, 'netN')
|
||||
self.assertEqual({'physnet1'}, network_metadata.physnets)
|
||||
self.assertEqual([rg], port_resource_requests)
|
||||
|
||||
@ -5965,6 +5975,35 @@ class TestAPI(TestAPIBase):
|
||||
self.api.create_resource_requests,
|
||||
self.context, requested_networks, pci_requests=None)
|
||||
|
||||
@mock.patch.object(neutronapi.API, '_get_physnet_tunneled_info')
|
||||
@mock.patch('nova.accelerator.cyborg.get_device_amount_of_dp_groups')
|
||||
@mock.patch.object(neutronapi.API, "_get_port_vnic_info")
|
||||
@mock.patch.object(neutronapi, 'get_client')
|
||||
def test_create_resource_requests_arq_reject_multi_devices(self,
|
||||
getclient,
|
||||
mock_get_port_vnic_info,
|
||||
mock_get_device_num,
|
||||
mock_get_physnet_tunneled_info):
|
||||
requested_networks = objects.NetworkRequestList(
|
||||
objects = [
|
||||
objects.NetworkRequest(port_id=uuids.portid_1)
|
||||
])
|
||||
|
||||
mock_get_port_vnic_info.side_effect = [
|
||||
(model.VNIC_TYPE_ACCELERATOR_DIRECT, None, 'netN',
|
||||
None, None, 'smat_nic'),
|
||||
(model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL, None,
|
||||
'netN', None, None, 'smat_nic')
|
||||
]
|
||||
mock_get_physnet_tunneled_info.side_effect = [
|
||||
('physnet1', False), ('physnet2', False)
|
||||
]
|
||||
|
||||
mock_get_device_num.return_value = 2
|
||||
self.assertRaises(exception.DeviceProfileError,
|
||||
self.api.create_resource_requests,
|
||||
self.context, requested_networks, pci_requests=None)
|
||||
|
||||
@mock.patch.object(neutronapi, 'get_client')
|
||||
def test_associate_floating_ip_conflict(self, mock_get_client):
|
||||
"""Tests that if Neutron raises a Conflict we handle it and re-raise
|
||||
|
@ -1245,7 +1245,7 @@ class ServersPolicyTest(base.BasePolicyTest):
|
||||
self.project_member_context,
|
||||
id=1, uuid=uuids.fake_id, project_id=self.project_id,
|
||||
user_id='fake-user', vm_state=vm_states.ACTIVE,
|
||||
expected_attrs=['system_metadata'],
|
||||
expected_attrs=['system_metadata', 'info_cache'],
|
||||
launched_at=timeutils.utcnow())
|
||||
|
||||
mock_get.side_effect = fake_get
|
||||
|
@ -0,0 +1,9 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
Add support for smartnic via Cyborg device profiles in Neutron ports with
|
||||
vnic type ``accelerator-direct``. When such a port is used, Cyborg will
|
||||
manage the smartnic and Nova will pass through the smartnic VF to the
|
||||
server. Note that while vnic type ``accelerator-direct-physical`` also
|
||||
exists in Neutron it is not yet supported by Nova and the server create
|
||||
request will fail with such a port.
|
Loading…
Reference in New Issue
Block a user