Remove compute service level check for qos ops

To support move operations with qos ports, both the source and the
destination compute hosts need to be on Ussuri level. Service level
checks enforcing this were implemented in Ussuri. In Victoria we could
have removed those checks, as nova only supports compatibility between
N and N-1 computes, but we kept them for extra safety. In the meantime
we codified [1] the rule that nova does not support N-2 computes any
more. So in Wallaby we can assume that the oldest compute is already
on Victoria (Ussuri would be enough too).

This patch therefore removes the now unnecessary service level checks
and the related test cases.

[1] Ie15ec8299ae52ae8f5334d591ed3944e9585cf71

Change-Id: I14177e35b9d6d27d49e092604bf0f288cd05f57e
Balazs Gibizer 2020-06-15 10:56:12 +02:00
parent be752b8175
commit c163205489
16 changed files with 9 additions and 1138 deletions
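
The diff below is long, so here is a rough sketch, distilled from the
removed hunks themselves, of the two kinds of gates being deleted: a
global gate requiring the minimum nova-compute service version across
all cells to be at least 49 (the Ussuri-level check used by evacuate,
live migration and unshelve), and a per-host gate requiring the source
compute service to be at least version 39 (MIN_COMPUTE_MOVE_BANDWIDTH,
the Train-era check used by resize and cold migrate, which the
conductor combined with a compute RPC 5.2 check). The helper name
source_host_supports_qos_move and the constant
USSURI_GLOBAL_SERVICE_VERSION below are illustrative only; the original
code inlined these checks in the API controllers and the conductor
MigrationTask.

    # Condensed sketch of the removed gates; needs a configured nova
    # deployment to actually run.
    from nova import context as nova_context
    from nova import objects
    from nova.objects import service

    # Illustrative constant names; the removed code used the literal 49
    # and a module-level MIN_COMPUTE_MOVE_BANDWIDTH = 39.
    USSURI_GLOBAL_SERVICE_VERSION = 49
    MIN_COMPUTE_MOVE_BANDWIDTH = 39


    def supports_port_resource_request_during_move():
        """True once every nova-compute in every cell is on Ussuri level."""
        return service.get_minimum_version_all_cells(
            nova_context.get_admin_context(), ['nova-compute']
        ) >= USSURI_GLOBAL_SERVICE_VERSION


    def source_host_supports_qos_move(context, instance):
        """True if the instance's source host is new enough for a qos move."""
        source_service = objects.Service.get_by_host_and_binary(
            context, instance.host, 'nova-compute')
        return source_service.version >= MIN_COMPUTE_MOVE_BANDWIDTH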


@ -28,12 +28,9 @@ from nova.api.openstack import api_version_request
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.network import constants
from nova import objects
from nova.objects import service
from nova import quota
from nova import utils
@ -557,35 +554,3 @@ def supports_port_resource_request(req):
port resource request support, False otherwise.
"""
return api_version_request.is_supported(req, '2.72')
def supports_port_resource_request_during_move():
"""Check to see if the global compute service version is high enough to
support port resource request during move operation.
:returns: True if the compute service version is high enough for
port resource request move support, False otherwise.
"""
return service.get_minimum_version_all_cells(
nova_context.get_admin_context(), ['nova-compute']) >= 49
def instance_has_port_with_resource_request(instance_uuid, network_api):
# TODO(gibi): Use instance.info_cache to see if there is VIFs with
# allocation key in the profile. If there is no such VIF for an instance
# and the instance is not shelve offloaded then we can be sure that the
# instance has no port with resource request. If the instance is shelve
# offloaded then we still have to hit neutron.
search_opts = {'device_id': instance_uuid,
'fields': [constants.RESOURCE_REQUEST]}
# NOTE(gibi): We need to use an admin context to query neutron ports as
# neutron does not fill the resource_request field in the port response if
# we query with a non admin context.
admin_context = nova_context.get_admin_context()
ports = network_api.list_ports(
admin_context, **search_opts).get('ports', [])
for port in ports:
if port.get(constants.RESOURCE_REQUEST):
return True
return False


@ -120,22 +120,6 @@ class EvacuateController(wsgi.Controller):
msg = _("The target host can't be the same one.")
raise exc.HTTPBadRequest(explanation=msg)
# We could potentially move this check to conductor and avoid the
# extra API call to neutron when we support move operations with ports
# having resource requests.
if (common.instance_has_port_with_resource_request(
instance.uuid, self.network_api) and not
common.supports_port_resource_request_during_move()):
LOG.warning("The evacuate action on a server with ports "
"having resource requests, like a port with a QoS "
"minimum bandwidth policy, is not supported until "
"every nova-compute is upgraded to Ussuri")
msg = _("The evacuate action on a server with ports having "
"resource requests, like a port with a QoS minimum "
"bandwidth policy, is not supported by this cluster right "
"now")
raise exc.HTTPBadRequest(explanation=msg)
try:
self.compute_api.evacuate(context, instance, host,
on_shared_storage, password, force)


@ -27,13 +27,10 @@ from nova.compute import api as compute
from nova import exception
from nova.i18n import _
from nova.network import neutron
from nova import objects
from nova.policies import migrate_server as ms_policies
LOG = logging.getLogger(__name__)
MIN_COMPUTE_MOVE_BANDWIDTH = 39
class MigrateServerController(wsgi.Controller):
def __init__(self):
@ -59,19 +56,6 @@ class MigrateServerController(wsgi.Controller):
body['migrate'] is not None):
host_name = body['migrate'].get('host')
if common.instance_has_port_with_resource_request(
instance.uuid, self.network_api):
# TODO(gibi): Remove when nova only supports compute newer than
# Train
source_service = objects.Service.get_by_host_and_binary(
context, instance.host, 'nova-compute')
if source_service.version < MIN_COMPUTE_MOVE_BANDWIDTH:
msg = _("The migrate action on a server with ports having "
"resource requests, like a port with a QoS "
"minimum bandwidth policy, is not yet supported "
"on the source compute")
raise exc.HTTPConflict(explanation=msg)
try:
self.compute_api.resize(req.environ['nova.context'], instance,
host_name=host_name)
@ -134,22 +118,6 @@ class MigrateServerController(wsgi.Controller):
disk_over_commit = strutils.bool_from_string(disk_over_commit,
strict=True)
# We could potentially move this check to conductor and avoid the
# extra API call to neutron when we support move operations with ports
# having resource requests.
if (common.instance_has_port_with_resource_request(
instance.uuid, self.network_api) and not
common.supports_port_resource_request_during_move()):
LOG.warning("The os-migrateLive action on a server with ports "
"having resource requests, like a port with a QoS "
"minimum bandwidth policy, is not supported until "
"every nova-compute is upgraded to Ussuri")
msg = _("The os-migrateLive action on a server with ports having "
"resource requests, like a port with a QoS minimum "
"bandwidth policy, is not supported by this cluster right "
"now")
raise exc.HTTPBadRequest(explanation=msg)
try:
self.compute_api.live_migrate(context, instance, block_migration,
disk_over_commit, host, force,


@ -92,8 +92,6 @@ INVALID_FLAVOR_IMAGE_EXCEPTIONS = (
exception.InvalidMixedInstanceDedicatedMask,
)
MIN_COMPUTE_MOVE_BANDWIDTH = 39
class ServersController(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
@ -946,18 +944,6 @@ class ServersController(wsgi.Controller):
target={'user_id': instance.user_id,
'project_id': instance.project_id})
if common.instance_has_port_with_resource_request(
instance_id, self.network_api):
# TODO(gibi): Remove when nova only supports compute newer than
# Train
source_service = objects.Service.get_by_host_and_binary(
context, instance.host, 'nova-compute')
if source_service.version < MIN_COMPUTE_MOVE_BANDWIDTH:
msg = _("The resize action on a server with ports having "
"resource requests, like a port with a QoS "
"minimum bandwidth policy, is not yet supported.")
raise exc.HTTPConflict(explanation=msg)
try:
self.compute_api.resize(context, instance, flavor_id,
auto_disk_config=auto_disk_config)


@ -23,9 +23,7 @@ from nova.api.openstack.compute.schemas import shelve as shelve_schemas
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova.network import neutron
from nova.policies import shelve as shelve_policies
@ -99,23 +97,6 @@ class ShelveController(wsgi.Controller):
if support_az and unshelve_dict:
new_az = unshelve_dict['availability_zone']
# We could potentially move this check to conductor and avoid the
# extra API call to neutron when we support move operations with ports
# having resource requests.
if (instance.vm_state == vm_states.SHELVED_OFFLOADED and
common.instance_has_port_with_resource_request(
instance.uuid, self.network_api) and
not common.supports_port_resource_request_during_move()):
LOG.warning("The unshelve action on a server with ports having "
"resource requests, like a port with a QoS minimum "
"bandwidth policy, is not supported until every "
"nova-compute is upgraded to Ussuri")
msg = _("The unshelve action on a server with ports having "
"resource requests, like a port with a QoS minimum "
"bandwidth policy, is not supported by this cluster right "
"now")
raise exc.HTTPBadRequest(explanation=msg)
try:
self.compute_api.unshelve(context, instance, new_az=new_az)
except (exception.InstanceIsLocked,


@ -934,13 +934,6 @@ class ComputeAPI(object):
block_migration=block_migration,
disk=disk, migrate_data=migrate_data)
def supports_resize_with_qos_port(self, ctxt):
"""Returns whether we can send 5.2, needed for migrating and resizing
servers with ports having resource request.
"""
client = self.router.client(ctxt)
return client.can_send_version('5.2')
# TODO(mriedem): Drop compat for request_spec being a legacy dict in v6.0.
def prep_resize(self, ctxt, instance, image, instance_type, host,
migration, request_spec, filter_properties, node,


@ -227,106 +227,6 @@ class MigrationTask(base.TaskBase):
instance=self.instance)
return same_cell
def _support_resource_request(self, selection):
"""Returns true if the host is new enough to support resource request
during migration and that the RPC API version is not pinned during
rolling upgrade.
"""
svc = objects.Service.get_by_host_and_binary(
self.context, selection.service_host, 'nova-compute')
return (svc.version >= 39 and
self.compute_rpcapi.supports_resize_with_qos_port(
self.context))
# TODO(gibi): Remove this compat code when nova doesn't need to support
# Train computes any more.
def _get_host_supporting_request(self, selection_list):
"""Return the first compute selection from the selection_list where
the service is new enough to support resource request during migration
and the resources claimed successfully.
:param selection_list: a list of Selection objects returned by the
scheduler
:return: A two tuple. The first item is a Selection object
representing the host that supports the request. The second item
is a list of Selection objects representing the remaining alternate
hosts.
:raises MaxRetriesExceeded: if none of the hosts in the selection_list
is new enough to support the request or we cannot claim resource
on any of the hosts that are new enough.
"""
if not self.request_spec.requested_resources:
return selection_list[0], selection_list[1:]
# Scheduler allocated resources on the first host. So check if the
# first host is new enough
if self._support_resource_request(selection_list[0]):
return selection_list[0], selection_list[1:]
# First host is old, so we need to use an alternate. Therefore we have
# to remove the allocation from the first host.
self.reportclient.delete_allocation_for_instance(
self.context, self.instance.uuid)
LOG.debug(
'Scheduler returned host %(host)s as a possible migration target '
'but that host is not new enough to support the migration with '
'resource request %(request)s or the compute RPC is pinned to '
'less than 5.2. Trying alternate hosts.',
{'host': selection_list[0].service_host,
'request': self.request_spec.requested_resources},
instance=self.instance)
alternates = selection_list[1:]
for i, selection in enumerate(alternates):
if self._support_resource_request(selection):
# this host is new enough so we need to try to claim resources
# on it
if selection.allocation_request:
alloc_req = jsonutils.loads(
selection.allocation_request)
resource_claimed = scheduler_utils.claim_resources(
self.context, self.reportclient, self.request_spec,
self.instance.uuid, alloc_req,
selection.allocation_request_version)
if not resource_claimed:
LOG.debug(
'Scheduler returned alternate host %(host)s as a '
'possible migration target but resource claim '
'failed on that host. Trying another alternate.',
{'host': selection.service_host},
instance=self.instance)
else:
return selection, alternates[i + 1:]
else:
# Some deployments use different schedulers that do not
# use Placement, so they will not have an
# allocation_request to claim with. For those cases,
# there is no concept of claiming, so just assume that
# the resources are available.
return selection, alternates[i + 1:]
else:
LOG.debug(
'Scheduler returned alternate host %(host)s as a possible '
'migration target but that host is not new enough to '
'support the migration with resource request %(request)s '
'or the compute RPC is pinned to less than 5.2. '
'Trying another alternate.',
{'host': selection.service_host,
'request': self.request_spec.requested_resources},
instance=self.instance)
# if we reach this point then none of the hosts was new enough for the
# request or we failed to claim resources on every alternate
reason = ("Exhausted all hosts available during compute service level "
"check for instance %(instance_uuid)s." %
{"instance_uuid": self.instance.uuid})
raise exception.MaxRetriesExceeded(reason=reason)
def _execute(self):
# NOTE(sbauza): Force_hosts/nodes needs to be reset if we want to make
# sure that the next destination is not forced to be the original host.
@ -436,8 +336,8 @@ class MigrationTask(base.TaskBase):
# just need the first returned element.
selection_list = selection_lists[0]
selection, self.host_list = self._get_host_supporting_request(
selection_list)
# Scheduler allocated resources on the first host so try that first
selection, self.host_list = selection_list[0], selection_list[1:]
scheduler_utils.fill_provider_mapping(self.request_spec, selection)
return selection
@ -452,17 +352,6 @@ class MigrationTask(base.TaskBase):
selection = None
while self.host_list and not host_available:
selection = self.host_list.pop(0)
if (self.request_spec.requested_resources and not
self._support_resource_request(selection)):
LOG.debug(
'Scheduler returned alternate host %(host)s as a possible '
'migration target for re-schedule but that host is not '
'new enough to support the migration with resource '
'request %(request)s. Trying another alternate.',
{'host': selection.service_host,
'request': self.request_spec.requested_resources},
instance=self.instance)
continue
if selection.allocation_request:
alloc_req = jsonutils.loads(selection.allocation_request)
else:


@ -5985,116 +5985,6 @@ class UnsupportedPortResourceRequestBasedSchedulingTest(
"until microversion 2.72.",
six.text_type(ex))
def test_live_migrate_server_with_port_resource_request_old_version(
self):
server = self._create_server(
flavor=self.flavor,
networks=[{'port': self.neutron.port_1['id']}])
self._wait_for_state_change(server, 'ACTIVE')
# We need to simulate that the above server has a port that has
# resource request; we cannot boot with such a port but legacy servers
# can exist with such a port.
self._add_resource_request_to_a_bound_port(self.neutron.port_1['id'])
post = {
'os-migrateLive': {
'host': None,
'block_migration': False,
}
}
with mock.patch(
"nova.objects.service.get_minimum_version_all_cells",
return_value=48,
):
ex = self.assertRaises(
client.OpenStackApiException,
self.api.post_server_action, server['id'], post)
self.assertEqual(400, ex.response.status_code)
self.assertIn(
"The os-migrateLive action on a server with ports having resource "
"requests, like a port with a QoS minimum bandwidth policy, is "
"not supported by this cluster right now",
six.text_type(ex))
self.assertIn(
"The os-migrateLive action on a server with ports having resource "
"requests, like a port with a QoS minimum bandwidth policy, is "
"not supported until every nova-compute is upgraded to Ussuri",
self.stdlog.logger.output)
def test_evacuate_server_with_port_resource_request_old_version(
self):
server = self._create_server(
flavor=self.flavor,
networks=[{'port': self.neutron.port_1['id']}])
self._wait_for_state_change(server, 'ACTIVE')
# We need to simulate that the above server has a port that has
# resource request; we cannot boot with such a port but legacy servers
# can exist with such a port.
self._add_resource_request_to_a_bound_port(self.neutron.port_1['id'])
with mock.patch(
"nova.objects.service.get_minimum_version_all_cells",
return_value=48,
):
ex = self.assertRaises(
client.OpenStackApiException,
self.api.post_server_action, server['id'], {'evacuate': {}})
self.assertEqual(400, ex.response.status_code)
self.assertIn(
"The evacuate action on a server with ports having resource "
"requests, like a port with a QoS minimum bandwidth policy, is "
"not supported by this cluster right now",
six.text_type(ex))
self.assertIn(
"The evacuate action on a server with ports having resource "
"requests, like a port with a QoS minimum bandwidth policy, is "
"not supported until every nova-compute is upgraded to Ussuri",
self.stdlog.logger.output)
def test_unshelve_offloaded_server_with_port_resource_request_old_version(
self):
server = self._create_server(
flavor=self.flavor,
networks=[{'port': self.neutron.port_1['id']}])
self._wait_for_state_change(server, 'ACTIVE')
# with default config shelve means immediate offload as well
req = {
'shelve': {}
}
self.api.post_server_action(server['id'], req)
self._wait_for_server_parameter(
server, {'status': 'SHELVED_OFFLOADED'})
# We need to simulate that the above server has a port that has
# resource request; we cannot boot with such a port but legacy servers
# can exist with such a port.
self._add_resource_request_to_a_bound_port(self.neutron.port_1['id'])
with mock.patch(
"nova.objects.service.get_minimum_version_all_cells",
return_value=48,
):
ex = self.assertRaises(
client.OpenStackApiException,
self.api.post_server_action, server['id'], {'unshelve': None})
self.assertEqual(400, ex.response.status_code)
self.assertIn(
"The unshelve action on a server with ports having resource "
"requests, like a port with a QoS minimum bandwidth policy, is "
"not supported by this cluster right now",
six.text_type(ex))
self.assertIn(
"The unshelve action on a server with ports having resource "
"requests, like a port with a QoS minimum bandwidth policy, is "
"not supported until every nova-compute is upgraded to Ussuri",
self.stdlog.logger.output)
def test_unshelve_not_offloaded_server_with_port_resource_request(
self):
"""If the server is not offloaded then unshelving does not cause a new
@ -6572,118 +6462,6 @@ class ServerMoveWithPortResourceRequestTest(
self.flavor_with_group_policy_bigger['id'],
{'extra_specs': {'group_policy': 'isolate'}})
def test_migrate_server_with_qos_port_old_dest_compute_no_alternate(self):
"""Create a situation where the only migration target host returned
by the scheduler is too old and therefore the migration fails.
"""
non_qos_normal_port = self.neutron.port_1
qos_normal_port = self.neutron.port_with_resource_request
qos_sriov_port = self.neutron.port_with_sriov_resource_request
server = self._create_server_with_ports_and_check_allocation(
non_qos_normal_port, qos_normal_port, qos_sriov_port)
orig_get_service = objects.Service.get_by_host_and_binary
def fake_get_service(context, host, binary):
# host2 is the only migration target, let's make it too old so the
# migration will fail
if host == 'host2':
service = orig_get_service(context, host, binary)
service.version = 38
return service
else:
return orig_get_service(context, host, binary)
with mock.patch(
'nova.objects.Service.get_by_host_and_binary',
side_effect=fake_get_service):
self.api.post_server_action(server['id'], {'migrate': None},
check_response_status=[202])
self._wait_for_server_parameter(server,
{'OS-EXT-STS:task_state': None})
self._assert_resize_migrate_action_fail(
server, instance_actions.MIGRATE, 'NoValidHost')
# check that the server still allocates from the original host
self._check_allocation(
server, self.compute1_rp_uuid, non_qos_normal_port,
qos_normal_port, qos_sriov_port, self.flavor_with_group_policy)
# but the migration allocation is gone
migration_uuid = self.get_migration_uuid_for_instance(server['id'])
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
self.assertEqual({}, migration_allocations)
self._delete_server_and_check_allocations(
server, qos_normal_port, qos_sriov_port)
def test_migrate_server_with_qos_port_old_dest_compute_alternate(self):
"""Create a situation where the first migration target host returned
by the scheduler is too old and therefore the second host is selected
by the MigrationTask.
"""
self._start_compute('host3')
compute3_rp_uuid = self._get_provider_uuid_by_host('host3')
self._create_networking_rp_tree('host3', compute3_rp_uuid)
non_qos_normal_port = self.neutron.port_1
qos_normal_port = self.neutron.port_with_resource_request
qos_sriov_port = self.neutron.port_with_sriov_resource_request
server = self._create_server_with_ports_and_check_allocation(
non_qos_normal_port, qos_normal_port, qos_sriov_port)
orig_get_service = objects.Service.get_by_host_and_binary
def fake_get_service(context, host, binary):
# host2 is the first migration target, let's make it too old so the
# migration will skip this host
if host == 'host2':
service = orig_get_service(context, host, binary)
service.version = 38
return service
# host3 is the second migration target, let's make it new enough so
# the migration task will choose this host
elif host == 'host3':
service = orig_get_service(context, host, binary)
service.version = 39
return service
else:
return orig_get_service(context, host, binary)
with mock.patch(
'nova.objects.Service.get_by_host_and_binary',
side_effect=fake_get_service):
self.api.post_server_action(server['id'], {'migrate': None})
self._wait_for_state_change(server, 'VERIFY_RESIZE')
migration_uuid = self.get_migration_uuid_for_instance(server['id'])
# check that server allocates from host3 and the migration allocates
# from host1
self._check_allocation(
server, compute3_rp_uuid, non_qos_normal_port, qos_normal_port,
qos_sriov_port, self.flavor_with_group_policy, migration_uuid,
source_compute_rp_uuid=self.compute1_rp_uuid)
self._confirm_resize(server)
# check that allocation is still OK
self._check_allocation(
server, compute3_rp_uuid, non_qos_normal_port,
qos_normal_port, qos_sriov_port, self.flavor_with_group_policy)
# but the migration allocation is gone
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
self.assertEqual({}, migration_allocations)
self._delete_server_and_check_allocations(
server, qos_normal_port, qos_sriov_port)
def _test_resize_or_migrate_server_with_qos_ports(self, new_flavor=None):
non_qos_normal_port = self.neutron.port_1
qos_normal_port = self.neutron.port_with_resource_request
@ -6965,69 +6743,6 @@ class ServerMoveWithPortResourceRequestTest(
server, self.compute1_rp_uuid, non_qos_port, qos_port,
qos_sriov_port, self.flavor_with_group_policy)
def test_migrate_server_with_qos_port_pinned_compute_rpc(self):
# Pin the compute rpc version to 5.1 to test what happens if
# resize RPC is called without RequestSpec.
# It is OK to set this after the nova services has started in setUp()
# as no compute rpc call is made so far.
self.flags(compute='5.1', group='upgrade_levels')
non_qos_normal_port = self.neutron.port_1
qos_normal_port = self.neutron.port_with_resource_request
server = self._create_server_with_ports(
non_qos_normal_port, qos_normal_port)
# This migration expected to fail as the old RPC does not provide
# enough information to do a proper port binding on the target host.
# The MigrationTask in the conductor checks that the RPC is new enough
# for this request for each possible destination provided by the
# scheduler and skips the old hosts. The actual response will be a 202
# so we have to wait for the failed instance action event.
self.api.post_server_action(server['id'], {'migrate': None})
self._assert_resize_migrate_action_fail(
server, instance_actions.MIGRATE, 'NoValidHost')
# The migration is put into error
self._wait_for_migration_status(server, ['error'])
# The migration is rejected so the instance remains on the source host
server = self.api.get_server(server['id'])
self.assertEqual('ACTIVE', server['status'])
self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])
migration_uuid = self.get_migration_uuid_for_instance(server['id'])
# The migration allocation is deleted
migration_allocations = self.placement.get(
'/allocations/%s' % migration_uuid).body['allocations']
self.assertEqual({}, migration_allocations)
# The instance is still allocated from the source host
updated_non_qos_port = self.neutron.show_port(
non_qos_normal_port['id'])['port']
updated_qos_port = self.neutron.show_port(
qos_normal_port['id'])['port']
allocations = self.placement.get(
'/allocations/%s' % server['id']).body['allocations']
# We expect one set of allocations for the compute resources on the
# compute rp and one set for the networking resources on the ovs
# bridge rp due to the qos_port resource request
self.assertEqual(2, len(allocations))
self.assertComputeAllocationMatchesFlavor(
allocations, self.compute1_rp_uuid, self.flavor_with_group_policy)
ovs_allocations = allocations[
self.ovs_bridge_rp_per_host[self.compute1_rp_uuid]]['resources']
self.assertPortMatchesAllocation(qos_normal_port, ovs_allocations)
# binding:profile still points to the networking RP on the source host
qos_binding_profile = updated_qos_port['binding:profile']
self.assertEqual(self.ovs_bridge_rp_per_host[self.compute1_rp_uuid],
qos_binding_profile['allocation'])
# And we expect not to have any allocation set in the port binding for
# the port that doesn't have resource request
self.assertEqual({}, updated_non_qos_port['binding:profile'])
def _check_allocation_during_evacuate(
self, server, flavor, source_compute_rp_uuid, dest_compute_rp_uuid,
non_qos_port, qos_port, qos_sriov_port):


@ -301,25 +301,6 @@ class MigrateServerTestsV21(admin_only_action_common.CommonTests):
expected_exc=webob.exc.HTTPInternalServerError,
check_response=False)
@mock.patch('nova.objects.Service.get_by_host_and_binary')
@mock.patch('nova.api.openstack.common.'
'instance_has_port_with_resource_request', return_value=True)
def test_migrate_with_bandwidth_from_old_compute_not_supported(
self, mock_has_res_req, mock_get_service):
instance = self._stub_instance_get()
mock_get_service.return_value = objects.Service(host=instance['host'])
mock_get_service.return_value.version = 38
self.assertRaises(
webob.exc.HTTPConflict, self.controller._migrate, self.req,
instance['uuid'], body={'migrate': None})
mock_has_res_req.assert_called_once_with(
instance['uuid'], self.controller.network_api)
mock_get_service.assert_called_once_with(
self.req.environ['nova.context'], instance['host'], 'nova-compute')
class MigrateServerTestsV225(MigrateServerTestsV21):


@ -1274,21 +1274,3 @@ class ServerActionsControllerTestV21(test.TestCase):
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_create_image,
self.req, FAKE_UUID, body=body)
@mock.patch('nova.objects.Service.get_by_host_and_binary')
@mock.patch('nova.api.openstack.common.'
'instance_has_port_with_resource_request', return_value=True)
def test_resize_with_bandwidth_from_old_compute_not_supported(
self, mock_has_res_req, mock_get_service):
body = dict(resize=dict(flavorRef="http://localhost/3"))
mock_get_service.return_value = objects.Service()
mock_get_service.return_value.version = 38
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_resize,
self.req, FAKE_UUID, body=body)
mock_has_res_req.assert_called_once_with(
FAKE_UUID, self.controller.network_api)
mock_get_service.assert_called_once_with(
self.req.environ['nova.context'], 'fake_host', 'nova-compute')


@ -94,10 +94,6 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
self.req = fakes.HTTPRequest.blank(
'/%s/servers/a/action' % fakes.FAKE_PROJECT_ID,
use_admin_context=True, version=self.wsgi_api_version)
# These tests don't care about ports with QoS bandwidth resources.
self.stub_out('nova.api.openstack.common.'
'instance_has_port_with_resource_request',
lambda *a, **kw: False)
def fake_get_instance(self):
ctxt = self.req.environ['nova.context']


@ -27,9 +27,7 @@ import webob.multidict
from nova.api.openstack import common
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.network import neutron as network
from nova import test
from nova.tests.unit.api.openstack import fakes
@ -451,23 +449,6 @@ class MiscFunctionsTest(test.TestCase):
self.assertRaises(exception.InvalidInput, common.is_all_tenants,
search_opts)
def test_instance_has_port_with_resource_request(self):
network_api = mock.Mock(spec=network.API())
network_api.list_ports.return_value = {'ports': [
{'resource_request': mock.sentinel.request}
]}
res = common.instance_has_port_with_resource_request(
mock.sentinel.uuid, network_api)
self.assertTrue(res)
network_api.list_ports.assert_called_once_with(
test.MatchType(context.RequestContext),
device_id=mock.sentinel.uuid, fields=['resource_request'])
# assert that the neutron call uses an admin context
ctxt = network_api.mock_calls[0][1][0]
self.assertTrue(ctxt.is_admin)
self.assertIsNone(ctxt.auth_token)
class TestCollectionLinks(test.NoDBTestCase):
"""Tests the _get_collection_links method."""


@ -313,539 +313,6 @@ class MigrationTaskTestCase(test.NoDBTestCase):
self.instance.uuid, alloc_req, '1.19')
mock_fill_provider_mapping.assert_not_called()
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'delete_allocation_for_instance')
@mock.patch('nova.objects.Service.get_by_host_and_binary')
def test_get_host_supporting_request_no_resource_request(
self, mock_get_service, mock_delete_allocation,
mock_claim_resources):
# no resource request so we expect the first host is simply returned
self.request_spec.requested_resources = []
task = self._generate_task()
resources = {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}}
first = objects.Selection(
service_host="host1",
nodename="node1",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host1: resources}}),
allocation_request_version='1.19')
alternate = objects.Selection(
service_host="host2",
nodename="node2",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host2: resources}}),
allocation_request_version='1.19')
selection_list = [first, alternate]
selected, alternates = task._get_host_supporting_request(
selection_list)
self.assertEqual(first, selected)
self.assertEqual([alternate], alternates)
mock_get_service.assert_not_called()
# The first host was good and the scheduler made allocation on that
# host. So we don't expect any resource claim manipulation
mock_delete_allocation.assert_not_called()
mock_claim_resources.assert_not_called()
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'delete_allocation_for_instance')
@mock.patch('nova.objects.Service.get_by_host_and_binary')
def test_get_host_supporting_request_first_host_is_new(
self, mock_get_service, mock_delete_allocation,
mock_claim_resources):
self.request_spec.requested_resources = [
objects.RequestGroup()
]
task = self._generate_task()
resources = {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}}
first = objects.Selection(
service_host="host1",
nodename="node1",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host1: resources}}),
allocation_request_version='1.19')
alternate = objects.Selection(
service_host="host2",
nodename="node2",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host2: resources}}),
allocation_request_version='1.19')
selection_list = [first, alternate]
first_service = objects.Service(service_host='host1')
first_service.version = 39
mock_get_service.return_value = first_service
selected, alternates = task._get_host_supporting_request(
selection_list)
self.assertEqual(first, selected)
self.assertEqual([alternate], alternates)
mock_get_service.assert_called_once_with(
task.context, 'host1', 'nova-compute')
# The first host was good and the scheduler made allocation on that
# host. So we don't expect any resource claim manipulation
mock_delete_allocation.assert_not_called()
mock_claim_resources.assert_not_called()
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'delete_allocation_for_instance')
@mock.patch('nova.objects.Service.get_by_host_and_binary')
def test_get_host_supporting_request_first_host_is_old_no_alternates(
self, mock_get_service, mock_delete_allocation,
mock_claim_resources):
self.request_spec.requested_resources = [
objects.RequestGroup()
]
task = self._generate_task()
resources = {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}}
first = objects.Selection(
service_host="host1",
nodename="node1",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host1: resources}}),
allocation_request_version='1.19')
selection_list = [first]
first_service = objects.Service(service_host='host1')
first_service.version = 38
mock_get_service.return_value = first_service
self.assertRaises(
exception.MaxRetriesExceeded, task._get_host_supporting_request,
selection_list)
mock_get_service.assert_called_once_with(
task.context, 'host1', 'nova-compute')
mock_delete_allocation.assert_called_once_with(
task.context, self.instance.uuid)
mock_claim_resources.assert_not_called()
@mock.patch.object(migrate.LOG, 'debug')
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'delete_allocation_for_instance')
@mock.patch('nova.objects.Service.get_by_host_and_binary')
def test_get_host_supporting_request_first_host_is_old_second_good(
self, mock_get_service, mock_delete_allocation,
mock_claim_resources, mock_debug):
self.request_spec.requested_resources = [
objects.RequestGroup()
]
task = self._generate_task()
resources = {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}}
first = objects.Selection(
service_host="host1",
nodename="node1",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host1: resources}}),
allocation_request_version='1.19')
second = objects.Selection(
service_host="host2",
nodename="node2",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host2: resources}}),
allocation_request_version='1.19')
third = objects.Selection(
service_host="host3",
nodename="node3",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host3: resources}}),
allocation_request_version='1.19')
selection_list = [first, second, third]
first_service = objects.Service(service_host='host1')
first_service.version = 38
second_service = objects.Service(service_host='host2')
second_service.version = 39
mock_get_service.side_effect = [first_service, second_service]
selected, alternates = task._get_host_supporting_request(
selection_list)
self.assertEqual(second, selected)
self.assertEqual([third], alternates)
mock_get_service.assert_has_calls([
mock.call(task.context, 'host1', 'nova-compute'),
mock.call(task.context, 'host2', 'nova-compute'),
])
mock_delete_allocation.assert_called_once_with(
task.context, self.instance.uuid)
mock_claim_resources.assert_called_once_with(
self.context, task.reportclient, task.request_spec,
self.instance.uuid, {"allocations": {uuids.host2: resources}},
'1.19')
mock_debug.assert_called_once_with(
'Scheduler returned host %(host)s as a possible migration target '
'but that host is not new enough to support the migration with '
'resource request %(request)s or the compute RPC is pinned to '
'less than 5.2. Trying alternate hosts.',
{'host': 'host1',
'request': self.request_spec.requested_resources},
instance=self.instance)
@mock.patch.object(migrate.LOG, 'debug')
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'delete_allocation_for_instance')
@mock.patch('nova.objects.Service.get_by_host_and_binary')
def test_get_host_supporting_request_first_host_is_old_second_claim_fails(
self, mock_get_service, mock_delete_allocation,
mock_claim_resources, mock_debug):
self.request_spec.requested_resources = [
objects.RequestGroup()
]
task = self._generate_task()
resources = {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}}
first = objects.Selection(
service_host="host1",
nodename="node1",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host1: resources}}),
allocation_request_version='1.19')
second = objects.Selection(
service_host="host2",
nodename="node2",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host2: resources}}),
allocation_request_version='1.19')
third = objects.Selection(
service_host="host3",
nodename="node3",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host3: resources}}),
allocation_request_version='1.19')
fourth = objects.Selection(
service_host="host4",
nodename="node4",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host4: resources}}),
allocation_request_version='1.19')
selection_list = [first, second, third, fourth]
first_service = objects.Service(service_host='host1')
first_service.version = 38
second_service = objects.Service(service_host='host2')
second_service.version = 39
third_service = objects.Service(service_host='host3')
third_service.version = 39
mock_get_service.side_effect = [
first_service, second_service, third_service]
# not called for the first host but called for the second and third
# make the second claim fail to force the selection of the third
mock_claim_resources.side_effect = [False, True]
selected, alternates = task._get_host_supporting_request(
selection_list)
self.assertEqual(third, selected)
self.assertEqual([fourth], alternates)
mock_get_service.assert_has_calls([
mock.call(task.context, 'host1', 'nova-compute'),
mock.call(task.context, 'host2', 'nova-compute'),
mock.call(task.context, 'host3', 'nova-compute'),
])
mock_delete_allocation.assert_called_once_with(
task.context, self.instance.uuid)
mock_claim_resources.assert_has_calls([
mock.call(
self.context, task.reportclient, task.request_spec,
self.instance.uuid,
{"allocations": {uuids.host2: resources}}, '1.19'),
mock.call(
self.context, task.reportclient, task.request_spec,
self.instance.uuid,
{"allocations": {uuids.host3: resources}}, '1.19'),
])
mock_debug.assert_has_calls([
mock.call(
'Scheduler returned host %(host)s as a possible migration '
'target but that host is not new enough to support the '
'migration with resource request %(request)s or the compute '
'RPC is pinned to less than 5.2. Trying alternate hosts.',
{'host': 'host1',
'request': self.request_spec.requested_resources},
instance=self.instance),
mock.call(
'Scheduler returned alternate host %(host)s as a possible '
'migration target but resource claim '
'failed on that host. Trying another alternate.',
{'host': 'host2'},
instance=self.instance),
])
@mock.patch.object(migrate.LOG, 'debug')
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'delete_allocation_for_instance')
@mock.patch('nova.objects.Service.get_by_host_and_binary')
def test_get_host_supporting_request_both_first_and_second_too_old(
self, mock_get_service, mock_delete_allocation,
mock_claim_resources, mock_debug):
self.request_spec.requested_resources = [
objects.RequestGroup()
]
task = self._generate_task()
resources = {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}}
first = objects.Selection(
service_host="host1",
nodename="node1",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host1: resources}}),
allocation_request_version='1.19')
second = objects.Selection(
service_host="host2",
nodename="node2",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host2: resources}}),
allocation_request_version='1.19')
third = objects.Selection(
service_host="host3",
nodename="node3",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host3: resources}}),
allocation_request_version='1.19')
fourth = objects.Selection(
service_host="host4",
nodename="node4",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host4: resources}}),
allocation_request_version='1.19')
selection_list = [first, second, third, fourth]
first_service = objects.Service(service_host='host1')
first_service.version = 38
second_service = objects.Service(service_host='host2')
second_service.version = 38
third_service = objects.Service(service_host='host3')
third_service.version = 39
mock_get_service.side_effect = [
first_service, second_service, third_service]
# not called for the first and second hosts but called for the third
mock_claim_resources.side_effect = [True]
selected, alternates = task._get_host_supporting_request(
selection_list)
self.assertEqual(third, selected)
self.assertEqual([fourth], alternates)
mock_get_service.assert_has_calls([
mock.call(task.context, 'host1', 'nova-compute'),
mock.call(task.context, 'host2', 'nova-compute'),
mock.call(task.context, 'host3', 'nova-compute'),
])
mock_delete_allocation.assert_called_once_with(
task.context, self.instance.uuid)
mock_claim_resources.assert_called_once_with(
self.context, task.reportclient, task.request_spec,
self.instance.uuid,
{"allocations": {uuids.host3: resources}}, '1.19')
mock_debug.assert_has_calls([
mock.call(
'Scheduler returned host %(host)s as a possible migration '
'target but that host is not new enough to support the '
'migration with resource request %(request)s or the compute '
'RPC is pinned to less than 5.2. Trying alternate hosts.',
{'host': 'host1',
'request': self.request_spec.requested_resources},
instance=self.instance),
mock.call(
'Scheduler returned alternate host %(host)s as a possible '
'migration target but that host is not new enough to support '
'the migration with resource request %(request)s or the '
'compute RPC is pinned to less than 5.2. Trying another '
'alternate.',
{'host': 'host2',
'request': self.request_spec.requested_resources},
instance=self.instance),
])
@mock.patch.object(migrate.LOG, 'debug')
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.objects.Service.get_by_host_and_binary')
def test_reschedule_old_compute_skipped(
self, mock_get_service, mock_claim_resources, mock_debug):
self.request_spec.requested_resources = [
objects.RequestGroup(requester_id=uuids.port1)
]
task = self._generate_task()
resources = {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}}
first = objects.Selection(
service_host="host1",
nodename="node1",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host1: resources},
"mappings": {uuids.port1: [uuids.host1]}}),
allocation_request_version='1.35')
second = objects.Selection(
service_host="host2",
nodename="node2",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host2: resources},
"mappings": {uuids.port1: [uuids.host2]}}),
allocation_request_version='1.35')
first_service = objects.Service(service_host='host1')
first_service.version = 38
second_service = objects.Service(service_host='host2')
second_service.version = 39
mock_get_service.side_effect = [first_service, second_service]
# set up task for re-schedule
task.host_list = [first, second]
selected = task._reschedule()
self.assertEqual(second, selected)
self.assertEqual([], task.host_list)
mock_get_service.assert_has_calls([
mock.call(task.context, 'host1', 'nova-compute'),
mock.call(task.context, 'host2', 'nova-compute'),
])
mock_claim_resources.assert_called_once_with(
self.context.elevated(), task.reportclient, task.request_spec,
self.instance.uuid,
{"allocations": {uuids.host2: resources},
"mappings": {uuids.port1: [uuids.host2]}}, '1.35')
mock_debug.assert_has_calls([
mock.call(
'Scheduler returned alternate host %(host)s as a possible '
'migration target for re-schedule but that host is not '
'new enough to support the migration with resource '
'request %(request)s. Trying another alternate.',
{'host': 'host1',
'request': self.request_spec.requested_resources},
instance=self.instance),
])
@mock.patch.object(migrate.LOG, 'debug')
@mock.patch('nova.scheduler.utils.fill_provider_mapping')
@mock.patch('nova.scheduler.utils.claim_resources')
@mock.patch('nova.objects.Service.get_by_host_and_binary')
def test_reschedule_old_computes_no_more_alternates(
self, mock_get_service, mock_claim_resources, mock_fill_mapping,
mock_debug):
self.request_spec.requested_resources = [
objects.RequestGroup()
]
task = self._generate_task()
resources = {
"resources": {
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 100}}
first = objects.Selection(
service_host="host1",
nodename="node1",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host1: resources}}),
allocation_request_version='1.19')
second = objects.Selection(
service_host="host2",
nodename="node2",
cell_uuid=uuids.cell1,
allocation_request=jsonutils.dumps(
{"allocations": {uuids.host2: resources}}),
allocation_request_version='1.19')
first_service = objects.Service(service_host='host1')
first_service.version = 38
second_service = objects.Service(service_host='host2')
second_service.version = 38
mock_get_service.side_effect = [first_service, second_service]
# set up task for re-schedule
task.host_list = [first, second]
self.assertRaises(exception.MaxRetriesExceeded, task._reschedule)
self.assertEqual([], task.host_list)
mock_get_service.assert_has_calls([
mock.call(task.context, 'host1', 'nova-compute'),
mock.call(task.context, 'host2', 'nova-compute'),
])
mock_claim_resources.assert_not_called()
mock_fill_mapping.assert_not_called()
mock_debug.assert_has_calls([
mock.call(
'Scheduler returned alternate host %(host)s as a possible '
'migration target for re-schedule but that host is not '
'new enough to support the migration with resource '
'request %(request)s. Trying another alternate.',
{'host': 'host1',
'request': self.request_spec.requested_resources},
instance=self.instance),
mock.call(
'Scheduler returned alternate host %(host)s as a possible '
'migration target for re-schedule but that host is not '
'new enough to support the migration with resource '
'request %(request)s. Trying another alternate.',
{'host': 'host2',
'request': self.request_spec.requested_resources},
instance=self.instance),
])
@mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
return_value=objects.InstanceMapping(
cell_mapping=objects.CellMapping(uuid=uuids.cell1)))


@ -47,10 +47,6 @@ class EvacuatePolicyTest(base.BasePolicyTest):
user_id = self.user_req.environ['nova.context'].user_id
self.stub_out('nova.compute.api.HostAPI.service_get_by_compute_host',
fake_service_get_by_compute_host)
self.stub_out(
'nova.api.openstack.common.'
'instance_has_port_with_resource_request',
lambda *args, **kwargs: False)
self.mock_get = self.useFixture(
fixtures.MockPatch('nova.compute.api.API.get')).mock
uuid = uuids.fake_id


@ -62,9 +62,7 @@ class MigrateServerPolicyTest(base.BasePolicyTest):
]
@mock.patch('nova.compute.api.API.resize')
@mock.patch('nova.api.openstack.common.'
'instance_has_port_with_resource_request', return_value=False)
def test_migrate_server_policy(self, mock_port, mock_resize):
def test_migrate_server_policy(self, mock_resize):
rule_name = ms_policies.POLICY_ROOT % 'migrate'
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
@ -73,9 +71,7 @@ class MigrateServerPolicyTest(base.BasePolicyTest):
body={'migrate': None})
@mock.patch('nova.compute.api.API.live_migrate')
@mock.patch('nova.api.openstack.common.'
'instance_has_port_with_resource_request', return_value=False)
def test_migrate_live_server_policy(self, mock_port, mock_live_migrate):
def test_migrate_live_server_policy(self, mock_live_migrate):
rule_name = ms_policies.POLICY_ROOT % 'migrate_live'
body = {'os-migrateLive': {
'host': 'hostname',

View File

@ -603,12 +603,9 @@ class ServersPolicyTest(base.BasePolicyTest):
self.req, self.instance.uuid,
body={'reboot': {'type': 'soft'}})
@mock.patch('nova.api.openstack.common.'
'instance_has_port_with_resource_request')
@mock.patch('nova.compute.api.API.resize')
def test_resize_server_policy(self, mock_resize, mock_port):
def test_resize_server_policy(self, mock_resize):
rule_name = policies.SERVERS % 'resize'
mock_port.return_value = False
self.common_policy_check(self.admin_or_owner_authorized_contexts,
self.admin_or_owner_unauthorized_contexts,
rule_name,
@ -631,13 +628,10 @@ class ServersPolicyTest(base.BasePolicyTest):
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.api.openstack.common.'
'instance_has_port_with_resource_request')
@mock.patch('nova.compute.api.API.resize')
def test_resize_server_overridden_policy_pass_with_same_user(
self, mock_resize, mock_port):
self, mock_resize):
rule_name = policies.SERVERS % 'resize'
mock_port.return_value = False
self.policy.set_rules({rule_name: "user_id:%(user_id)s"},
overwrite=False)
body = {'resize': {'flavorRef': 'f1'}}
@ -1226,12 +1220,10 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.objects.RequestSpec.get_by_instance_uuid')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.api.openstack.common.get_instance')
@mock.patch('nova.api.openstack.common.'
'instance_has_port_with_resource_request')
@mock.patch('nova.conductor.ComputeTaskAPI.resize_instance')
def test_cross_cell_resize_server_policy(self,
mock_resize, mock_port, mock_get, mock_save, mock_rs,
mock_allow, m_net):
def test_cross_cell_resize_server_policy(
self, mock_resize, mock_get, mock_save, mock_rs, mock_allow,
m_net):
self.stub_out('nova.compute.api.API.get_instance_host_status',
lambda x, y: "UP")
@ -1241,7 +1233,6 @@ class ServersPolicyTest(base.BasePolicyTest):
rule = 'os_compute_api:os-migrate-server:migrate'
self.policy.set_rules({rule: "@"}, overwrite=False)
rule_name = policies.CROSS_CELL_RESIZE
mock_port.return_value = False
req = fakes.HTTPRequest.blank('', version='2.56')
def fake_get(*args, **kwargs):