Drop pre-Cinder 3.44 version compatibility
The new-style volume attach flow semantics, which were needed to support multiattach volumes, rely on Cinder v3.44 and were added in Queens: Ifc01dbf98545104c998ab96f65ff8623a6db0f28. Now that we're in Train, let's require the Cinder API to be running at least Queens-level code, and drop the compat checks for Queens-level nova-compute code and for Cinder APIs older than Queens. Lots of old-style attachment test code is dropped as a result, since we no longer need the version-boundary handling in the API code. Many of the "new flow" tests were originally written as copies of existing tests modified for the new flow, precisely so the old-flow tests could simply be dropped later — and that is what happens in this change. Change-Id: I6a777b4b7a5729488f939df8c40e49bd40aec3dd
This commit is contained in:
parent
be1e0b90c1
commit
f260f12ec7
|
@ -103,7 +103,6 @@ AGGREGATE_ACTION_UPDATE = 'Update'
|
|||
AGGREGATE_ACTION_UPDATE_META = 'UpdateMeta'
|
||||
AGGREGATE_ACTION_DELETE = 'Delete'
|
||||
AGGREGATE_ACTION_ADD = 'Add'
|
||||
CINDER_V3_ATTACH_MIN_COMPUTE_VERSION = 24
|
||||
MIN_COMPUTE_TRUSTED_CERTS = 31
|
||||
MIN_COMPUTE_ABORT_QUEUED_LIVE_MIGRATION = 34
|
||||
MIN_COMPUTE_VOLUME_TYPE = 36
|
||||
|
@ -4026,46 +4025,17 @@ class API(base.Base):
|
|||
if volume['multiattach'] and not supports_multiattach:
|
||||
raise exception.MultiattachNotSupportedOldMicroversion()
|
||||
|
||||
if 'id' in instance:
|
||||
# This is a volume attach to an existing instance, so
|
||||
# we only care about the cell the instance is in.
|
||||
min_compute_version = objects.Service.get_minimum_version(
|
||||
context, 'nova-compute')
|
||||
else:
|
||||
# The instance is being created and we don't know which
|
||||
# cell it's going to land in, so check all cells.
|
||||
# NOTE(danms): We don't require all cells to report here since
|
||||
# we're really concerned about the new-ness of cells that the
|
||||
# instance may be scheduled into. If a cell doesn't respond here,
|
||||
# then it won't be a candidate for the instance and thus doesn't
|
||||
# matter.
|
||||
min_compute_version = \
|
||||
objects.service.get_minimum_version_all_cells(
|
||||
context, ['nova-compute'])
|
||||
|
||||
if min_compute_version >= CINDER_V3_ATTACH_MIN_COMPUTE_VERSION:
|
||||
# Attempt a new style volume attachment, but fallback to old-style
|
||||
# in case Cinder API 3.44 isn't available.
|
||||
try:
|
||||
attachment_id = self.volume_api.attachment_create(
|
||||
context, volume_id, instance.uuid)['id']
|
||||
bdm.attachment_id = attachment_id
|
||||
# NOTE(ildikov): In case of boot from volume the BDM at this
|
||||
# point is not yet created in a cell database, so we can't
|
||||
# call save(). When attaching a volume to an existing
|
||||
# instance, the instance is already in a cell and the BDM has
|
||||
# been created in that same cell so updating here in that case
|
||||
# is "ok".
|
||||
if bdm.obj_attr_is_set('id'):
|
||||
bdm.save()
|
||||
except exception.CinderAPIVersionNotAvailable:
|
||||
LOG.debug('The available Cinder microversion is not high '
|
||||
'enough to create new style volume attachment.')
|
||||
self.volume_api.reserve_volume(context, volume_id)
|
||||
else:
|
||||
LOG.debug('The compute service version is not high enough to '
|
||||
'create a new style volume attachment.')
|
||||
self.volume_api.reserve_volume(context, volume_id)
|
||||
attachment_id = self.volume_api.attachment_create(
|
||||
context, volume_id, instance.uuid)['id']
|
||||
bdm.attachment_id = attachment_id
|
||||
# NOTE(ildikov): In case of boot from volume the BDM at this
|
||||
# point is not yet created in a cell database, so we can't
|
||||
# call save(). When attaching a volume to an existing
|
||||
# instance, the instance is already in a cell and the BDM has
|
||||
# been created in that same cell so updating here in that case
|
||||
# is "ok".
|
||||
if bdm.obj_attr_is_set('id'):
|
||||
bdm.save()
|
||||
|
||||
# TODO(stephenfin): Fold this back in now that cells v1 no longer needs to
|
||||
# override it.
|
||||
|
@ -4155,27 +4125,14 @@ class API(base.Base):
|
|||
if device and not block_device.match_device(device):
|
||||
raise exception.InvalidDevicePath(path=device)
|
||||
|
||||
# Check to see if the computes in this cell can support new-style
|
||||
# volume attachments.
|
||||
min_compute_version = objects.Service.get_minimum_version(
|
||||
context, 'nova-compute')
|
||||
if min_compute_version >= CINDER_V3_ATTACH_MIN_COMPUTE_VERSION:
|
||||
try:
|
||||
# Check to see if Cinder is new enough to create new-style
|
||||
# attachments.
|
||||
cinder.is_microversion_supported(context, '3.44')
|
||||
except exception.CinderAPIVersionNotAvailable:
|
||||
pass
|
||||
else:
|
||||
# Make sure the volume isn't already attached to this instance
|
||||
# because based on the above checks, we'll use the new style
|
||||
# attachment flow in _check_attach_and_reserve_volume and
|
||||
# Cinder will allow multiple attachments between the same
|
||||
# volume and instance but the old flow API semantics don't
|
||||
# allow that so we enforce it here.
|
||||
self._check_volume_already_attached_to_instance(context,
|
||||
instance,
|
||||
volume_id)
|
||||
# Make sure the volume isn't already attached to this instance
|
||||
# because we'll use the v3.44 attachment flow in
|
||||
# _check_attach_and_reserve_volume and Cinder will allow multiple
|
||||
# attachments between the same volume and instance but the old flow
|
||||
# API semantics don't allow that so we enforce it here.
|
||||
self._check_volume_already_attached_to_instance(context,
|
||||
instance,
|
||||
volume_id)
|
||||
|
||||
volume = self.volume_api.get(context, volume_id)
|
||||
is_shelved_offloaded = instance.vm_state == vm_states.SHELVED_OFFLOADED
|
||||
|
|
|
@ -1636,191 +1636,7 @@ class EventReporterStub(fixtures.Fixture):
|
|||
lambda *args, **kwargs: mock.MagicMock()))
|
||||
|
||||
|
||||
class CinderFixture(fixtures.Fixture):
|
||||
"""A fixture to volume operations"""
|
||||
|
||||
# the default project_id in OSAPIFixtures
|
||||
tenant_id = '6f70656e737461636b20342065766572'
|
||||
|
||||
SWAP_OLD_VOL = 'a07f71dc-8151-4e7d-a0cc-cd24a3f11113'
|
||||
SWAP_NEW_VOL = '227cc671-f30b-4488-96fd-7d0bf13648d8'
|
||||
SWAP_ERR_OLD_VOL = '828419fa-3efb-4533-b458-4267ca5fe9b1'
|
||||
SWAP_ERR_NEW_VOL = '9c6d9c2d-7a8f-4c80-938d-3bf062b8d489'
|
||||
|
||||
# This represents a bootable image-backed volume to test
|
||||
# boot-from-volume scenarios.
|
||||
IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'
|
||||
|
||||
def __init__(self, test):
|
||||
super(CinderFixture, self).__init__()
|
||||
self.test = test
|
||||
self.swap_volume_instance_uuid = None
|
||||
self.swap_volume_instance_error_uuid = None
|
||||
self.reserved_volumes = list()
|
||||
# This is a map of instance UUIDs mapped to a list of volume IDs.
|
||||
# This map gets updated on attach/detach operations.
|
||||
self.attachments = collections.defaultdict(list)
|
||||
|
||||
def volume_ids_for_instance(self, instance_uuid):
|
||||
return self.attachments.get(instance_uuid)
|
||||
|
||||
def setUp(self):
|
||||
super(CinderFixture, self).setUp()
|
||||
|
||||
def fake_get(self_api, context, volume_id, microversion=None):
|
||||
# Check for the special swap volumes.
|
||||
if volume_id in (CinderFixture.SWAP_OLD_VOL,
|
||||
CinderFixture.SWAP_ERR_OLD_VOL):
|
||||
volume = {
|
||||
'status': 'available',
|
||||
'display_name': 'TEST1',
|
||||
'attach_status': 'detached',
|
||||
'id': volume_id,
|
||||
'multiattach': False,
|
||||
'size': 1
|
||||
}
|
||||
if ((self.swap_volume_instance_uuid and
|
||||
volume_id == CinderFixture.SWAP_OLD_VOL) or
|
||||
(self.swap_volume_instance_error_uuid and
|
||||
volume_id == CinderFixture.SWAP_ERR_OLD_VOL)):
|
||||
instance_uuid = (self.swap_volume_instance_uuid
|
||||
if volume_id == CinderFixture.SWAP_OLD_VOL
|
||||
else self.swap_volume_instance_error_uuid)
|
||||
|
||||
volume.update({
|
||||
'status': 'in-use',
|
||||
'attachments': {
|
||||
instance_uuid: {
|
||||
'mountpoint': '/dev/vdb',
|
||||
'attachment_id': volume_id
|
||||
}
|
||||
},
|
||||
'attach_status': 'attached'
|
||||
})
|
||||
return volume
|
||||
|
||||
# Check to see if the volume is attached.
|
||||
for instance_uuid, volumes in self.attachments.items():
|
||||
if volume_id in volumes:
|
||||
# The volume is attached.
|
||||
volume = {
|
||||
'status': 'in-use',
|
||||
'display_name': volume_id,
|
||||
'attach_status': 'attached',
|
||||
'id': volume_id,
|
||||
'multiattach': False,
|
||||
'size': 1,
|
||||
'attachments': {
|
||||
instance_uuid: {
|
||||
'attachment_id': volume_id,
|
||||
'mountpoint': '/dev/vdb'
|
||||
}
|
||||
}
|
||||
}
|
||||
break
|
||||
else:
|
||||
# This is a test that does not care about the actual details.
|
||||
reserved_volume = (volume_id in self.reserved_volumes)
|
||||
volume = {
|
||||
'status': 'attaching' if reserved_volume else 'available',
|
||||
'display_name': 'TEST2',
|
||||
'attach_status': 'detached',
|
||||
'id': volume_id,
|
||||
'multiattach': False,
|
||||
'size': 1
|
||||
}
|
||||
|
||||
# Check for our special image-backed volume.
|
||||
if volume_id == self.IMAGE_BACKED_VOL:
|
||||
# Make it a bootable volume.
|
||||
volume['bootable'] = True
|
||||
# Add the image_id metadata.
|
||||
volume['volume_image_metadata'] = {
|
||||
# There would normally be more image metadata in here...
|
||||
'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
|
||||
}
|
||||
|
||||
return volume
|
||||
|
||||
def fake_initialize_connection(self, context, volume_id, connector):
|
||||
if volume_id == CinderFixture.SWAP_ERR_NEW_VOL:
|
||||
# Return a tuple in order to raise an exception.
|
||||
return ()
|
||||
return {}
|
||||
|
||||
def fake_migrate_volume_completion(self, context, old_volume_id,
|
||||
new_volume_id, error):
|
||||
return {'save_volume_id': new_volume_id}
|
||||
|
||||
def fake_reserve_volume(self_api, context, volume_id):
|
||||
self.reserved_volumes.append(volume_id)
|
||||
|
||||
def fake_unreserve_volume(self_api, context, volume_id):
|
||||
# NOTE(mnaser): It's possible that we unreserve a volume that was
|
||||
# never reserved (ex: instance.volume_attach.error
|
||||
# notification tests)
|
||||
if volume_id in self.reserved_volumes:
|
||||
self.reserved_volumes.remove(volume_id)
|
||||
|
||||
def fake_attach(_self, context, volume_id, instance_uuid,
|
||||
mountpoint, mode='rw'):
|
||||
# Check to see if the volume is already attached to any server.
|
||||
for instance, volumes in self.attachments.items():
|
||||
if volume_id in volumes:
|
||||
raise exception.InvalidInput(
|
||||
reason='Volume %s is already attached to '
|
||||
'instance %s' % (volume_id, instance))
|
||||
# It's not attached so let's "attach" it.
|
||||
self.attachments[instance_uuid].append(volume_id)
|
||||
|
||||
self.test.stub_out('nova.volume.cinder.API.attach',
|
||||
fake_attach)
|
||||
|
||||
def fake_detach(_self, context, volume_id, instance_uuid=None,
|
||||
attachment_id=None):
|
||||
# NOTE(mnaser): It's possible that we unreserve a volume that was
|
||||
# never reserved (ex: instance.volume_attach.error
|
||||
# notification tests)
|
||||
if volume_id in self.reserved_volumes:
|
||||
self.reserved_volumes.remove(volume_id)
|
||||
|
||||
if instance_uuid is not None:
|
||||
# If the volume isn't attached to this instance it will
|
||||
# result in a ValueError which indicates a broken test or
|
||||
# code, so we just let that raise up.
|
||||
self.attachments[instance_uuid].remove(volume_id)
|
||||
else:
|
||||
for instance, volumes in self.attachments.items():
|
||||
if volume_id in volumes:
|
||||
volumes.remove(volume_id)
|
||||
break
|
||||
|
||||
self.test.stub_out('nova.volume.cinder.API.detach', fake_detach)
|
||||
|
||||
self.test.stub_out('nova.volume.cinder.API.begin_detaching',
|
||||
lambda *args, **kwargs: None)
|
||||
self.test.stub_out('nova.volume.cinder.API.get',
|
||||
fake_get)
|
||||
self.test.stub_out('nova.volume.cinder.API.initialize_connection',
|
||||
fake_initialize_connection)
|
||||
self.test.stub_out(
|
||||
'nova.volume.cinder.API.migrate_volume_completion',
|
||||
fake_migrate_volume_completion)
|
||||
self.test.stub_out('nova.volume.cinder.API.reserve_volume',
|
||||
fake_reserve_volume)
|
||||
self.test.stub_out('nova.volume.cinder.API.roll_detaching',
|
||||
lambda *args, **kwargs: None)
|
||||
self.test.stub_out('nova.volume.cinder.API.terminate_connection',
|
||||
lambda *args, **kwargs: None)
|
||||
self.test.stub_out('nova.volume.cinder.API.unreserve_volume',
|
||||
fake_unreserve_volume)
|
||||
self.test.stub_out('nova.volume.cinder.API.check_attached',
|
||||
lambda *args, **kwargs: None)
|
||||
|
||||
|
||||
# TODO(mriedem): We can probably pull some of the common parts from the
|
||||
# CinderFixture into a common mixin class for things like the variables
|
||||
# and fake_get.
|
||||
# TODO(mriedem): Just rename this to be CinderFixture.
|
||||
class CinderFixtureNewAttachFlow(fixtures.Fixture):
|
||||
"""A fixture to volume operations with the new Cinder attach/detach API"""
|
||||
|
||||
|
@ -1874,8 +1690,8 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
|
|||
# Check for the special swap volumes.
|
||||
attachments = self.volume_to_attachment[volume_id]
|
||||
|
||||
if volume_id in (CinderFixture.SWAP_OLD_VOL,
|
||||
CinderFixture.SWAP_ERR_OLD_VOL):
|
||||
if volume_id in (self.SWAP_OLD_VOL,
|
||||
self.SWAP_ERR_OLD_VOL):
|
||||
volume = {
|
||||
'status': 'available',
|
||||
'display_name': 'TEST1',
|
||||
|
@ -1885,11 +1701,11 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
|
|||
'size': 1
|
||||
}
|
||||
if ((self.swap_volume_instance_uuid and
|
||||
volume_id == CinderFixture.SWAP_OLD_VOL) or
|
||||
volume_id == self.SWAP_OLD_VOL) or
|
||||
(self.swap_volume_instance_error_uuid and
|
||||
volume_id == CinderFixture.SWAP_ERR_OLD_VOL)):
|
||||
volume_id == self.SWAP_ERR_OLD_VOL)):
|
||||
instance_uuid = (self.swap_volume_instance_uuid
|
||||
if volume_id == CinderFixture.SWAP_OLD_VOL
|
||||
if volume_id == self.SWAP_OLD_VOL
|
||||
else self.swap_volume_instance_error_uuid)
|
||||
|
||||
if attachments:
|
||||
|
@ -1956,7 +1772,7 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
|
|||
|
||||
return volume
|
||||
|
||||
def fake_migrate_volume_completion(self, context, old_volume_id,
|
||||
def fake_migrate_volume_completion(_self, context, old_volume_id,
|
||||
new_volume_id, error):
|
||||
return {'save_volume_id': new_volume_id}
|
||||
|
||||
|
@ -2021,7 +1837,7 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
|
|||
'connection_info': {'data':
|
||||
{'foo': 'bar',
|
||||
'target_lun': '1'}}}
|
||||
if attachment_id == CinderFixtureNewAttachFlow.SWAP_ERR_ATTACH_ID:
|
||||
if attachment_id == self.SWAP_ERR_ATTACH_ID:
|
||||
# This intentionally triggers a TypeError for the
|
||||
# instance.volume_swap.error versioned notification tests.
|
||||
attachment_ref = {'connection_info': ()}
|
||||
|
|
|
@ -17,7 +17,6 @@ import datetime
|
|||
|
||||
from oslo_utils.fixture import uuidsentinel as uuids
|
||||
|
||||
from nova.compute import api as compute_api
|
||||
from nova import context
|
||||
from nova import objects
|
||||
from nova.tests import fixtures
|
||||
|
@ -28,10 +27,6 @@ from nova.tests.unit import fake_block_device
|
|||
from nova.tests.unit import fake_instance
|
||||
|
||||
|
||||
COMPUTE_VERSION_OLD_ATTACH_FLOW = \
|
||||
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1
|
||||
|
||||
|
||||
class SnapshotsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
|
||||
sample_dir = "os-volumes"
|
||||
|
||||
|
@ -263,11 +258,9 @@ class VolumeAttachmentsSample(test_servers.ServersSampleBase):
|
|||
return subs
|
||||
|
||||
def test_attach_volume_to_server(self):
|
||||
self.stub_out('nova.objects.Service.get_minimum_version',
|
||||
lambda *a, **k: COMPUTE_VERSION_OLD_ATTACH_FLOW)
|
||||
self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get)
|
||||
self.stub_out('nova.volume.cinder.API.reserve_volume',
|
||||
lambda *a, **k: None)
|
||||
self.stub_out('nova.volume.cinder.API.attachment_create',
|
||||
lambda *a, **k: {'id': uuids.attachment_id})
|
||||
device_name = '/dev/vdd'
|
||||
bdm = objects.BlockDeviceMapping()
|
||||
bdm['device_name'] = device_name
|
||||
|
@ -294,8 +287,6 @@ class VolumeAttachmentsSample(test_servers.ServersSampleBase):
|
|||
response, 200)
|
||||
|
||||
def test_attach_volume_to_server_new_flow(self):
|
||||
self.stub_out('nova.volume.cinder.is_microversion_supported',
|
||||
lambda *a, **k: None)
|
||||
self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get)
|
||||
self.stub_out('nova.volume.cinder.API.attachment_create',
|
||||
lambda *a, **k: {'id': uuids.volume})
|
||||
|
|
|
@ -12,10 +12,8 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
from oslo_utils.fixture import uuidsentinel as uuids
|
||||
|
||||
from nova.compute import api as compute_api
|
||||
from nova import exception
|
||||
from nova.tests import fixtures as nova_fixtures
|
||||
from nova.tests.functional import integrated_helpers
|
||||
|
@ -24,9 +22,9 @@ from nova.tests.unit import fake_notifier
|
|||
|
||||
class FakeCinderError(object):
|
||||
"""Poor man's Mock because we're stubbing out and not mock.patching. Stubs
|
||||
out both terminate_connection and attachment_delete. We keep a raise and
|
||||
call count to simulate a single volume error while being able to assert
|
||||
that we still got called for all of an instance's volumes.
|
||||
out attachment_delete. We keep a raise and call count to simulate a single
|
||||
volume error while being able to assert that we still got called for all
|
||||
of an instance's volumes.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
|
@ -55,55 +53,6 @@ class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase,
|
|||
# nodenames. See comments in test_bug_1702454.py.
|
||||
self.compute2 = self.start_service('compute', host='host2')
|
||||
|
||||
# To get the old Cinder flow we need to hack the service version, otherwise
|
||||
# the new flow is attempted and CinderFixture complains about auth because
|
||||
# it's not stubbing out the new flow methods.
|
||||
@mock.patch(
|
||||
'nova.objects.service.get_minimum_version_all_cells',
|
||||
return_value=compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1)
|
||||
def test_live_migrate_terminate_connection_fails(self, _):
|
||||
self.useFixture(nova_fixtures.CinderFixture(self))
|
||||
server = self.api.post_server({
|
||||
'server': {
|
||||
'flavorRef': 1,
|
||||
'imageRef': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
|
||||
'name': 'live-migrate-terminate-connection-fail-test',
|
||||
'networks': 'none',
|
||||
'block_device_mapping_v2': [
|
||||
{'boot_index': 0,
|
||||
'uuid': uuids.broken_volume,
|
||||
'source_type': 'volume',
|
||||
'destination_type': 'volume'},
|
||||
{'boot_index': 1,
|
||||
'uuid': uuids.working_volume,
|
||||
'source_type': 'volume',
|
||||
'destination_type': 'volume'}]}})
|
||||
server = self._wait_for_state_change(self.api, server, 'ACTIVE')
|
||||
|
||||
source = server['OS-EXT-SRV-ATTR:host']
|
||||
if source == self.compute.host:
|
||||
dest = self.compute2.host
|
||||
else:
|
||||
dest = self.compute.host
|
||||
|
||||
post = {
|
||||
'os-migrateLive': {
|
||||
'host': dest,
|
||||
'block_migration': False,
|
||||
}
|
||||
}
|
||||
stub_terminate_connection = FakeCinderError()
|
||||
self.stub_out('nova.volume.cinder.API.terminate_connection',
|
||||
stub_terminate_connection)
|
||||
self.api.post_server_action(server['id'], post)
|
||||
# Live migration should complete despite a volume failing to detach.
|
||||
# Waiting for ACTIVE on dest is essentially an assert for just that.
|
||||
self._wait_for_server_parameter(self.api, server,
|
||||
{'OS-EXT-SRV-ATTR:host': dest,
|
||||
'status': 'ACTIVE'})
|
||||
self.assertEqual(2, stub_terminate_connection.call_count)
|
||||
self.assertEqual(1, stub_terminate_connection.raise_count)
|
||||
|
||||
def test_live_migrate_attachment_delete_fails(self):
|
||||
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
|
||||
server = self.api.post_server({
|
||||
|
|
|
@ -14,7 +14,6 @@ import time
|
|||
|
||||
import mock
|
||||
|
||||
from nova.compute import api as compute_api
|
||||
from nova import context
|
||||
from nova import exception
|
||||
from nova.tests import fixtures
|
||||
|
@ -23,9 +22,6 @@ from nova.tests.functional.notification_sample_tests \
|
|||
import notification_sample_base
|
||||
from nova.tests.unit import fake_notifier
|
||||
|
||||
COMPUTE_VERSION_OLD_ATTACH_FLOW = \
|
||||
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1
|
||||
|
||||
|
||||
class TestInstanceNotificationSampleWithMultipleCompute(
|
||||
notification_sample_base.NotificationSampleTestBase):
|
||||
|
@ -321,26 +317,6 @@ class TestInstanceNotificationSampleWithMultipleCompute(
|
|||
actual=fake_notifier.VERSIONED_NOTIFICATIONS[5])
|
||||
|
||||
|
||||
class TestInstanceNotificationSampleWithMultipleComputeOldAttachFlow(
|
||||
TestInstanceNotificationSampleWithMultipleCompute):
|
||||
|
||||
def setUp(self):
|
||||
self.flags(compute_driver='fake.FakeLiveMigrateDriver')
|
||||
self.flags(use_neutron=True)
|
||||
self.flags(bdms_in_notifications='True', group='notifications')
|
||||
super(TestInstanceNotificationSampleWithMultipleCompute, self).setUp()
|
||||
self.neutron = fixtures.NeutronFixture(self)
|
||||
self.useFixture(self.neutron)
|
||||
self.cinder = fixtures.CinderFixture(self)
|
||||
self.useFixture(self.cinder)
|
||||
|
||||
patcher = self.mock_min_service_version = \
|
||||
mock.patch('nova.objects.Service.get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_OLD_ATTACH_FLOW)
|
||||
self.mock_min_service_version = patcher.start()
|
||||
self.addCleanup(patcher.stop)
|
||||
|
||||
|
||||
class TestInstanceNotificationSample(
|
||||
notification_sample_base.NotificationSampleTestBase):
|
||||
|
||||
|
@ -1973,40 +1949,3 @@ class TestInstanceNotificationSample(
|
|||
'reservation_id': server['reservation_id'],
|
||||
'uuid': server['id']},
|
||||
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
|
||||
|
||||
|
||||
class TestInstanceNotificationSampleOldAttachFlow(
|
||||
TestInstanceNotificationSample):
|
||||
|
||||
def setUp(self):
|
||||
self.flags(use_neutron=True)
|
||||
self.flags(bdms_in_notifications='True', group='notifications')
|
||||
super(TestInstanceNotificationSample, self).setUp()
|
||||
self.neutron = fixtures.NeutronFixture(self)
|
||||
self.useFixture(self.neutron)
|
||||
self.cinder = fixtures.CinderFixture(self)
|
||||
self.useFixture(self.cinder)
|
||||
|
||||
patcher = self.mock_min_service_version = \
|
||||
mock.patch('nova.objects.Service.get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_OLD_ATTACH_FLOW)
|
||||
self.mock_min_service_version = patcher.start()
|
||||
self.addCleanup(patcher.stop)
|
||||
|
||||
def _do_setup_server_and_error_flag(self):
|
||||
server = self._boot_a_server(
|
||||
extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
|
||||
self._attach_volume_to_server(server, self.cinder.SWAP_ERR_OLD_VOL)
|
||||
|
||||
self.cinder.swap_volume_instance_error_uuid = server['id']
|
||||
|
||||
return server
|
||||
|
||||
@mock.patch('nova.volume.cinder.API.attach')
|
||||
def _test_attach_volume_error(self, server, mock_attach):
|
||||
self._do_test_attach_volume_error(server, mock_attach)
|
||||
|
||||
def test_rebuild_server_with_trusted_cert(self):
|
||||
# Skipping this test as trusted cert support needs a later service
|
||||
# version than this test class is limited to.
|
||||
pass
|
||||
|
|
|
@ -12,9 +12,6 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from nova.compute import api as compute_api
|
||||
from nova.tests import fixtures as nova_fixtures
|
||||
from nova.tests.functional import integrated_helpers
|
||||
|
||||
|
@ -60,38 +57,13 @@ class DeleteWithReservedVolumes(integrated_helpers._IntegratedTestBase,
|
|||
})
|
||||
return self._wait_for_state_change(self.api, server, 'ERROR')
|
||||
|
||||
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
|
||||
return_value=
|
||||
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1)
|
||||
def test_delete_with_reserved_volumes(self, mock_version_get=None):
|
||||
self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
|
||||
|
||||
# Create a server which should go to ERROR state because we don't
|
||||
# have any active computes.
|
||||
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
|
||||
server = self._create_error_server(volume_id)
|
||||
|
||||
# The status of the volume at this point should be 'attaching' as it
|
||||
# is reserved by Nova by the API.
|
||||
self.assertIn(volume_id, self.cinder.reserved_volumes)
|
||||
|
||||
# Delete this server, which should delete BDMs and remove the
|
||||
# reservation on the instances.
|
||||
self.api.delete_server(server['id'])
|
||||
|
||||
# The volume should no longer be reserved as the deletion of the
|
||||
# server should have released all the resources.
|
||||
self.assertNotIn(volume_id, self.cinder.reserved_volumes)
|
||||
|
||||
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
|
||||
return_value=compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION)
|
||||
def test_delete_with_reserved_volumes_new(self, mock_version_get=None):
|
||||
def test_delete_with_reserved_volumes_new(self):
|
||||
self.cinder = self.useFixture(
|
||||
nova_fixtures.CinderFixtureNewAttachFlow(self))
|
||||
|
||||
# Create a server which should go to ERROR state because we don't
|
||||
# have any active computes.
|
||||
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
|
||||
volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
|
||||
server = self._create_error_server(volume_id)
|
||||
server_id = server['id']
|
||||
|
||||
|
|
|
@ -14,10 +14,8 @@
|
|||
|
||||
import time
|
||||
|
||||
import mock
|
||||
from oslo_log import log as logging
|
||||
|
||||
from nova.compute import api as compute_api
|
||||
from nova import test
|
||||
from nova.tests import fixtures as nova_fixtures
|
||||
from nova.tests.functional.api import client
|
||||
|
@ -28,9 +26,6 @@ from nova.tests.unit import policy_fixture
|
|||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
COMPUTE_VERSION_OLD_ATTACH_FLOW = \
|
||||
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1
|
||||
|
||||
|
||||
class TestLocalDeleteAttachedVolumes(test.TestCase):
|
||||
"""Test local delete in the API of a server with a volume attached.
|
||||
|
@ -174,12 +169,3 @@ class TestLocalDeleteAttachedVolumes(test.TestCase):
|
|||
# Now that the bug is fixed, assert the volume was detached.
|
||||
self.assertNotIn(volume_id,
|
||||
self.cinder.volume_ids_for_instance(server_id))
|
||||
|
||||
|
||||
@mock.patch('nova.objects.Service.get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_OLD_ATTACH_FLOW)
|
||||
class TestLocalDeleteAttachedVolumesOldFlow(TestLocalDeleteAttachedVolumes):
|
||||
|
||||
def setUp(self):
|
||||
super(TestLocalDeleteAttachedVolumesOldFlow, self).setUp()
|
||||
self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
|
||||
|
|
|
@ -46,6 +46,7 @@ class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase,
|
|||
|
||||
def test_volume_backed_rebuild_same_image(self):
|
||||
# First create our server as normal.
|
||||
volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
|
||||
server_req_body = {
|
||||
# There is no imageRef because this is boot from volume.
|
||||
'server': {
|
||||
|
@ -56,7 +57,7 @@ class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase,
|
|||
'networks': 'none',
|
||||
'block_device_mapping_v2': [{
|
||||
'boot_index': 0,
|
||||
'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
|
||||
'uuid': volume_id,
|
||||
'source_type': 'volume',
|
||||
'destination_type': 'volume'
|
||||
}]
|
||||
|
|
|
@ -63,7 +63,7 @@ class TestRescheduleWithVolumesAttached(
|
|||
|
||||
def test_reschedule_with_volume_attached(self):
|
||||
# Boot a volume backed instance
|
||||
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
|
||||
volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
|
||||
server_request = {
|
||||
'name': 'server',
|
||||
'flavorRef': self.flavor_id,
|
||||
|
|
|
@ -34,7 +34,6 @@ import six
|
|||
from nova.compute import api as compute_api
|
||||
from nova.compute import instance_actions
|
||||
from nova.compute import manager as compute_manager
|
||||
from nova.compute import rpcapi
|
||||
from nova import context
|
||||
from nova import exception
|
||||
from nova import objects
|
||||
|
@ -1121,95 +1120,6 @@ class ServerTestV220(ServersTestBase):
|
|||
'destination_type': 'volume',
|
||||
'volume_id': '5d721593-f033-4f6d-ab6f-b5b067e61bc4'})])
|
||||
|
||||
def test_attach_detach_vol_to_shelved_server(self):
|
||||
self.flags(shelved_offload_time=-1)
|
||||
found_server = self._shelve_server()
|
||||
self.assertEqual('SHELVED', found_server['status'])
|
||||
server_id = found_server['id']
|
||||
|
||||
# Test attach volume
|
||||
self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get)
|
||||
with test.nested(mock.patch.object(volume.cinder,
|
||||
'is_microversion_supported'),
|
||||
mock.patch.object(compute_api.API,
|
||||
'_check_attach_and_reserve_volume'),
|
||||
mock.patch.object(rpcapi.ComputeAPI,
|
||||
'attach_volume')) as (mock_cinder_mv,
|
||||
mock_reserve,
|
||||
mock_attach):
|
||||
mock_cinder_mv.side_effect = \
|
||||
exception.CinderAPIVersionNotAvailable(version='3.44')
|
||||
volume_attachment = {"volumeAttachment": {"volumeId":
|
||||
"5d721593-f033-4f6d-ab6f-b5b067e61bc4"}}
|
||||
self.api.api_post(
|
||||
'/servers/%s/os-volume_attachments' % (server_id),
|
||||
volume_attachment)
|
||||
self.assertTrue(mock_reserve.called)
|
||||
self.assertTrue(mock_attach.called)
|
||||
|
||||
# Test detach volume
|
||||
with test.nested(mock.patch.object(volume.cinder.API,
|
||||
'begin_detaching'),
|
||||
mock.patch.object(objects.BlockDeviceMappingList,
|
||||
'get_by_instance_uuid'),
|
||||
mock.patch.object(rpcapi.ComputeAPI,
|
||||
'detach_volume')
|
||||
) as (mock_check, mock_get_bdms, mock_rpc):
|
||||
|
||||
mock_get_bdms.return_value = self._get_fake_bdms(self.ctxt)
|
||||
attachment_id = mock_get_bdms.return_value[0]['volume_id']
|
||||
|
||||
self.api.api_delete('/servers/%s/os-volume_attachments/%s' %
|
||||
(server_id, attachment_id))
|
||||
self.assertTrue(mock_check.called)
|
||||
self.assertTrue(mock_rpc.called)
|
||||
|
||||
self._delete_server(server_id)
|
||||
|
||||
def test_attach_detach_vol_to_shelved_offloaded_server(self):
|
||||
self.flags(shelved_offload_time=0)
|
||||
found_server = self._shelve_server()
|
||||
self.assertEqual('SHELVED_OFFLOADED', found_server['status'])
|
||||
server_id = found_server['id']
|
||||
|
||||
# Test attach volume
|
||||
self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get)
|
||||
with test.nested(mock.patch.object(volume.cinder,
|
||||
'is_microversion_supported'),
|
||||
mock.patch.object(compute_api.API,
|
||||
'_check_attach_and_reserve_volume'),
|
||||
mock.patch.object(volume.cinder.API,
|
||||
'attach')) as (mock_cinder_mv,
|
||||
mock_reserve, mock_vol):
|
||||
mock_cinder_mv.side_effect = \
|
||||
exception.CinderAPIVersionNotAvailable(version='3.44')
|
||||
volume_attachment = {"volumeAttachment": {"volumeId":
|
||||
"5d721593-f033-4f6d-ab6f-b5b067e61bc4"}}
|
||||
attach_response = self.api.api_post(
|
||||
'/servers/%s/os-volume_attachments' % (server_id),
|
||||
volume_attachment).body['volumeAttachment']
|
||||
self.assertTrue(mock_reserve.called)
|
||||
self.assertTrue(mock_vol.called)
|
||||
self.assertIsNone(attach_response['device'])
|
||||
|
||||
# Test detach volume
|
||||
with test.nested(mock.patch.object(volume.cinder.API,
|
||||
'begin_detaching'),
|
||||
mock.patch.object(objects.BlockDeviceMappingList,
|
||||
'get_by_instance_uuid'),
|
||||
mock.patch.object(compute_api.API,
|
||||
'_local_cleanup_bdm_volumes')
|
||||
) as (mock_check, mock_get_bdms, mock_clean_vols):
|
||||
|
||||
mock_get_bdms.return_value = self._get_fake_bdms(self.ctxt)
|
||||
attachment_id = mock_get_bdms.return_value[0]['volume_id']
|
||||
self.api.api_delete('/servers/%s/os-volume_attachments/%s' %
|
||||
(server_id, attachment_id))
|
||||
self.assertTrue(mock_check.called)
|
||||
self.assertTrue(mock_clean_vols.called)
|
||||
|
||||
self._delete_server(server_id)
|
||||
|
||||
def test_attach_detach_vol_to_shelved_offloaded_server_new_flow(self):
|
||||
self.flags(shelved_offload_time=0)
|
||||
found_server = self._shelve_server()
|
||||
|
@ -1219,9 +1129,7 @@ class ServerTestV220(ServersTestBase):
|
|||
|
||||
# Test attach volume
|
||||
self.stub_out('nova.volume.cinder.API.get', fakes.stub_volume_get)
|
||||
with test.nested(mock.patch.object(volume.cinder,
|
||||
'is_microversion_supported'),
|
||||
mock.patch.object(compute_api.API,
|
||||
with test.nested(mock.patch.object(compute_api.API,
|
||||
'_check_volume_already_attached_to_instance'),
|
||||
mock.patch.object(volume.cinder.API,
|
||||
'check_availability_zone'),
|
||||
|
@ -1229,7 +1137,7 @@ class ServerTestV220(ServersTestBase):
|
|||
'attachment_create'),
|
||||
mock.patch.object(volume.cinder.API,
|
||||
'attachment_complete')
|
||||
) as (mock_cinder_mv, mock_check_vol_attached,
|
||||
) as (mock_check_vol_attached,
|
||||
mock_check_av_zone, mock_attach_create,
|
||||
mock_attachment_complete):
|
||||
mock_attach_create.return_value = {'id': uuids.volume}
|
||||
|
|
|
@ -10,10 +10,8 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
import six
|
||||
|
||||
from nova.compute import api as compute_api
|
||||
from nova.policies import base as base_policies
|
||||
from nova.policies import servers as servers_policies
|
||||
from nova import test
|
||||
|
@ -248,42 +246,6 @@ class ServersPreSchedulingTestCase(test.TestCase,
|
|||
list_resp = list_resp.body['servers']
|
||||
self.assertEqual(0, len(list_resp))
|
||||
|
||||
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
|
||||
return_value=
|
||||
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1)
|
||||
def test_bfv_delete_build_request_pre_scheduling_old_flow(self, mock_get):
|
||||
cinder = self.useFixture(nova_fixtures.CinderFixture(self))
|
||||
|
||||
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
|
||||
server = self.api.post_server({
|
||||
'server': {
|
||||
'flavorRef': '1',
|
||||
'name': 'test_bfv_delete_build_request_pre_scheduling',
|
||||
'networks': 'none',
|
||||
'block_device_mapping_v2': [
|
||||
{
|
||||
'boot_index': 0,
|
||||
'uuid': volume_id,
|
||||
'source_type': 'volume',
|
||||
'destination_type': 'volume'
|
||||
},
|
||||
]
|
||||
}
|
||||
})
|
||||
|
||||
# Since _IntegratedTestBase uses the CastAsCall fixture, when we
|
||||
# get the server back we know all of the volume stuff should be done.
|
||||
self.assertIn(volume_id, cinder.reserved_volumes)
|
||||
|
||||
# Now delete the server, which should go through the "local delete"
|
||||
# code in the API, find the build request and delete it along with
|
||||
# detaching the volume from the instance.
|
||||
self.api.delete_server(server['id'])
|
||||
|
||||
# The volume should no longer have any attachments as instance delete
|
||||
# should have removed them.
|
||||
self.assertNotIn(volume_id, cinder.reserved_volumes)
|
||||
|
||||
def test_bfv_delete_build_request_pre_scheduling(self):
|
||||
cinder = self.useFixture(
|
||||
nova_fixtures.CinderFixtureNewAttachFlow(self))
|
||||
|
@ -410,7 +372,7 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
|
|||
# we don't start compute so that scheduling fails; we don't really
|
||||
# care about successfully building an active server here.
|
||||
self.useFixture(func_fixtures.PlacementFixture())
|
||||
self.useFixture(nova_fixtures.CinderFixture(self))
|
||||
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
|
||||
self.start_service('conductor')
|
||||
self.start_service('scheduler')
|
||||
server_req = self._build_minimal_create_server_request(
|
||||
|
@ -419,7 +381,7 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
|
|||
flavor_id=self.zero_disk_flavor['id'])
|
||||
server_req.pop('imageRef', None)
|
||||
server_req['block_device_mapping_v2'] = [{
|
||||
'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
|
||||
'uuid': nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL,
|
||||
'source_type': 'volume',
|
||||
'destination_type': 'volume',
|
||||
'boot_index': 0
|
||||
|
|
|
@ -102,10 +102,6 @@ FAKE_IMAGE_REF = uuids.image_ref
|
|||
NODENAME = 'fakenode1'
|
||||
NODENAME2 = 'fakenode2'
|
||||
|
||||
COMPUTE_VERSION_NEW_ATTACH_FLOW = compute.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION
|
||||
COMPUTE_VERSION_OLD_ATTACH_FLOW = \
|
||||
compute.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1
|
||||
|
||||
|
||||
def fake_not_implemented(*args, **kwargs):
|
||||
raise NotImplementedError()
|
||||
|
@ -1049,15 +1045,16 @@ class ComputeVolumeTestCase(BaseTestCase):
|
|||
def fake_check_availability_zone(*args, **kwargs):
|
||||
pass
|
||||
|
||||
def fake_reserve_volume(*args, **kwargs):
|
||||
pass
|
||||
def fake_attachment_create(_self, ctxt, vol_id, *args, **kwargs):
|
||||
# Return a unique attachment id per volume.
|
||||
return {'id': getattr(uuids, vol_id)}
|
||||
|
||||
self.stub_out('nova.volume.cinder.API.get', fake_get)
|
||||
self.stub_out('nova.volume.cinder.API.get_snapshot', fake_get)
|
||||
self.stub_out('nova.volume.cinder.API.check_availability_zone',
|
||||
fake_check_availability_zone)
|
||||
self.stub_out('nova.volume.cinder.API.reserve_volume',
|
||||
fake_reserve_volume)
|
||||
self.stub_out('nova.volume.cinder.API.attachment_create',
|
||||
fake_attachment_create)
|
||||
|
||||
volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
|
||||
snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
|
||||
|
@ -1270,9 +1267,9 @@ class ComputeVolumeTestCase(BaseTestCase):
|
|||
|
||||
@mock.patch.object(cinder.API, 'get')
|
||||
@mock.patch.object(cinder.API, 'check_availability_zone')
|
||||
@mock.patch.object(cinder.API, 'reserve_volume',
|
||||
@mock.patch.object(cinder.API, 'attachment_create',
|
||||
side_effect=exception.InvalidVolume(reason='error'))
|
||||
def test_validate_bdm_media_service_invalid_volume(self, mock_reserve_vol,
|
||||
def test_validate_bdm_media_service_invalid_volume(self, mock_att_create,
|
||||
mock_check_av_zone,
|
||||
mock_get):
|
||||
volume_id = uuids.volume_id
|
||||
|
@ -1316,7 +1313,7 @@ class ComputeVolumeTestCase(BaseTestCase):
|
|||
|
||||
self.assertRaises(exception.InvalidVolume,
|
||||
self.compute_api._validate_bdm,
|
||||
self.context, self.instance,
|
||||
self.context, self.instance_object,
|
||||
instance_type, bdms)
|
||||
mock_get.assert_called_with(self.context, volume_id)
|
||||
|
||||
|
@ -1345,8 +1342,9 @@ class ComputeVolumeTestCase(BaseTestCase):
|
|||
|
||||
@mock.patch.object(cinder.API, 'get')
|
||||
@mock.patch.object(cinder.API, 'check_availability_zone')
|
||||
@mock.patch.object(cinder.API, 'reserve_volume')
|
||||
def test_validate_bdm_media_service_valid(self, mock_reserve_vol,
|
||||
@mock.patch.object(cinder.API, 'attachment_create',
|
||||
return_value={'id': uuids.attachment_id})
|
||||
def test_validate_bdm_media_service_valid(self, mock_att_create,
|
||||
mock_check_av_zone,
|
||||
mock_get):
|
||||
volume_id = uuids.volume_id
|
||||
|
@ -1369,12 +1367,13 @@ class ComputeVolumeTestCase(BaseTestCase):
|
|||
'multiattach': False}
|
||||
|
||||
mock_get.return_value = volume
|
||||
self.compute_api._validate_bdm(self.context, self.instance,
|
||||
self.compute_api._validate_bdm(self.context, self.instance_object,
|
||||
instance_type, bdms)
|
||||
mock_get.assert_called_once_with(self.context, volume_id)
|
||||
mock_check_av_zone.assert_called_once_with(self.context, volume,
|
||||
instance=self.instance)
|
||||
mock_reserve_vol.assert_called_once_with(self.context, volume_id)
|
||||
mock_check_av_zone.assert_called_once_with(
|
||||
self.context, volume, instance=self.instance_object)
|
||||
mock_att_create.assert_called_once_with(
|
||||
self.context, volume_id, self.instance_object.uuid)
|
||||
|
||||
def test_volume_snapshot_create(self):
|
||||
self.assertRaises(messaging.ExpectedException,
|
||||
|
@ -2124,8 +2123,8 @@ class ComputeTestCase(BaseTestCase,
|
|||
def fake_check_availability_zone(*args, **kwargs):
|
||||
pass
|
||||
|
||||
def fake_reserve_volume(*args, **kwargs):
|
||||
pass
|
||||
def fake_attachment_create(*args, **kwargs):
|
||||
return {'id': uuids.attachment_id}
|
||||
|
||||
def fake_volume_get(self, context, volume_id):
|
||||
return {'id': volume_id,
|
||||
|
@ -2161,8 +2160,8 @@ class ComputeTestCase(BaseTestCase,
|
|||
self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
|
||||
self.stub_out('nova.volume.cinder.API.check_availability_zone',
|
||||
fake_check_availability_zone)
|
||||
self.stub_out('nova.volume.cinder.API.reserve_volume',
|
||||
fake_reserve_volume)
|
||||
self.stub_out('nova.volume.cinder.API.attachment_create',
|
||||
fake_attachment_create)
|
||||
self.stub_out('nova.volume.cinder.API.terminate_connection',
|
||||
fake_terminate_connection)
|
||||
self.stub_out('nova.volume.cinder.API.detach', fake_detach)
|
||||
|
@ -10614,49 +10613,6 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
mock.call(self.context, instance, self.compute.host,
|
||||
action='interface_detach', phase='start')])
|
||||
|
||||
def test_attach_volume(self):
|
||||
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
|
||||
{'source_type': 'volume', 'destination_type': 'volume',
|
||||
'volume_id': uuids.volume_id, 'device_name': '/dev/vdb'})
|
||||
bdm = block_device_obj.BlockDeviceMapping()._from_db_object(
|
||||
self.context,
|
||||
block_device_obj.BlockDeviceMapping(),
|
||||
fake_bdm)
|
||||
instance = self._create_fake_instance_obj()
|
||||
instance.id = 42
|
||||
fake_volume = {'id': uuids.volume, 'multiattach': False}
|
||||
|
||||
with test.nested(
|
||||
mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_OLD_ATTACH_FLOW),
|
||||
mock.patch.object(cinder.API, 'get', return_value=fake_volume),
|
||||
mock.patch.object(cinder.API, 'check_availability_zone'),
|
||||
mock.patch.object(cinder.API, 'reserve_volume'),
|
||||
mock.patch.object(compute_rpcapi.ComputeAPI,
|
||||
'reserve_block_device_name', return_value=bdm),
|
||||
mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
|
||||
) as (mock_get_version, mock_get, mock_check_availability_zone,
|
||||
mock_reserve_vol, mock_reserve_bdm, mock_attach):
|
||||
|
||||
self.compute_api.attach_volume(
|
||||
self.context, instance, uuids.volume,
|
||||
'/dev/vdb', 'ide', 'cdrom')
|
||||
|
||||
mock_reserve_bdm.assert_called_once_with(
|
||||
self.context, instance, '/dev/vdb', uuids.volume,
|
||||
disk_bus='ide', device_type='cdrom', tag=None,
|
||||
multiattach=False)
|
||||
self.assertEqual(mock_get.call_args,
|
||||
mock.call(self.context, uuids.volume))
|
||||
self.assertEqual(mock_check_availability_zone.call_args,
|
||||
mock.call(
|
||||
self.context, fake_volume, instance=instance))
|
||||
mock_reserve_vol.assert_called_once_with(
|
||||
self.context, uuids.volume)
|
||||
a, kw = mock_attach.call_args
|
||||
self.assertEqual(a[2].device_name, '/dev/vdb')
|
||||
self.assertEqual(a[2].volume_id, uuids.volume_id)
|
||||
|
||||
def test_attach_volume_new_flow(self):
|
||||
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
|
||||
{'source_type': 'volume', 'destination_type': 'volume',
|
||||
|
@ -10670,10 +10626,7 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
fake_volume = {'id': uuids.volume, 'multiattach': False}
|
||||
|
||||
with test.nested(
|
||||
mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW),
|
||||
mock.patch.object(cinder.API, 'get', return_value=fake_volume),
|
||||
mock.patch.object(cinder, 'is_microversion_supported'),
|
||||
mock.patch.object(objects.BlockDeviceMapping,
|
||||
'get_by_volume_and_instance'),
|
||||
mock.patch.object(cinder.API, 'check_availability_zone'),
|
||||
|
@ -10683,7 +10636,7 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
mock.patch.object(compute_rpcapi.ComputeAPI,
|
||||
'reserve_block_device_name', return_value=bdm),
|
||||
mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
|
||||
) as (mock_get_version, mock_get, mock_mv_supported, mock_no_bdm,
|
||||
) as (mock_get, mock_no_bdm,
|
||||
mock_check_availability_zone, mock_attachment_create,
|
||||
mock_bdm_save, mock_reserve_bdm, mock_attach):
|
||||
mock_no_bdm.side_effect = exception.VolumeBDMNotFound(
|
||||
|
@ -10708,58 +10661,6 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
self.assertEqual(a[2].device_name, '/dev/vdb')
|
||||
self.assertEqual(a[2].volume_id, uuids.volume_id)
|
||||
|
||||
def test_attach_volume_new_flow_microversion_not_available(self):
|
||||
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
|
||||
{'source_type': 'volume', 'destination_type': 'volume',
|
||||
'volume_id': uuids.volume_id, 'device_name': '/dev/vdb'})
|
||||
bdm = block_device_obj.BlockDeviceMapping()._from_db_object(
|
||||
self.context,
|
||||
block_device_obj.BlockDeviceMapping(),
|
||||
fake_bdm)
|
||||
instance = self._create_fake_instance_obj()
|
||||
instance.id = 42
|
||||
fake_volume = {'id': uuids.volume, 'multiattach': False}
|
||||
|
||||
with test.nested(
|
||||
mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW),
|
||||
mock.patch.object(cinder.API, 'get', return_value=fake_volume),
|
||||
mock.patch.object(cinder, 'is_microversion_supported'),
|
||||
mock.patch.object(cinder.API, 'check_availability_zone'),
|
||||
mock.patch.object(cinder.API, 'attachment_create'),
|
||||
mock.patch.object(cinder.API, 'reserve_volume'),
|
||||
mock.patch.object(compute_rpcapi.ComputeAPI,
|
||||
'reserve_block_device_name', return_value=bdm),
|
||||
mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
|
||||
) as (mock_get_version, mock_get, mock_mv_supported,
|
||||
mock_check_availability_zone, mock_attachment_create,
|
||||
mock_reserve_vol, mock_reserve_bdm, mock_attach):
|
||||
mock_mv_supported.side_effect = \
|
||||
exception.CinderAPIVersionNotAvailable(version='3.44')
|
||||
mock_attachment_create.side_effect = \
|
||||
exception.CinderAPIVersionNotAvailable(version='3.44')
|
||||
self.compute_api.attach_volume(
|
||||
self.context, instance, uuids.volume,
|
||||
'/dev/vdb', 'ide', 'cdrom')
|
||||
|
||||
mock_reserve_bdm.assert_called_once_with(
|
||||
self.context, instance, '/dev/vdb', uuids.volume,
|
||||
disk_bus='ide', device_type='cdrom', tag=None,
|
||||
multiattach=False)
|
||||
self.assertEqual(mock_get.call_args,
|
||||
mock.call(self.context, uuids.volume))
|
||||
self.assertEqual(mock_check_availability_zone.call_args,
|
||||
mock.call(
|
||||
self.context, fake_volume, instance=instance))
|
||||
mock_attachment_create.assert_called_once_with(self.context,
|
||||
uuids.volume,
|
||||
instance.uuid)
|
||||
mock_reserve_vol.assert_called_once_with(
|
||||
self.context, uuids.volume)
|
||||
a, kw = mock_attach.call_args
|
||||
self.assertEqual(a[2].device_name, '/dev/vdb')
|
||||
self.assertEqual(a[2].volume_id, uuids.volume_id)
|
||||
|
||||
def test_attach_volume_no_device_new_flow(self):
|
||||
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
|
||||
{'source_type': 'volume', 'destination_type': 'volume',
|
||||
|
@ -10773,10 +10674,7 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
fake_volume = {'id': uuids.volume, 'multiattach': False}
|
||||
|
||||
with test.nested(
|
||||
mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW),
|
||||
mock.patch.object(cinder.API, 'get', return_value=fake_volume),
|
||||
mock.patch.object(cinder, 'is_microversion_supported'),
|
||||
mock.patch.object(objects.BlockDeviceMapping,
|
||||
'get_by_volume_and_instance',
|
||||
side_effect=exception.VolumeBDMNotFound),
|
||||
|
@ -10787,7 +10685,7 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
mock.patch.object(compute_rpcapi.ComputeAPI,
|
||||
'reserve_block_device_name', return_value=bdm),
|
||||
mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
|
||||
) as (mock_get_version, mock_get, mock_mv_supported, mock_no_bdm,
|
||||
) as (mock_get, mock_no_bdm,
|
||||
mock_check_availability_zone, mock_attachment_create,
|
||||
mock_bdm_save, mock_reserve_bdm, mock_attach):
|
||||
mock_no_bdm.side_effect = exception.VolumeBDMNotFound(
|
||||
|
@ -10883,54 +10781,6 @@ class ComputeAPITestCase(BaseTestCase):
|
|||
mock_attach_complete.assert_called_once_with(
|
||||
self.context, uuids.attachment_id)
|
||||
|
||||
def test_attach_volume_no_device(self):
|
||||
|
||||
called = {}
|
||||
|
||||
def fake_mv_supported(*args, **kwargs):
|
||||
raise exception.CinderAPIVersionNotAvailable(version='3.44')
|
||||
|
||||
def fake_check_availability_zone(*args, **kwargs):
|
||||
called['fake_check_availability_zone'] = True
|
||||
|
||||
def fake_reserve_volume(*args, **kwargs):
|
||||
called['fake_reserve_volume'] = True
|
||||
|
||||
def fake_volume_get(self, context, volume_id):
|
||||
called['fake_volume_get'] = True
|
||||
return {'id': volume_id, 'multiattach': False}
|
||||
|
||||
def fake_rpc_attach_volume(self, context, instance, bdm):
|
||||
called['fake_rpc_attach_volume'] = True
|
||||
|
||||
def fake_rpc_reserve_block_device_name(self, context, instance, device,
|
||||
volume_id, **kwargs):
|
||||
called['fake_rpc_reserve_block_device_name'] = True
|
||||
bdm = block_device_obj.BlockDeviceMapping(context=context)
|
||||
bdm['device_name'] = '/dev/vdb'
|
||||
return bdm
|
||||
|
||||
self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
|
||||
self.stub_out('nova.volume.cinder.is_microversion_supported',
|
||||
fake_mv_supported)
|
||||
self.stub_out('nova.volume.cinder.API.check_availability_zone',
|
||||
fake_check_availability_zone)
|
||||
self.stub_out('nova.volume.cinder.API.reserve_volume',
|
||||
fake_reserve_volume)
|
||||
self.stub_out('nova.compute.rpcapi.ComputeAPI.'
|
||||
'reserve_block_device_name',
|
||||
fake_rpc_reserve_block_device_name)
|
||||
self.stub_out('nova.compute.rpcapi.ComputeAPI.attach_volume',
|
||||
fake_rpc_attach_volume)
|
||||
|
||||
instance = self._create_fake_instance_obj()
|
||||
self.compute_api.attach_volume(self.context, instance, 1, device=None)
|
||||
self.assertTrue(called.get('fake_check_availability_zone'))
|
||||
self.assertTrue(called.get('fake_reserve_volume'))
|
||||
self.assertTrue(called.get('fake_volume_get'))
|
||||
self.assertTrue(called.get('fake_rpc_reserve_block_device_name'))
|
||||
self.assertTrue(called.get('fake_rpc_attach_volume'))
|
||||
|
||||
@mock.patch('nova.compute.api.API._record_action_start')
|
||||
def test_detach_volume(self, mock_record):
|
||||
# Ensure volume can be detached from instance
|
||||
|
|
|
@ -72,10 +72,6 @@ SHELVED_IMAGE = 'fake-shelved-image'
|
|||
SHELVED_IMAGE_NOT_FOUND = 'fake-shelved-image-notfound'
|
||||
SHELVED_IMAGE_NOT_AUTHORIZED = 'fake-shelved-image-not-authorized'
|
||||
SHELVED_IMAGE_EXCEPTION = 'fake-shelved-image-exception'
|
||||
COMPUTE_VERSION_NEW_ATTACH_FLOW = \
|
||||
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION
|
||||
COMPUTE_VERSION_OLD_ATTACH_FLOW = \
|
||||
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
|
@ -418,79 +414,10 @@ class _ComputeAPIUnitTestMixIn(object):
|
|||
|
||||
@mock.patch.object(compute_api.API, '_record_action_start')
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name')
|
||||
@mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_OLD_ATTACH_FLOW)
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
|
||||
def test_attach_volume(self, mock_attach, mock_get_min_ver, mock_reserve,
|
||||
mock_record):
|
||||
instance = self._create_instance_obj()
|
||||
volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
|
||||
None, None, None, None, None)
|
||||
|
||||
fake_bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
|
||||
mock_reserve.return_value = fake_bdm
|
||||
|
||||
mock_volume_api = mock.patch.object(self.compute_api, 'volume_api',
|
||||
mock.MagicMock(spec=cinder.API))
|
||||
|
||||
with mock_volume_api as mock_v_api:
|
||||
mock_v_api.get.return_value = volume
|
||||
self.compute_api.attach_volume(
|
||||
self.context, instance, volume['id'])
|
||||
mock_v_api.check_availability_zone.assert_called_once_with(
|
||||
self.context, volume, instance=instance)
|
||||
mock_v_api.reserve_volume.assert_called_once_with(self.context,
|
||||
volume['id'])
|
||||
mock_attach.assert_called_once_with(self.context,
|
||||
instance, fake_bdm)
|
||||
mock_record.assert_called_once_with(
|
||||
self.context, instance, instance_actions.ATTACH_VOLUME)
|
||||
|
||||
@mock.patch.object(compute_api.API, '_record_action_start')
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name')
|
||||
@mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_OLD_ATTACH_FLOW)
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
|
||||
def test_tagged_volume_attach(self, mock_attach, mock_get_min_ver,
|
||||
mock_reserve, mock_record):
|
||||
instance = self._create_instance_obj()
|
||||
volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
|
||||
None, None, None, None, None)
|
||||
|
||||
fake_bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
|
||||
mock_reserve.return_value = fake_bdm
|
||||
|
||||
mock_volume_api = mock.patch.object(self.compute_api, 'volume_api',
|
||||
mock.MagicMock(spec=cinder.API))
|
||||
|
||||
with mock_volume_api as mock_v_api:
|
||||
mock_v_api.get.return_value = volume
|
||||
self.compute_api.attach_volume(
|
||||
self.context, instance, volume['id'], tag='foo')
|
||||
mock_reserve.assert_called_once_with(self.context, instance, None,
|
||||
volume['id'],
|
||||
device_type=None,
|
||||
disk_bus=None, tag='foo',
|
||||
multiattach=False)
|
||||
mock_v_api.check_availability_zone.assert_called_once_with(
|
||||
self.context, volume, instance=instance)
|
||||
mock_v_api.reserve_volume.assert_called_once_with(self.context,
|
||||
volume['id'])
|
||||
mock_attach.assert_called_once_with(self.context,
|
||||
instance, fake_bdm)
|
||||
mock_record.assert_called_once_with(
|
||||
self.context, instance, instance_actions.ATTACH_VOLUME)
|
||||
|
||||
@mock.patch.object(compute_api.API, '_record_action_start')
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name')
|
||||
@mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW)
|
||||
@mock.patch('nova.volume.cinder.is_microversion_supported')
|
||||
@mock.patch.object(objects.BlockDeviceMapping,
|
||||
'get_by_volume_and_instance')
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
|
||||
def test_attach_volume_new_flow(self, mock_attach, mock_bdm,
|
||||
mock_cinder_mv_supported, mock_get_min_ver,
|
||||
mock_reserve, mock_record):
|
||||
mock_bdm.side_effect = exception.VolumeBDMNotFound(
|
||||
volume_id='fake-volume-id')
|
||||
|
@ -520,15 +447,10 @@ class _ComputeAPIUnitTestMixIn(object):
|
|||
|
||||
@mock.patch.object(compute_api.API, '_record_action_start')
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name')
|
||||
@mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW)
|
||||
@mock.patch('nova.volume.cinder.is_microversion_supported')
|
||||
@mock.patch.object(objects.BlockDeviceMapping,
|
||||
'get_by_volume_and_instance')
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
|
||||
def test_tagged_volume_attach_new_flow(self, mock_attach, mock_bdm,
|
||||
mock_cinder_mv_supported,
|
||||
mock_get_min_ver,
|
||||
mock_reserve, mock_record):
|
||||
mock_bdm.side_effect = exception.VolumeBDMNotFound(
|
||||
volume_id='fake-volume-id')
|
||||
|
@ -560,58 +482,11 @@ class _ComputeAPIUnitTestMixIn(object):
|
|||
mock_attach.assert_called_once_with(self.context,
|
||||
instance, fake_bdm)
|
||||
|
||||
@mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_OLD_ATTACH_FLOW)
|
||||
@mock.patch('nova.volume.cinder.API.get')
|
||||
def test_attach_volume_shelved_instance(self, mock_get, mock_get_min_ver):
|
||||
instance = self._create_instance_obj()
|
||||
instance.vm_state = vm_states.SHELVED_OFFLOADED
|
||||
volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
|
||||
None, None, None, None, None)
|
||||
mock_get.return_value = volume
|
||||
self.assertRaises(exception.VolumeTaggedAttachToShelvedNotSupported,
|
||||
self.compute_api.attach_volume, self.context,
|
||||
instance, volume['id'], tag='foo')
|
||||
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name')
|
||||
@mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_OLD_ATTACH_FLOW)
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
|
||||
def test_attach_volume_reserve_fails(self, mock_attach,
|
||||
mock_get_min_ver, mock_reserve):
|
||||
instance = self._create_instance_obj()
|
||||
volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
|
||||
None, None, None, None, None)
|
||||
|
||||
fake_bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
|
||||
mock_reserve.return_value = fake_bdm
|
||||
|
||||
mock_volume_api = mock.patch.object(self.compute_api, 'volume_api',
|
||||
mock.MagicMock(spec=cinder.API))
|
||||
|
||||
with mock_volume_api as mock_v_api:
|
||||
mock_v_api.get.return_value = volume
|
||||
mock_v_api.reserve_volume.side_effect = test.TestingException()
|
||||
self.assertRaises(test.TestingException,
|
||||
self.compute_api.attach_volume,
|
||||
self.context, instance, volume['id'])
|
||||
mock_v_api.check_availability_zone.assert_called_once_with(
|
||||
self.context, volume, instance=instance)
|
||||
mock_v_api.reserve_volume.assert_called_once_with(self.context,
|
||||
volume['id'])
|
||||
self.assertEqual(0, mock_attach.call_count)
|
||||
fake_bdm.destroy.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name')
|
||||
@mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW)
|
||||
@mock.patch('nova.volume.cinder.is_microversion_supported')
|
||||
@mock.patch.object(objects.BlockDeviceMapping,
|
||||
'get_by_volume_and_instance')
|
||||
@mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
|
||||
def test_attach_volume_attachment_create_fails(self, mock_attach, mock_bdm,
|
||||
mock_cinder_mv_supported,
|
||||
mock_get_min_ver,
|
||||
mock_reserve):
|
||||
mock_bdm.side_effect = exception.VolumeBDMNotFound(
|
||||
volume_id='fake-volume-id')
|
||||
|
@ -4257,55 +4132,11 @@ class _ComputeAPIUnitTestMixIn(object):
|
|||
self.context,
|
||||
bdms, legacy_bdm=True)
|
||||
|
||||
@mock.patch.object(objects.service, 'get_minimum_version_all_cells',
|
||||
return_value=
|
||||
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1)
|
||||
@mock.patch.object(cinder.API, 'get')
|
||||
@mock.patch.object(cinder.API, 'reserve_volume')
|
||||
def test_validate_bdm_returns_attachment_id(self, mock_reserve_volume,
|
||||
mock_get,
|
||||
mock_get_min_ver_all):
|
||||
# Tests that bdm validation *always* returns an attachment_id even if
|
||||
# it's None.
|
||||
instance = self._create_instance_obj()
|
||||
del instance.id
|
||||
instance_type = self._create_flavor()
|
||||
volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8'
|
||||
volume_info = {'status': 'available',
|
||||
'attach_status': 'detached',
|
||||
'id': volume_id,
|
||||
'multiattach': False}
|
||||
mock_get.return_value = volume_info
|
||||
|
||||
# NOTE(mnaser): We use the AnonFakeDbBlockDeviceDict to make sure that
|
||||
# the attachment_id field does not get any defaults to
|
||||
# properly test this function.
|
||||
bdms = [objects.BlockDeviceMapping(
|
||||
**fake_block_device.AnonFakeDbBlockDeviceDict(
|
||||
{
|
||||
'boot_index': 0,
|
||||
'volume_id': volume_id,
|
||||
'source_type': 'volume',
|
||||
'destination_type': 'volume',
|
||||
'device_name': 'vda',
|
||||
}))]
|
||||
self.compute_api._validate_bdm(self.context, instance, instance_type,
|
||||
bdms)
|
||||
self.assertIsNone(bdms[0].attachment_id)
|
||||
|
||||
mock_get.assert_called_once_with(self.context, volume_id)
|
||||
mock_reserve_volume.assert_called_once_with(
|
||||
self.context, volume_id)
|
||||
|
||||
@mock.patch.object(objects.service, 'get_minimum_version_all_cells',
|
||||
return_value=
|
||||
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1)
|
||||
@mock.patch.object(cinder.API, 'get')
|
||||
@mock.patch.object(cinder.API, 'reserve_volume',
|
||||
@mock.patch.object(cinder.API, 'attachment_create',
|
||||
side_effect=exception.InvalidInput(reason='error'))
|
||||
def test_validate_bdm_with_error_volume(self, mock_reserve_volume,
|
||||
mock_get,
|
||||
mock_get_min_ver_all):
|
||||
def test_validate_bdm_with_error_volume(self, mock_attach_create,
|
||||
mock_get):
|
||||
# Tests that an InvalidInput exception raised from
|
||||
# volume_api.reserve_volume due to the volume status not being
|
||||
# 'available' results in _validate_bdm re-raising InvalidVolume.
|
||||
|
@ -4334,8 +4165,8 @@ class _ComputeAPIUnitTestMixIn(object):
|
|||
instance, instance_type, bdms)
|
||||
|
||||
mock_get.assert_called_once_with(self.context, volume_id)
|
||||
mock_reserve_volume.assert_called_once_with(
|
||||
self.context, volume_id)
|
||||
mock_attach_create.assert_called_once_with(
|
||||
self.context, volume_id, instance.uuid)
|
||||
|
||||
@mock.patch.object(cinder.API, 'get_snapshot',
|
||||
side_effect=exception.CinderConnectionFailed(reason='error'))
|
||||
|
@ -4373,14 +4204,11 @@ class _ComputeAPIUnitTestMixIn(object):
|
|||
self.context,
|
||||
instance, instance_type, bdms)
|
||||
|
||||
@mock.patch.object(objects.service, 'get_minimum_version_all_cells',
|
||||
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW)
|
||||
@mock.patch.object(cinder.API, 'get')
|
||||
@mock.patch.object(cinder.API, 'attachment_create',
|
||||
side_effect=exception.InvalidInput(reason='error'))
|
||||
def test_validate_bdm_with_error_volume_new_flow(self, mock_attach_create,
|
||||
mock_get,
|
||||
mock_get_min_ver_all):
|
||||
mock_get):
|
||||
# Tests that an InvalidInput exception raised from
|
||||
# volume_api.attachment_create due to the volume status not being
|
||||
# 'available' results in _validate_bdm re-raising InvalidVolume.
|
||||
|
@ -4619,7 +4447,7 @@ class _ComputeAPIUnitTestMixIn(object):
|
|||
@mock.patch.object(cinder.API, 'get',
|
||||
return_value={'id': '1', 'multiattach': False})
|
||||
@mock.patch.object(cinder.API, 'check_availability_zone')
|
||||
@mock.patch.object(cinder.API, 'reserve_volume',
|
||||
@mock.patch.object(cinder.API, 'attachment_create',
|
||||
side_effect=exception.InvalidInput(reason='error'))
|
||||
def test_provision_instances_with_error_volume(self,
|
||||
mock_cinder_check_av_zone,
|
||||
|
@ -4628,18 +4456,13 @@ class _ComputeAPIUnitTestMixIn(object):
|
|||
self._test_provision_instances_with_cinder_error(
|
||||
expected_exception=exception.InvalidVolume)
|
||||
|
||||
@mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW)
|
||||
@mock.patch.object(objects.service, 'get_minimum_version_all_cells',
|
||||
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW)
|
||||
@mock.patch.object(cinder.API, 'get',
|
||||
return_value={'id': '1', 'multiattach': False})
|
||||
@mock.patch.object(cinder.API, 'check_availability_zone')
|
||||
@mock.patch.object(cinder.API, 'attachment_create',
|
||||
side_effect=exception.InvalidInput(reason='error'))
|
||||
def test_provision_instances_with_error_volume_new_flow(self,
|
||||
mock_cinder_check_av_zone, mock_attach_create, mock_get,
|
||||
mock_get_min_ver_cells, mock_get_min_ver):
|
||||
mock_cinder_check_av_zone, mock_attach_create, mock_get):
|
||||
self._test_provision_instances_with_cinder_error(
|
||||
expected_exception=exception.InvalidVolume)
|
||||
|
||||
|
@ -4857,115 +4680,6 @@ class _ComputeAPIUnitTestMixIn(object):
|
|||
self.assertEqual(ctxt.user_id, inst_mapping_mock.user_id)
|
||||
do_test()
|
||||
|
||||
@mock.patch.object(objects.service, 'get_minimum_version_all_cells',
|
||||
return_value=
|
||||
compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION - 1)
|
||||
@mock.patch.object(cinder.API, 'get',
|
||||
return_value={'id': '1', 'multiattach': False})
|
||||
@mock.patch.object(cinder.API, 'check_availability_zone',)
|
||||
@mock.patch.object(cinder.API, 'reserve_volume',
|
||||
side_effect=(None, exception.InvalidInput(reason='error')))
|
||||
def test_provision_instances_cleans_up_when_volume_invalid(self,
|
||||
_mock_cinder_reserve_volume,
|
||||
_mock_cinder_check_availability_zone, _mock_cinder_get,
|
||||
_mock_get_min_ver_cells):
|
||||
@mock.patch.object(self.compute_api,
|
||||
'_create_reqspec_buildreq_instmapping')
|
||||
@mock.patch('nova.compute.utils.check_num_instances_quota')
|
||||
@mock.patch.object(objects, 'Instance')
|
||||
@mock.patch.object(self.compute_api.security_group_api,
|
||||
'ensure_default')
|
||||
@mock.patch.object(self.compute_api, '_create_block_device_mapping')
|
||||
@mock.patch.object(objects.RequestSpec, 'from_components')
|
||||
@mock.patch.object(objects, 'BuildRequest')
|
||||
@mock.patch.object(objects, 'InstanceMapping')
|
||||
def do_test(mock_inst_mapping, mock_build_req,
|
||||
mock_req_spec_from_components, _mock_create_bdm,
|
||||
_mock_ensure_default, mock_inst, mock_check_num_inst_quota,
|
||||
mock_create_rs_br_im):
|
||||
|
||||
min_count = 1
|
||||
max_count = 2
|
||||
mock_check_num_inst_quota.return_value = 2
|
||||
req_spec_mock = mock.MagicMock()
|
||||
mock_req_spec_from_components.return_value = req_spec_mock
|
||||
inst_mocks = [mock.MagicMock() for i in range(max_count)]
|
||||
for inst_mock in inst_mocks:
|
||||
inst_mock.project_id = 'fake-project'
|
||||
mock_inst.side_effect = inst_mocks
|
||||
build_req_mocks = [mock.MagicMock() for i in range(max_count)]
|
||||
mock_build_req.side_effect = build_req_mocks
|
||||
inst_map_mocks = [mock.MagicMock() for i in range(max_count)]
|
||||
mock_inst_mapping.side_effect = inst_map_mocks
|
||||
|
||||
ctxt = context.RequestContext('fake-user', 'fake-project')
|
||||
flavor = self._create_flavor()
|
||||
boot_meta = {
|
||||
'id': 'fake-image-id',
|
||||
'properties': {'mappings': []},
|
||||
'status': 'fake-status',
|
||||
'location': 'far-away'}
|
||||
base_options = {'image_ref': 'fake-ref',
|
||||
'display_name': 'fake-name',
|
||||
'project_id': 'fake-project',
|
||||
'availability_zone': None,
|
||||
'metadata': {},
|
||||
'access_ip_v4': None,
|
||||
'access_ip_v6': None,
|
||||
'config_drive': None,
|
||||
'key_name': None,
|
||||
'reservation_id': None,
|
||||
'kernel_id': None,
|
||||
'ramdisk_id': None,
|
||||
'root_device_name': None,
|
||||
'user_data': None,
|
||||
'numa_topology': None,
|
||||
'pci_requests': None,
|
||||
'port_resource_requests': None}
|
||||
security_groups = {}
|
||||
block_device_mapping = objects.BlockDeviceMappingList(
|
||||
objects=[objects.BlockDeviceMapping(
|
||||
**fake_block_device.FakeDbBlockDeviceDict(
|
||||
{
|
||||
'id': 1,
|
||||
'volume_id': 1,
|
||||
'source_type': 'volume',
|
||||
'destination_type': 'volume',
|
||||
'device_name': 'vda',
|
||||
'boot_index': 0,
|
||||
}))])
|
||||
shutdown_terminate = True
|
||||
instance_group = None
|
||||
check_server_group_quota = False
|
||||
filter_properties = {'scheduler_hints': None,
|
||||
'instance_type': flavor}
|
||||
tags = objects.TagList()
|
||||
trusted_certs = None
|
||||
self.assertRaises(exception.InvalidVolume,
|
||||
self.compute_api._provision_instances, ctxt,
|
||||
flavor, min_count, max_count, base_options,
|
||||
boot_meta, security_groups, block_device_mapping,
|
||||
shutdown_terminate, instance_group,
|
||||
check_server_group_quota, filter_properties,
|
||||
None, tags, trusted_certs, False)
|
||||
# First instance, build_req, mapping is created and destroyed
|
||||
mock_create_rs_br_im.assert_called_once_with(ctxt, req_spec_mock,
|
||||
build_req_mocks[0],
|
||||
inst_map_mocks[0])
|
||||
self.assertTrue(build_req_mocks[0].destroy.called)
|
||||
self.assertTrue(inst_map_mocks[0].destroy.called)
|
||||
# Second instance, build_req, mapping is not created nor destroyed
|
||||
self.assertFalse(inst_mocks[1].create.called)
|
||||
self.assertFalse(inst_mocks[1].destroy.called)
|
||||
self.assertFalse(build_req_mocks[1].destroy.called)
|
||||
self.assertFalse(inst_map_mocks[1].destroy.called)
|
||||
|
||||
do_test()
|
||||
|
||||
@mock.patch.object(objects.Service, 'get_minimum_version',
|
||||
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW)
|
||||
@mock.patch.object(objects.service, 'get_minimum_version_all_cells',
|
||||
return_value=COMPUTE_VERSION_NEW_ATTACH_FLOW)
|
||||
@mock.patch.object(cinder.API, 'get',
|
||||
return_value={'id': '1', 'multiattach': False})
|
||||
@mock.patch.object(cinder.API, 'check_availability_zone',)
|
||||
|
@ -4975,10 +4689,9 @@ class _ComputeAPIUnitTestMixIn(object):
|
|||
@mock.patch.object(objects.BlockDeviceMapping, 'save')
|
||||
def test_provision_instances_cleans_up_when_volume_invalid_new_flow(self,
|
||||
_mock_bdm, _mock_cinder_attach_create,
|
||||
_mock_cinder_check_availability_zone, _mock_cinder_get,
|
||||
_mock_get_min_ver_cells, _mock_get_min_ver):
|
||||
_mock_cinder_check_availability_zone, _mock_cinder_get):
|
||||
@mock.patch.object(self.compute_api,
|
||||
'_create_reqspec_buildreq_instmapping')
|
||||
'_create_reqspec_buildreq_instmapping')
|
||||
@mock.patch('nova.compute.utils.check_num_instances_quota')
|
||||
@mock.patch.object(objects, 'Instance')
|
||||
@mock.patch.object(self.compute_api.security_group_api,
|
||||
|
@ -6570,14 +6283,10 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
|
|||
self.context, volume, instance, bdm,
|
||||
supports_multiattach=False)
|
||||
|
||||
@mock.patch('nova.objects.Service.get_minimum_version',
|
||||
return_value=compute_api.CINDER_V3_ATTACH_MIN_COMPUTE_VERSION)
|
||||
@mock.patch('nova.volume.cinder.API.get',
|
||||
return_value={'id': uuids.volumeid, 'multiattach': True})
|
||||
@mock.patch('nova.volume.cinder.is_microversion_supported',
|
||||
return_value=None)
|
||||
def test_attach_volume_shelved_offloaded_fails(
|
||||
self, is_microversion_supported, volume_get, get_min_version):
|
||||
self, volume_get):
|
||||
"""Tests that trying to attach a multiattach volume to a shelved
|
||||
offloaded instance fails because it's not supported.
|
||||
"""
|
||||
|
|
Loading…
Reference in New Issue