fup: Move _wait_for_volume_attach into InstanceHelperMixin

As discussed in I332d4f33ea6b9506cc24ac12e5c0994f208a3107, this helper is
useful enough to be copied into InstanceHelperMixin.

Most of this change is migrating the original functional test that
introduced _wait_for_volume_attach over to use _IntegratedTestBase.

Change-Id: I345dce93c6e2593c1ff1b863425f9a854c49ab9d
Lee Yarwood 2021-07-27 22:11:40 +01:00
parent 2209b0007f
commit 753a2ae5d4
4 changed files with 26 additions and 118 deletions


@@ -180,6 +180,24 @@ class InstanceHelperMixin:
                 'actions: %s. Events in the last matching action: %s'
                 % (event_name, actions, events))
 
+    def _wait_for_volume_attach(self, server_id, volume_id):
+        timeout = 0.0
+        server = self.api.get_server(server_id)
+        attached_vols = [vol['id'] for vol in
+                         server['os-extended-volumes:volumes_attached']]
+        while volume_id not in attached_vols and timeout < 10.0:
+            time.sleep(.1)
+            timeout += .1
+            server = self.api.get_server(server_id)
+            attached_vols = [vol['id'] for vol in
+                             server['os-extended-volumes:volumes_attached']]
+        if volume_id not in attached_vols:
+            self.fail('Timed out waiting for volume %s to be attached to '
+                      'server %s. Currently attached volumes: %s' %
+                      (volume_id, server_id, attached_vols))
+
     def _assert_resize_migrate_action_fail(self, server, action, error_in_tb):
        """Waits for the conductor_migrate_server action event to fail for
        the given action and asserts the error is in the event traceback.
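
To make the moved helper's contract concrete, here is a minimal sketch of a caller. The test class name, test name and the random volume ID are hypothetical; it assumes _IntegratedTestBase (which includes InstanceHelperMixin and, as the migrated test below relies on, the CinderFixture that fakes the volume API), and the post_server_volume call mirrors the one used in these tests:

import uuid

from nova.tests.functional import integrated_helpers


class VolumeAttachSketch(integrated_helpers._IntegratedTestBase):
    """Hypothetical test relying on the inherited attach poller."""

    microversion = 'latest'

    def test_attach_is_waited_on(self):
        # _create_server also comes from InstanceHelperMixin and waits
        # for the server to become ACTIVE by default.
        server = self._create_server(networks='none')
        # Any ID works here because the CinderFixture fakes Cinder.
        volume_id = str(uuid.uuid4())
        self.api.post_server_volume(
            server['id'], {'volumeAttachment': {'volumeId': volume_id}})
        # The new helper polls every 0.1s for up to 10s and fails the
        # test if the volume never appears in volumes_attached.
        self._wait_for_volume_attach(server['id'], volume_id)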


@@ -12,19 +12,15 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import time
-
 from oslo_log import log as logging
 
-from nova import test
-from nova.tests import fixtures as nova_fixtures
 from nova.tests.functional.api import client
-from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
 
 LOG = logging.getLogger(__name__)
 
 
-class TestLocalDeleteAttachedVolumes(test.TestCase):
+class TestLocalDeleteAttachedVolumes(integrated_helpers._IntegratedTestBase):
     """Test local delete in the API of a server with a volume attached.
 
     This test creates a server, then shelve-offloads it, attaches a
@@ -38,89 +34,18 @@ class TestLocalDeleteAttachedVolumes(test.TestCase):
     and destroy the related BlockDeviceMappings.
     """
 
+    microversion = 'latest'
+
     def setUp(self):
-        super(TestLocalDeleteAttachedVolumes, self).setUp()
-        self.useFixture(nova_fixtures.RealPolicyFixture())
-        # We need the CinderFixture to stub out the volume API.
-        self.cinder = self.useFixture(
-            nova_fixtures.CinderFixture(self))
-        # The NeutronFixture is needed to stub out validate_networks in API.
-        self.useFixture(nova_fixtures.NeutronFixture(self))
-        # Use the PlacementFixture to avoid annoying warnings in the logs.
-        self.useFixture(func_fixtures.PlacementFixture())
-
-        api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
-            api_version='v2.1'))
-        self.api = api_fixture.api
-        # We want to use 2.37 for passing networks='none' on server create.
-        # We also need this since you can only attach a volume to a
-        # shelved-offloaded server in microversion 2.20+.
-        self.api.microversion = 'latest'
-
-        # the image fake backend needed for image discovery
-        self.useFixture(nova_fixtures.GlanceFixture(self))
-
-        self.start_service('conductor')
-        self.start_service('scheduler')
-        self.start_service('compute')
-
-        self.useFixture(nova_fixtures.CastAsCallFixture(self))
-
+        super().setUp()
         self.flavor_id = self.api.get_flavors()[0]['id']
 
-    def _wait_for_instance_status(self, server_id, status):
-        timeout = 0.0
-        server = self.api.get_server(server_id)
-        while server['status'] != status and timeout < 10.0:
-            time.sleep(.1)
-            timeout += .1
-            server = self.api.get_server(server_id)
-        if server['status'] != status:
-            self.fail('Timed out waiting for server %s to have status: %s. '
-                      'Current status: %s' %
-                      (server_id, status, server['status']))
-        return server
-
-    def _wait_for_instance_delete(self, server_id):
-        timeout = 0.0
-        while timeout < 10.0:
-            try:
-                server = self.api.get_server(server_id)
-            except client.OpenStackApiNotFoundException:
-                # the instance is gone so we're happy
-                return
-            else:
-                time.sleep(.1)
-                timeout += .1
-        self.fail('Timed out waiting for server %s to be deleted. '
-                  'Current vm_state: %s. Current task_state: %s' %
-                  (server_id, server['OS-EXT-STS:vm_state'],
-                   server['OS-EXT-STS:task_state']))
-
     def _delete_server(self, server):
         try:
             self.api.delete_server(server['id'])
         except client.OpenStackApiNotFoundException:
             pass
 
-    def _wait_for_volume_attach(self, server_id, volume_id):
-        timeout = 0.0
-        server = self.api.get_server(server_id)
-        attached_vols = [vol['id'] for vol in
-                         server['os-extended-volumes:volumes_attached']]
-        while volume_id not in attached_vols and timeout < 10.0:
-            time.sleep(.1)
-            timeout += .1
-            server = self.api.get_server(server_id)
-            attached_vols = [vol['id'] for vol in
-                             server['os-extended-volumes:volumes_attached']]
-        if volume_id not in attached_vols:
-            self.fail('Timed out waiting for volume %s to be attached to '
-                      'server %s. Currently attached volumes: %s' %
-                      (volume_id, server_id, attached_vols))
-
     def test_local_delete_with_volume_attached(self, mock_version_get=None):
         LOG.info('Creating server and waiting for it to be ACTIVE.')
         server = dict(
@@ -133,12 +58,12 @@ class TestLocalDeleteAttachedVolumes(test.TestCase):
         server = self.api.post_server({'server': server})
         server_id = server['id']
         self.addCleanup(self._delete_server, server)
-        self._wait_for_instance_status(server_id, 'ACTIVE')
+        self._wait_for_state_change(server, 'ACTIVE')
 
         LOG.info('Shelve-offloading server %s', server_id)
         self.api.post_server_action(server_id, {'shelve': None})
         # Wait for the server to be offloaded.
-        self._wait_for_instance_status(server_id, 'SHELVED_OFFLOADED')
+        self._wait_for_state_change(server, 'SHELVED_OFFLOADED')
 
         volume_id = '9a695496-44aa-4404-b2cc-ccab2501f87e'
         LOG.info('Attaching volume %s to server %s', volume_id, server_id)
@@ -159,7 +84,7 @@ class TestLocalDeleteAttachedVolumes(test.TestCase):
         LOG.info('Deleting shelved-offloaded server %s.', server_id)
         self._delete_server(server)
         # Now wait for the server to be gone.
-        self._wait_for_instance_delete(server_id)
+        self._wait_until_deleted(server)
         LOG.info('Validating that volume %s was detached from server %s.',
                  volume_id, server_id)
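
For reference, the hand-rolled pollers deleted above map directly onto mixin helpers. A condensed sketch under the same assumptions as before (class and test names are hypothetical):

from nova.tests.functional import integrated_helpers


class LifecycleSketch(integrated_helpers._IntegratedTestBase):
    """Hypothetical test showing the mixin's lifecycle pollers."""

    def test_lifecycle_waits(self):
        # _create_server already waits for ACTIVE by default, so the
        # explicit wait below is purely illustrative.
        server = self._create_server(networks='none')
        # Replaces _wait_for_instance_status: takes the server body
        # rather than a bare ID and returns the refreshed body.
        server = self._wait_for_state_change(server, 'ACTIVE')
        self.api.delete_server(server['id'])
        # Replaces _wait_for_instance_delete: polls until the GET
        # raises OpenStackApiNotFoundException.
        self._wait_until_deleted(server)

Everything else the old setUp built by hand, the fixtures plus the conductor, scheduler and compute services, is wired up by _IntegratedTestBase itself, which is why the method collapses to super().setUp().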


@@ -13,7 +13,6 @@
 # under the License.
 
 import mock
-import time
 
 from nova import context
 from nova import exception
@@ -34,26 +33,6 @@ class TestDuplicateVolAttachRace(integrated_helpers._IntegratedTestBase):
 
     microversion = 'latest'
 
-    # TODO(lyarwood): Copied from test_bug_1675570.py, move both into
-    # _IntegratedTestBase.
-    def _wait_for_volume_attach(self, server_id, volume_id):
-        timeout = 0.0
-        server = self.api.get_server(server_id)
-        attached_vols = [vol['id'] for vol in
-                         server['os-extended-volumes:volumes_attached']]
-        while volume_id not in attached_vols and timeout < 10.0:
-            time.sleep(.1)
-            timeout += .1
-            server = self.api.get_server(server_id)
-            attached_vols = [vol['id'] for vol in
-                             server['os-extended-volumes:volumes_attached']]
-        if volume_id not in attached_vols:
-            self.fail('Timed out waiting for volume %s to be attached to '
-                      'server %s. Currently attached volumes: %s' %
-                      (volume_id, server_id, attached_vols))
-
     def test_duplicate_volume_attach_race(self):
         ctxt = context.get_admin_context()


@@ -10,8 +10,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import time
-
 from nova.tests import fixtures as nova_fixtures
 from nova.tests.functional.api import client
 from nova.tests.functional import integrated_helpers
@@ -19,18 +17,6 @@ from nova.tests.functional import integrated_helpers
 
 class ConfigurableMaxDiskDevicesTest(integrated_helpers._IntegratedTestBase):
 
-    def _wait_for_volume_attach(self, server_id, volume_id):
-        for i in range(0, 100):
-            server = self.api.get_server(server_id)
-            attached_vols = [vol['id'] for vol in
-                             server['os-extended-volumes:volumes_attached']]
-            if volume_id in attached_vols:
-                return
-            time.sleep(.1)
-        self.fail('Timed out waiting for volume %s to be attached to '
-                  'server %s. Currently attached volumes: %s' %
-                  (volume_id, server_id, attached_vols))
-
     def test_boot_from_volume(self):
         # Set the maximum to 1 and boot from 1 volume. This should pass.
         self.flags(max_disk_devices_to_attach=1, group='compute')
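
Finally, for the boot-from-volume path this last test exercises, a minimal sketch of the request shape. The class and test names are hypothetical, and it assumes CinderFixture.IMAGE_BACKED_VOL is one of the fixture's pre-seeded bootable volume IDs:

from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers


class MaxDiskDevicesSketch(integrated_helpers._IntegratedTestBase):
    """Hypothetical sketch of the boot-from-volume flow above."""

    microversion = 'latest'

    def test_boot_from_volume_within_limit(self):
        # Cap the number of disk devices a server may have attached.
        self.flags(max_disk_devices_to_attach=1, group='compute')
        # Boot from a single volume: an empty imageRef plus one v2
        # block device mapping, staying within the limit of one.
        server = self._build_server(image_uuid='', networks='none')
        server['block_device_mapping_v2'] = [{
            'boot_index': 0,
            'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
            'source_type': 'volume',
            'destination_type': 'volume',
        }]
        server = self.api.post_server({'server': server})
        self._wait_for_state_change(server, 'ACTIVE')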