Refactor test_server_shares: Mock in Base Class and trait verification
Mock the connect and disconnect methods in the base class to centralize and streamline calls. Additionally, create a reusable function to verify traits.

Manila is the OpenStack Shared Filesystems service. This series of patches implements the changes required in Nova to allow shares provided by Manila to be associated with and attached to instances using virtiofs.

Implements: blueprint libvirt-virtiofs-attach-manila-shares
Change-Id: If8b2263c86f4eb580426b1a7b3de28c85cf5af1a
This commit is contained in: parent 73a7c1b295, commit 5e6474dc97
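The pattern, in brief: instead of every test opening its own mock.patch() context manager, the libvirt volume-driver connect/disconnect calls are patched once in the base class, and the trait assertion moves into a shared helper. The sketch below is a minimal illustration assembled from the diff that follows; the setUp() placement and the base-module import path are assumptions, and _get_provider_traits / compute_rp_uuids come from Nova's functional-test base classes rather than being defined here.

    # Minimal sketch, not the full test file: the mocks and the trait helper
    # live in the shared base class so individual tests only adjust behaviour.
    import fixtures

    from nova.tests.functional.libvirt import base  # assumed import path


    class ServerSharesTestBase(base.ServersTestBase):

        def setUp(self):
            super().setUp()
            # Patch the libvirt NFS volume driver once for every test.
            self.mock_connect = self.useFixture(fixtures.MockPatch(
                'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
                'connect_volume')).mock
            self.mock_disconnect = self.useFixture(fixtures.MockPatch(
                'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
                'disconnect_volume')).mock

        def _check_traits(self):
            # Reusable check that the compute reports the virtiofs traits.
            expected_traits = {
                "COMPUTE_STORAGE_VIRTIO_FS", "COMPUTE_MEM_BACKING_FILE"
            }
            traits = self._get_provider_traits(
                self.compute_rp_uuids[self.compute])
            self.assertEqual(
                expected_traits, expected_traits.intersection(traits))


    # A test that needs a mount failure can now simply do:
    #     self.mock_connect.side_effect = processutils.ProcessExecutionError
    # (processutils is oslo_concurrency.processutils) instead of wrapping
    # its body in a mock.patch() context manager.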
@@ -28,8 +28,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
 
-from unittest import mock
-
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
@@ -59,6 +57,22 @@ class ServerSharesTestBase(base.ServersTestBase):
 
         self.host = self.computes[self.compute].driver._host
 
+        self.mock_connect = self.useFixture(fixtures.MockPatch(
+            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
+            'connect_volume')).mock
+
+        self.mock_disconnect = self.useFixture(fixtures.MockPatch(
+            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
+            'disconnect_volume')).mock
+
+        self.mock_connect_ceph = self.useFixture(fixtures.MockPatch(
+            'nova.virt.libvirt.volume.cephfs.LibvirtCEPHFSVolumeDriver'
+            '.connect_volume')).mock
+
+        self.mock_disconnect_ceph = self.useFixture(fixtures.MockPatch(
+            'nova.virt.libvirt.volume.cephfs.LibvirtCEPHFSVolumeDriver'
+            '.disconnect_volume')).mock
+
     def _get_xml(self, server):
         self.instance = instance.Instance.get_by_uuid(
             self.context, server['id'])
@@ -107,146 +121,111 @@ class ServerSharesTestBase(base.ServersTestBase):
             device_share_and_tag.append((device['share_id'], device['tag']))
         self.assertIn((share_id, tag), device_share_and_tag)
 
+    def _check_traits(self):
+        expected_traits = {
+            "COMPUTE_STORAGE_VIRTIO_FS", "COMPUTE_MEM_BACKING_FILE"
+        }
+
+        traits = self._get_provider_traits(self.compute_rp_uuids[self.compute])
+
+        self.assertEqual(expected_traits, expected_traits.intersection(traits))
+
 
 class ServerSharesTest(ServerSharesTestBase):
 
     def test_server_share_metadata(self):
         """Verify that share metadata are available"""
-        with mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
-            'disconnect_volume'
-        ), mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
-            'connect_volume'
-        ):
-            traits = self._get_provider_traits(
-                self.compute_rp_uuids[self.compute])
-            for trait in (
-                'COMPUTE_STORAGE_VIRTIO_FS', 'COMPUTE_MEM_BACKING_FILE'):
-                self.assertIn(trait, traits)
-            server = self._create_server(networks='auto')
-            self._stop_server(server)
+        self._check_traits()
+        server = self._create_server(networks='auto')
+        self._stop_server(server)
 
-            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
-            self._attach_share(server, share_id)
-            self._start_server(server)
+        share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
+        self._attach_share(server, share_id)
+        self._start_server(server)
 
-            # tag is the filesystem target directory.
-            # if post /server/{server_id}/share was called without a specific
-            # tag then the tag is the share id.
-            self._assert_filesystem_tag(self._get_xml(server), share_id)
+        # tag is the filesystem target directory.
+        # if post /server/{server_id}/share was called without a specific
+        # tag then the tag is the share id.
+        self._assert_filesystem_tag(self._get_xml(server), share_id)
 
-            self._assert_share_in_metadata(
-                self._get_metadata_url(server), share_id, share_id)
-            return (server, share_id)
+        self._assert_share_in_metadata(
+            self._get_metadata_url(server), share_id, share_id)
+        return (server, share_id)
 
     def test_server_share_metadata_with_tag(self):
         """Verify that share metadata are available with the provided tag"""
-        with mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
-            'disconnect_volume'
-        ), mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
-            'connect_volume'
-        ):
-            traits = self._get_provider_traits(
-                self.compute_rp_uuids[self.compute])
-            for trait in (
-                'COMPUTE_STORAGE_VIRTIO_FS', 'COMPUTE_MEM_BACKING_FILE'):
-                self.assertIn(trait, traits)
-            server = self._create_server(networks='auto')
-            self._stop_server(server)
+        self._check_traits()
+        server = self._create_server(networks='auto')
+        self._stop_server(server)
 
-            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
-            tag = 'mytag'
-            self._attach_share(server, share_id, tag=tag)
-            self._start_server(server)
+        share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
+        tag = 'mytag'
+        self._attach_share(server, share_id, tag=tag)
+        self._start_server(server)
 
-            # tag is the filesystem target directory.
-            # if post /server/{server_id}/share was called without a specific
-            # tag then the tag is the share id.
-            self._assert_filesystem_tag(self._get_xml(server), tag)
+        # tag is the filesystem target directory.
+        # if post /server/{server_id}/share was called without a specific
+        # tag then the tag is the share id.
+        self._assert_filesystem_tag(self._get_xml(server), tag)
 
-            self._assert_share_in_metadata(
-                self._get_metadata_url(server), share_id, tag)
-            return (server, share_id)
+        self._assert_share_in_metadata(
+            self._get_metadata_url(server), share_id, tag)
+        return (server, share_id)
 
     def test_server_share_fails_with_tag_already_used(self):
         """Verify that share create fails if we use an already assigned tag"""
-        with mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
-            'disconnect_volume'
-        ), mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
-            'connect_volume'
-        ):
-            traits = self._get_provider_traits(
-                self.compute_rp_uuids[self.compute])
-            for trait in (
-                'COMPUTE_STORAGE_VIRTIO_FS', 'COMPUTE_MEM_BACKING_FILE'):
-                self.assertIn(trait, traits)
-            server = self._create_server(networks='auto')
-            self._stop_server(server)
+        self._check_traits()
+        server = self._create_server(networks='auto')
+        self._stop_server(server)
 
-            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
-            tag = 'mytag'
-            self._attach_share(server, share_id, tag=tag)
+        share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
+        tag = 'mytag'
+        self._attach_share(server, share_id, tag=tag)
 
-            share_id = '1457bd85-7e7f-4835-92de-47834e1516b5'
-            tag = 'mytag'
+        share_id = '1457bd85-7e7f-4835-92de-47834e1516b5'
+        tag = 'mytag'
 
-            exc = self.assertRaises(
-                client.OpenStackApiException,
-                self._attach_share,
-                server,
-                share_id,
-                tag=tag
-            )
+        exc = self.assertRaises(
+            client.OpenStackApiException,
+            self._attach_share,
+            server,
+            share_id,
+            tag=tag
+        )
 
-            self.assertEqual(409, exc.response.status_code)
-            self.assertIn(
-                "Share '1457bd85-7e7f-4835-92de-47834e1516b5' or "
-                "tag 'mytag' already associated to this server.",
-                str(exc.response.text),
-            )
+        self.assertEqual(409, exc.response.status_code)
+        self.assertIn(
+            "Share '1457bd85-7e7f-4835-92de-47834e1516b5' or "
+            "tag 'mytag' already associated to this server.",
+            str(exc.response.text),
+        )
 
     def test_server_cephfs_share_metadata(self):
         """Verify that cephfs share metadata are available"""
-        with mock.patch(
-            'nova.virt.libvirt.volume.cephfs.LibvirtCEPHFSVolumeDriver.'
-            'disconnect_volume'
-        ), mock.patch(
-            'nova.virt.libvirt.volume.cephfs.LibvirtCEPHFSVolumeDriver.'
-            'connect_volume'
-        ):
-            # update the mock to call the cephfs fake values
-            self.manila_fixture.mock_get.side_effect = (
-                self.manila_fixture.fake_get_cephfs
-            )
-            self.manila_fixture.mock_get_access.side_effect = (
-                self.manila_fixture.fake_get_access_cephfs
-            )
+        # update the mock to call the cephfs fake values
+        self.manila_fixture.mock_get.side_effect = (
+            self.manila_fixture.fake_get_cephfs
+        )
+        self.manila_fixture.mock_get_access.side_effect = (
+            self.manila_fixture.fake_get_access_cephfs
+        )
 
-            traits = self._get_provider_traits(
-                self.compute_rp_uuids[self.compute])
-            for trait in (
-                'COMPUTE_STORAGE_VIRTIO_FS', 'COMPUTE_MEM_BACKING_FILE'):
-                self.assertIn(trait, traits)
-            server = self._create_server(networks='auto')
-            self._stop_server(server)
+        self._check_traits()
+        server = self._create_server(networks='auto')
+        self._stop_server(server)
 
-            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
-            self._attach_share(server, share_id)
-            self._start_server(server)
+        share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
+        self._attach_share(server, share_id)
+        self._start_server(server)
 
-            # tag is the filesystem target directory.
-            # if post /server/{server_id}/share was called without a specific
-            # tag then the tag is the share id.
-            self._assert_filesystem_tag(self._get_xml(server), share_id)
+        # tag is the filesystem target directory.
+        # if post /server/{server_id}/share was called without a specific
+        # tag then the tag is the share id.
+        self._assert_filesystem_tag(self._get_xml(server), share_id)
 
-            self._assert_share_in_metadata(
-                self._get_metadata_url(server), share_id, share_id)
-            return (server, share_id)
+        self._assert_share_in_metadata(
+            self._get_metadata_url(server), share_id, share_id)
+        return (server, share_id)
 
     def test_server_share_after_hard_reboot(self):
         """Verify that share is still available after a reboot"""
@@ -260,52 +239,47 @@ class ServerSharesTest(ServerSharesTestBase):
 
     def test_server_share_mount_failure(self):
         os.environ['OS_DEBUG'] = "true"
-        traits = self._get_provider_traits(self.compute_rp_uuids[self.compute])
-        for trait in ('COMPUTE_STORAGE_VIRTIO_FS', 'COMPUTE_MEM_BACKING_FILE'):
-            self.assertIn(trait, traits)
-        with mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
-            '.connect_volume',
-            side_effect=processutils.ProcessExecutionError
-        ):
-            server = self._create_server(networks='auto')
-            self._stop_server(server)
+        self._check_traits()
 
-            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
+        self.mock_connect.side_effect = processutils.ProcessExecutionError
+        server = self._create_server(networks='auto')
+        self._stop_server(server)
 
-            self._attach_share(server, share_id)
-            response = self._get_share(server, share_id)
+        share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
 
-            self.assertEqual(
-                response["share_id"], "4b021746-d0eb-4031-92aa-23c3bec182cd"
-            )
-            self.assertEqual(response["status"], "inactive")
+        self._attach_share(server, share_id)
+        response = self._get_share(server, share_id)
 
-            # Here we are using CastAsCallFixture so we got an exception from
-            # nova compute. This should not happen without the fixture and
-            # the api should answer with a 201 status code.
-            exc = self.assertRaises(
-                client.OpenStackApiException,
-                self._start_server,
-                server
-            )
+        self.assertEqual(
+            response["share_id"], "4b021746-d0eb-4031-92aa-23c3bec182cd"
+        )
+        self.assertEqual(response["status"], "inactive")
 
-            self.assertIn("nova.exception.ShareMountError", str(exc))
+        # Here we are using CastAsCallFixture so we got an exception from
+        # nova compute. This should not happen without the fixture and
+        # the api should answer with a 201 status code.
+        exc = self.assertRaises(
+            client.OpenStackApiException,
+            self._start_server,
+            server
+        )
 
-            log_out = self.stdlog.logger.output
+        self.assertIn("nova.exception.ShareMountError", str(exc))
 
-            self.assertIn(
-                "Share id 4b021746-d0eb-4031-92aa-23c3bec182cd mount error "
-                "from server",
-                log_out)
+        log_out = self.stdlog.logger.output
 
-            sm = share_mapping.ShareMapping.get_by_instance_uuid_and_share_id(
-                self.context, server['id'], share_id)
-            self.assertEqual(sm.status, 'error')
-            self.instance = instance.Instance.get_by_uuid(
-                self.context, server['id'])
-            self.assertEqual(self.instance.vm_state, 'error')
-            return (server, share_id)
+        self.assertIn(
+            "Share id 4b021746-d0eb-4031-92aa-23c3bec182cd mount error "
+            "from server",
+            log_out)
 
+        sm = share_mapping.ShareMapping.get_by_instance_uuid_and_share_id(
+            self.context, server['id'], share_id)
+        self.assertEqual(sm.status, 'error')
+        self.instance = instance.Instance.get_by_uuid(
+            self.context, server['id'])
+        self.assertEqual(self.instance.vm_state, 'error')
+        return (server, share_id)
 
     def test_server_start_fails_share_in_error(self):
         """Ensure a server can not start if its attached share is in error
@@ -332,49 +306,41 @@ class ServerSharesTest(ServerSharesTestBase):
 
         # Reboot to do another mount attempt and fix the error.
        # But the error is not fixed.
-        with mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
-            '.connect_volume',
-            side_effect=processutils.ProcessExecutionError
-        ):
-            # Here we are using CastAsCallFixture so we got an exception from
-            # nova api. This should not happen without the fixture.
-            exc = self.assertRaises(
-                client.OpenStackApiException,
-                self._reboot_server,
-                server,
-                hard=True
-            )
-
-            log_out = self.stdlog.logger.output
-
-            self.assertIn(
-                "Share id 4b021746-d0eb-4031-92aa-23c3bec182cd mount error "
-                "from server",
-                log_out)
-
-            sm = share_mapping.ShareMapping.get_by_instance_uuid_and_share_id(
-                self.context, server['id'], share_id)
-            self.assertEqual(sm.status, 'error')
-            self.instance = instance.Instance.get_by_uuid(
-                self.context, server['id'])
-            self.assertEqual(self.instance.vm_state, 'error')
+        self.mock_connect.side_effect = processutils.ProcessExecutionError
+        # Here we are using CastAsCallFixture so we got an exception from
+        # nova api. This should not happen without the fixture.
+        exc = self.assertRaises(
+            client.OpenStackApiException,
+            self._reboot_server,
+            server,
+            hard=True
+        )
+        log_out = self.stdlog.logger.output
+
+        self.assertIn(
+            "Share id 4b021746-d0eb-4031-92aa-23c3bec182cd mount error "
+            "from server",
+            log_out)
+
+        sm = share_mapping.ShareMapping.get_by_instance_uuid_and_share_id(
+            self.context, server['id'], share_id)
+        self.assertEqual(sm.status, 'error')
+        self.instance = instance.Instance.get_by_uuid(
+            self.context, server['id'])
+        self.assertEqual(self.instance.vm_state, 'error')
 
         # Reboot to do another mount attempt and fix the error.
         # But the error is fixed now.
-        with mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
-            '.connect_volume',
-        ):
-            self._reboot_server(server, hard=True)
-
-            sm = share_mapping.ShareMapping.get_by_instance_uuid_and_share_id(
-                self.context, server['id'], share_id)
-            self.assertEqual(sm.status, 'active')
-            self.instance = instance.Instance.get_by_uuid(
-                self.context, server['id'])
-            self.assertEqual(self.instance.vm_state, 'active')
+        self.mock_connect.side_effect = None
+        self._reboot_server(server, hard=True)
+
+        sm = share_mapping.ShareMapping.get_by_instance_uuid_and_share_id(
+            self.context, server['id'], share_id)
+        self.assertEqual(sm.status, 'active')
+        self.instance = instance.Instance.get_by_uuid(
+            self.context, server['id'])
+        self.assertEqual(self.instance.vm_state, 'active')
 
     def test_detach_server_share_in_error(self):
         """Ensure share can still be detached even if
@@ -385,52 +351,41 @@ class ServerSharesTest(ServerSharesTestBase):
 
         # Simulate an attempt to detach that fail due to umount error.
         # In that case we should have an umount error.
-        with mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
-            '.disconnect_volume',
-            side_effect=processutils.ProcessExecutionError
-        ):
-            # Ensure we failed to umount he share
-            log_out = self.stdlog.logger.output
+        self.mock_disconnect.side_effect = processutils.ProcessExecutionError
+
+        # Ensure we failed to umount he share
+        log_out = self.stdlog.logger.output
 
-            self.assertIn(
-                "Share id 4b021746-d0eb-4031-92aa-23c3bec182cd umount error",
-                log_out)
+        self.assertIn(
+            "Share id 4b021746-d0eb-4031-92aa-23c3bec182cd umount error",
+            log_out)
 
-            # We detach the share. As a consequence, we are leaking the share
-            # mounted on the compute.
-            self._detach_share(server, share_id)
+        # We detach the share. As a consequence, we are leaking the share
+        # mounted on the compute.
+        self._detach_share(server, share_id)
 
-            # Share is removed so not anymore in the DB.
-            self.assertRaises(
-                exception.ShareNotFound,
-                share_mapping.ShareMapping.get_by_instance_uuid_and_share_id,
-                self.context,
-                server['id'],
-                share_id
-            )
-            self.instance = instance.Instance.get_by_uuid(
-                self.context, server['id'])
-            self.assertEqual(self.instance.vm_state, 'error')
+        # Share is removed so not anymore in the DB.
+        self.assertRaises(
+            exception.ShareNotFound,
+            share_mapping.ShareMapping.get_by_instance_uuid_and_share_id,
+            self.context,
+            server['id'],
+            share_id
+        )
+        self.instance = instance.Instance.get_by_uuid(
+            self.context, server['id'])
+        self.assertEqual(self.instance.vm_state, 'error')
 
-            # Reboot the server to restart it without the share.
-            self._reboot_server(server, hard=True)
+        # Reboot the server to restart it without the share.
+        self._reboot_server(server, hard=True)
 
-            self.instance = instance.Instance.get_by_uuid(
-                self.context, server['id'])
-            self.assertEqual(self.instance.vm_state, 'active')
+        self.instance = instance.Instance.get_by_uuid(
+            self.context, server['id'])
+        self.assertEqual(self.instance.vm_state, 'active')
 
-    @mock.patch('nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
-                '.disconnect_volume',
-                side_effect=processutils.ProcessExecutionError)
-    @mock.patch('nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
-                '.connect_volume')
-    def test_server_share_umount_failure(self, mock_mount, mock_umount):
+    def test_server_share_umount_failure(self):
+        self.mock_disconnect.side_effect = processutils.ProcessExecutionError
         os.environ['OS_DEBUG'] = "true"
-        traits = self._get_provider_traits(self.compute_rp_uuids[self.compute])
-        for trait in ('COMPUTE_STORAGE_VIRTIO_FS', 'COMPUTE_MEM_BACKING_FILE'):
-            self.assertIn(trait, traits)
+        self._check_traits()
         server = self._create_server(networks='auto')
         self._stop_server(server)
 
@@ -465,50 +420,36 @@ class ServerSharesTest(ServerSharesTestBase):
         return (server, share_id)
 
     def test_server_resume_with_shares(self):
-        with mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
-            'disconnect_volume'
-        ), mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
-            'connect_volume'
-        ):
-            server = self._create_server(networks='auto')
-            self._stop_server(server)
+        server = self._create_server(networks='auto')
+        self._stop_server(server)
 
-            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
-            self._attach_share(server, share_id)
-            self._start_server(server)
-            self.assertRaises(
-                client.OpenStackApiException,
-                self._suspend_server,
-                server,
-            )
+        share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
+        self._attach_share(server, share_id)
+        self._start_server(server)
+        self.assertRaises(
+            client.OpenStackApiException,
+            self._suspend_server,
+            server,
+        )
 
     def test_server_rescue_with_shares(self):
-        with mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
-            'disconnect_volume'
-        ), mock.patch(
-            'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver.'
-            'connect_volume'
-        ):
-            server = self._create_server(networks='auto')
-            self._stop_server(server)
+        server = self._create_server(networks='auto')
+        self._stop_server(server)
 
-            share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
-            self._attach_share(server, share_id)
-            self._start_server(server)
-            self._rescue_server(server)
+        share_id = '4b021746-d0eb-4031-92aa-23c3bec182cd'
+        self._attach_share(server, share_id)
+        self._start_server(server)
+        self._rescue_server(server)
 
-            self._assert_filesystem_tag(self._get_xml(server), share_id)
+        self._assert_filesystem_tag(self._get_xml(server), share_id)
 
-            self._assert_share_in_metadata(
-                self._get_metadata_url(server), share_id, share_id)
+        self._assert_share_in_metadata(
+            self._get_metadata_url(server), share_id, share_id)
 
-            self._unrescue_server(server)
+        self._unrescue_server(server)
 
-            self._assert_filesystem_tag(self._get_xml(server), share_id)
+        self._assert_filesystem_tag(self._get_xml(server), share_id)
 
-            self._assert_share_in_metadata(
-                self._get_metadata_url(server), share_id, share_id)
-            return (server, share_id)
+        self._assert_share_in_metadata(
+            self._get_metadata_url(server), share_id, share_id)
+        return (server, share_id)