Attach Manila shares via virtiofs (drivers and compute manager part)

This patch does multiple things:
1) Extends the virt interface to pass share_info down to the driver and
   updates the signatures in all in tree drivers.
2) Extends the compute manager to pass the share_info to the driver at
   certain actions.
3) Implements the share_info handling in the libvirt driver.
3.1) Checks requirements for shares and reports the new capabilities
     COMPUTE_STORAGE_VIRTIO_FS and
     COMPUTE_MEM_BACKING_FILE to signal that.
3.2) Implements mount / unmount.
4) Changes the scheduler to request new capabilities.

Manila is the OpenStack Shared Filesystems service.
This series of patches implements changes required in Nova to allow
the shares provided by Manila to be associated with and attached to
instances using virtiofs.

Implements: blueprint libvirt-virtiofs-attach-manila-shares

Depends-On: https://review.opendev.org/c/openstack/os-traits/+/832769
Depends-On: https://review.opendev.org/c/openstack/manila/+/926491
Change-Id: I3a4bca7f03cfa9a1bc20f3f9937bb9b44fe2cde3
This commit is contained in:
René Ribaud
2022-03-10 14:18:47 +01:00
parent bee0a5c54a
commit 2f0fc21d2a
22 changed files with 1730 additions and 112 deletions

View File

@ -6032,6 +6032,14 @@ class API:
host_statuses[instance.uuid] = host_status
return host_statuses
def allow_share(self, context, instance, share_mapping):
    """Ask the compute host owning the instance to grant share access."""
    self.compute_rpcapi.allow_share(context, instance, share_mapping)
def deny_share(self, context, instance, share_mapping):
    """Ask the compute host owning the instance to revoke share access."""
    self.compute_rpcapi.deny_share(context, instance, share_mapping)
def target_host_cell(fn):
"""Target a host-based function to a cell.

View File

@ -45,6 +45,7 @@ import eventlet.semaphore
import eventlet.timeout
import futurist
from keystoneauth1 import exceptions as keystone_exception
from openstack import exceptions as sdk_exc
import os_traits
from oslo_log import log as logging
import oslo_messaging as messaging
@ -91,6 +92,7 @@ from nova import safe_utils
from nova.scheduler.client import query
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova.share import manila
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
@ -618,7 +620,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='6.3')
target = messaging.Target(version='6.4')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@ -634,6 +636,7 @@ class ComputeManager(manager.Manager):
self.virtapi = ComputeVirtAPI(self)
self.network_api = neutron.API()
self.volume_api = cinder.API()
self.manila_api = manila.API()
self.image_api = glance.API()
self._last_bw_usage_poll = 0.0
self.compute_api = compute.API()
@ -3100,11 +3103,14 @@ class ComputeManager(manager.Manager):
return timeout, retry_interval
def _power_off_instance(self, instance, clean_shutdown=True):
def _power_off_instance(self, context, instance, clean_shutdown=True):
    """Power off an instance on this host.

    :param context: security context
    :param instance: the instance to power off
    :param clean_shutdown: whether to attempt a graceful shutdown first

    Share mappings are fetched before the guest is stopped; a mapping
    stuck in attaching/detaching aborts the operation (see
    _get_share_info). Shares are unmounted only after the guest is
    powered off, so the guest never sees a mount disappear.
    """
    share_info = self._get_share_info(context, instance)
    timeout, retry_interval = self._get_power_off_values(
        instance, clean_shutdown)
    self.driver.power_off(instance, timeout, retry_interval)
    # deactivate_all() presumably flips the mappings out of the active
    # state before the host-side unmount — TODO confirm semantics.
    share_info.deactivate_all()
    self._umount_all_shares(context, instance, share_info)
def _shutdown_instance(self, context, instance,
bdms, requested_networks=None, notify=True,
@ -3366,6 +3372,7 @@ class ComputeManager(manager.Manager):
@utils.synchronized(instance.uuid)
def do_stop_instance():
current_power_state = self._get_power_state(instance)
LOG.debug('Stopping instance; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, current VM '
@ -3397,7 +3404,7 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.POWER_OFF,
phase=fields.NotificationPhase.START)
self._power_off_instance(instance, clean_shutdown)
self._power_off_instance(context, instance, clean_shutdown)
instance.power_state = self._get_power_state(instance)
instance.vm_state = vm_states.STOPPED
instance.task_state = None
@ -3416,9 +3423,14 @@ class ComputeManager(manager.Manager):
block_device_info = self._get_instance_block_device_info(context,
instance)
accel_info = self._get_accel_info(context, instance)
share_info = self._get_share_info(context, instance)
self._mount_all_shares(context, instance, share_info)
self.driver.power_on(context, instance,
network_info,
block_device_info, accel_info)
block_device_info, accel_info, share_info)
share_info.activate_all()
def _delete_snapshot_of_shelved_instance(self, context, instance,
snapshot_id):
@ -3507,7 +3519,7 @@ class ComputeManager(manager.Manager):
except NotImplementedError:
# Fallback to just powering off the instance if the
# hypervisor doesn't implement the soft_delete method
self.driver.power_off(instance)
self.driver.power_off(context, instance)
instance.power_state = self._get_power_state(instance)
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
@ -3682,7 +3694,7 @@ class ComputeManager(manager.Manager):
detach_block_devices(context, bdms,
detach_root_bdm=detach_root_bdm)
else:
self._power_off_instance(instance, clean_shutdown=True)
self._power_off_instance(context, instance, clean_shutdown=True)
detach_block_devices(context, bdms,
detach_root_bdm=detach_root_bdm)
if reimage_boot_volume:
@ -4261,6 +4273,47 @@ class ComputeManager(manager.Manager):
for bdm in bdms_to_delete:
bdms.objects.remove(bdm)
def _get_share_info(self, context, instance):
    """Return the list of share mappings attached to an instance.

    Mappings in ATTACHING or DETACHING state are unexpected here: the
    instance is put in error state and ShareErrorUnexpectedStatus is
    raised. Mappings in ERROR state are logged as a warning but still
    returned.

    :param context: security context
    :param instance: instance whose share mappings are fetched
    :returns: objects.ShareMappingList for the instance
    :raises: exception.ShareErrorUnexpectedStatus
    """
    share_info = objects.ShareMappingList(context)

    for share_mapping in objects.ShareMappingList.get_by_instance_uuid(
        context, instance.uuid
    ):
        if (
            share_mapping.status == fields.ShareMappingStatus.ATTACHING or
            share_mapping.status == fields.ShareMappingStatus.DETACHING
        ):
            # If the share status is attaching it means we are racing with
            # the compute node. The mount is not completed yet or something
            # really bad happened. So we set the instance in error state.
            # NOTE(review): instance.id (DB integer id) is logged and
            # passed as instance_uuid below; instance.uuid looks like the
            # intended value — TODO confirm.
            LOG.error(
                "Share id '%s' attached to server id '%s' is "
                "still in '%s' state. Setting the instance "
                "in error.",
                share_mapping.share_id,
                instance.id,
                share_mapping.status,
            )
            self._set_instance_obj_error_state(
                instance, clean_task_state=True
            )
            raise exception.ShareErrorUnexpectedStatus(
                share_id=share_mapping.share_id,
                instance_uuid=instance.id,
            )

        if share_mapping.status == fields.ShareMappingStatus.ERROR:
            LOG.warning(
                "Share id '%s' attached to server id '%s' is in "
                "error state.",
                share_mapping.share_id,
                instance.id
            )

        share_info.objects.append(share_mapping)

    return share_info
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@ -4531,6 +4584,249 @@ class ComputeManager(manager.Manager):
LOG.debug('Instance disappeared during volume snapshot delete',
instance=instance)
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
def allow_share(self, context, instance, share_mapping):
    """Grant this host access to a share, serialized per share id.

    Ensures an 'ip' access rule for CONF.my_shared_fs_storage_ip
    exists on the share (creating it through manila if missing), polls
    until manila reports the rule active, then moves the share mapping
    to INACTIVE. Any failure flags the mapping in ERROR and re-raises.

    :param context: security context
    :param instance: instance the share is being attached to
    :param share_mapping: share mapping to grant access for
    """

    @utils.synchronized(share_mapping.share_id)
    def _allow_share(context, instance, share_mapping):
        def _has_access():
            # True only when manila knows the rule AND it is active.
            access = self.manila_api.get_access(
                context,
                share_mapping.share_id,
                access_type,
                access_to
            )
            return access is not None and access.state == 'active'

        def _apply_policy():
            # self.manila_api.lock(share_mapping.share_id)
            # Explicitly locking the share is not needed as
            # create_access_rule() from the sdk will do it if the
            # lock_visibility and lock_deletion flags are passed
            self.manila_api.allow(
                context,
                share_mapping.share_id,
                access_type,
                access_to,
                "rw",
            )

        def _wait_policy_to_be_applied():
            # Ensure the share policy is updated, this will avoid
            # a race condition mounting the share if it is not the case.
            max_retries = CONF.manila.share_apply_policy_timeout
            attempt_count = 0
            while attempt_count < max_retries:
                if _has_access():
                    LOG.debug(
                        "Allow policy set on share %s ",
                        share_mapping.share_id,
                    )
                    break
                else:
                    LOG.debug(
                        "Waiting policy to be set on share %s ",
                        share_mapping.share_id,
                    )
                    time.sleep(1)
                    attempt_count += 1

            if attempt_count >= max_retries:
                raise exception.ShareAccessGrantError(
                    share_id=share_mapping.share_id,
                    reason="Failed to set allow policy on share, "
                    "too many retries",
                )

        try:
            access_type = 'ip'
            access_to = CONF.my_shared_fs_storage_ip

            if not _has_access():
                _apply_policy()
                _wait_policy_to_be_applied()

            # Set the share from attaching to inactive
            self._set_share_mapping_status(
                share_mapping, fields.ShareMappingStatus.INACTIVE
            )

        except (
            exception.ShareNotFound,
            exception.ShareAccessGrantError,
        ) as e:
            self._set_share_mapping_status(
                share_mapping, fields.ShareMappingStatus.ERROR
            )
            LOG.error(e.format_message())
            raise

        except (
            sdk_exc.BadRequestException,
        ) as e:
            self._set_share_mapping_status(
                share_mapping, fields.ShareMappingStatus.ERROR
            )
            LOG.error(
                "%s: %s error from url: %s, %s",
                e.message,
                e.source,
                e.url,
                e.details,
            )
            raise

        except keystone_exception.http.Unauthorized as e:
            self._set_share_mapping_status(
                share_mapping, fields.ShareMappingStatus.ERROR
            )
            LOG.error(e)
            raise

    _allow_share(context, instance, share_mapping)
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
def deny_share(self, context, instance, share_mapping):
    """Revoke this host's access to a share, serialized per share id.

    The manila access rule is removed only when no other share mapping
    still uses the share; the share mapping record is deleted on every
    successful path. ShareNotFound/ShareAccessNotFound from manila are
    ignored so the detach can still complete; other failures flag the
    mapping in ERROR and re-raise.

    :param context: security context
    :param instance: instance the share is being detached from
    :param share_mapping: share mapping to revoke access for
    """

    @utils.synchronized(share_mapping.share_id)
    def _deny_share(context, instance, share_mapping):
        def check_share_usage(context, instance_uuid):
            share_mappings_used_by_share = (
                objects.share_mapping.ShareMappingList.get_by_share_id(
                    context, share_mapping.share_id
                )
            )

            # Logic explanation:
            #
            # Warning: Here we have a list of share_mapping using our
            # share (usually share_mappings is a list of share_mapping used
            # by an instance).
            # A share IS NOT used (detachable) if:
            # - The share status is INACTIVE or ERROR on our instance.
            # - The share status is DETACHING on all other instances.
            # +-- reverse the logic as the function check if a share
            # |   IS used.
            # v
            return not all(
                (
                    (
                        sm.instance_uuid == instance_uuid and
                        (
                            sm.status
                            in (
                                fields.ShareMappingStatus.INACTIVE,
                                fields.ShareMappingStatus.ERROR,
                            )
                        )
                    ) or
                    sm.status == fields.ShareMappingStatus.DETACHING
                )
                for sm in share_mappings_used_by_share
            )

        try:
            still_used = check_share_usage(
                context, instance.uuid
            )

            access_type = 'ip'
            access_to = CONF.my_shared_fs_storage_ip

            if not still_used:
                # self.manila_api.unlock(share_mapping.share_id)
                # Explicit unlocking the share is not needed as
                # delete_access_rule() from the sdk will do it if the
                # "unrestrict" parameter is passed
                self.manila_api.deny(
                    context,
                    share_mapping.share_id,
                    access_type,
                    access_to,
                )

            share_mapping.delete()

        except exception.ShareAccessRemovalError as e:
            self._set_share_mapping_status(
                share_mapping, fields.ShareMappingStatus.ERROR
            )
            LOG.error(e.format_message())
            raise

        except keystone_exception.http.Unauthorized as e:
            self._set_share_mapping_status(
                share_mapping, fields.ShareMappingStatus.ERROR
            )
            LOG.error(e)
            raise

        except (exception.ShareNotFound, exception.ShareAccessNotFound):
            # Ignore the error if for any reason there is nothing to
            # remove from manila, so we can still detach the share.
            share_mapping.delete()

    _deny_share(context, instance, share_mapping)
@wrap_exception()
def _mount_all_shares(self, context, instance, share_info):
    """Mount every share mapping attached to the instance."""
    for mapping in share_info:
        self._mount_share(context, instance, mapping)
@wrap_exception()
def _umount_all_shares(self, context, instance, share_info):
    """Unmount every share mapping attached to the instance."""
    for mapping in share_info:
        self._umount_share(context, instance, mapping)
@wrap_exception()
def _mount_share(self, context, instance, share_mapping):
    """Mount one share through the driver, serialized per share id.

    On ShareMountError the mapping and the instance are both put in
    error state and the exception is re-raised.
    """

    @utils.synchronized(share_mapping.share_id)
    def _do_mount(context, instance, share_mapping):
        try:
            LOG.debug("Mounting share %s", share_mapping.share_id)
            self.driver.mount_share(context, instance, share_mapping)
        except exception.ShareMountError as e:
            self._set_share_mapping_and_instance_in_error(
                instance, share_mapping
            )
            LOG.error(e.format_message())
            raise

    _do_mount(context, instance, share_mapping)
@wrap_exception()
def _umount_share(self, context, instance, share_mapping):
    """Unmount one share through the driver, serialized per share id.

    On ShareUmountError the mapping and the instance are both put in
    error state and the exception is re-raised.
    """

    @utils.synchronized(share_mapping.share_id)
    def _do_umount(context, instance, share_mapping):
        try:
            self.driver.umount_share(context, instance, share_mapping)
        except exception.ShareUmountError as e:
            self._set_share_mapping_and_instance_in_error(
                instance, share_mapping
            )
            LOG.error(e.format_message())
            raise

    _do_umount(context, instance, share_mapping)
def _set_share_mapping_status(self, share_mapping, status):
share_mapping.status = status
share_mapping.save()
def _set_share_mapping_and_instance_in_error(
    self, instance, share_mapping
):
    """Persist ERROR on the mapping, then put the instance in error."""
    share_mapping.status = fields.ShareMappingStatus.ERROR
    share_mapping.save()
    self._set_instance_obj_error_state(
        instance, clean_task_state=True
    )
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
@ -4705,7 +5001,7 @@ class ComputeManager(manager.Manager):
phase=fields.NotificationPhase.START)
try:
self._power_off_instance(instance, clean_shutdown)
self._power_off_instance(context, instance, clean_shutdown)
self.driver.rescue(context, instance, network_info,
rescue_image_meta, admin_password,
@ -5995,7 +6291,7 @@ class ComputeManager(manager.Manager):
# potentially running in two places.
LOG.debug('Stopping instance', instance=instance)
try:
self._power_off_instance(instance)
self._power_off_instance(ctxt, instance)
except Exception as e:
LOG.exception('Failed to power off instance.', instance=instance)
raise exception.InstancePowerOffFailure(reason=str(e))
@ -6944,7 +7240,7 @@ class ComputeManager(manager.Manager):
# running.
if instance.power_state == power_state.PAUSED:
clean_shutdown = False
self._power_off_instance(instance, clean_shutdown)
self._power_off_instance(context, instance, clean_shutdown)
self.driver.snapshot(context, instance, image_id, update_task_state)
instance.system_metadata['shelved_at'] = timeutils.utcnow().isoformat()
@ -7006,7 +7302,7 @@ class ComputeManager(manager.Manager):
self.host, action=fields.NotificationAction.SHELVE_OFFLOAD,
phase=fields.NotificationPhase.START, bdms=bdms)
self._power_off_instance(instance, clean_shutdown)
self._power_off_instance(context, instance, clean_shutdown)
current_power_state = self._get_power_state(instance)
network_info = self.network_api.get_instance_nw_info(context, instance)
@ -10784,7 +11080,7 @@ class ComputeManager(manager.Manager):
"DELETED but still present on host.",
instance.name, instance=instance)
try:
self.driver.power_off(instance)
self.driver.power_off(context, instance)
except Exception:
LOG.warning("Failed to power off instance",
instance=instance, exc_info=True)

View File

@ -405,6 +405,7 @@ class ComputeAPI(object):
* 6.1 - Add reimage_boot_volume parameter to rebuild_instance()
* 6.2 - Add target_state parameter to rebuild_instance()
* 6.3 - Add delete_attachment parameter to remove_volume_connection
* 6.4 - Add allow_share() and deny_share()
'''
VERSION_ALIASES = {
@ -1477,6 +1478,38 @@ class ComputeAPI(object):
volume_id=volume_id, snapshot_id=snapshot_id,
delete_info=delete_info)
def allow_share(self, ctxt, instance, share_mapping):
    """Cast an allow_share request to the instance's compute host.

    :param ctxt: request context
    :param instance: instance the share is attached to; its host is the
        RPC target
    :param share_mapping: share mapping to grant access for
    :raises: exception.UnsupportedRPCVersion if the target compute does
        not support RPC 6.4 (shares)
    """
    version = '6.4'
    client = self.router.client(ctxt)
    if not client.can_send_version(version):
        raise exception.UnsupportedRPCVersion(
            api="allow_share",
            required=version)
    # Reuse the client resolved above rather than asking the router a
    # second time.
    cctxt = client.prepare(
        server=_compute_host(None, instance), version=version)
    cctxt.cast(
        ctxt,
        "allow_share",
        instance=instance,
        share_mapping=share_mapping
    )
def deny_share(self, ctxt, instance, share_mapping):
    """Cast a deny_share request to the instance's compute host.

    :param ctxt: request context
    :param instance: instance the share is attached to; its host is the
        RPC target
    :param share_mapping: share mapping to revoke access for
    :raises: exception.UnsupportedRPCVersion if the target compute does
        not support RPC 6.4 (shares)
    """
    version = '6.4'
    client = self.router.client(ctxt)
    if not client.can_send_version(version):
        raise exception.UnsupportedRPCVersion(
            api="deny_share",
            required=version)
    # Reuse the client resolved above rather than asking the router a
    # second time.
    cctxt = client.prepare(
        server=_compute_host(None, instance), version=version)
    cctxt.cast(
        ctxt,
        "deny_share",
        instance=instance,
        share_mapping=share_mapping,
    )
def external_instance_event(self, ctxt, instances, events, host=None):
instance = instances[0]
version = self._ver(ctxt, '5.0')

View File

@ -35,6 +35,7 @@ Possible values:
Related options:
* my_block_storage_ip
* my_shared_fs_storage_ip
"""),
cfg.StrOpt("my_block_storage_ip",
default="$my_ip",
@ -48,6 +49,20 @@ Possible values:
Related options:
* my_ip - if my_block_storage_ip is not set, then my_ip value is used.
"""),
cfg.StrOpt("my_shared_fs_storage_ip",
default="$my_ip",
help="""
The IP address which is used to connect to the shared_fs storage (manila)
network.
Possible values:
* String with valid IP address. Default is IP address of this host.
Related options:
* my_ip - if my_shared_fs_storage_ip is not set, then my_ip value is used.
"""),
cfg.HostDomainOpt("host",
default=socket.gethostname(),

View File

@ -716,6 +716,16 @@ class ShareProtocolUnknown(NotFound):
msg_fmt = _("Share protocol %(share_proto)s is unknown.")
class ShareError(NovaException):
    """A share used by an instance is in error state."""
    msg_fmt = _("Share %(share_id)s used by instance %(instance_uuid)s "
                "is in error state.")
class ShareErrorUnexpectedStatus(NovaException):
    """A share used by an instance is in an unexpected (transient) state."""
    msg_fmt = _("Share %(share_id)s used by instance %(instance_uuid)s "
                "is in an unexpected state.")
class ShareUmountError(NovaException):
msg_fmt = _("Share id %(share_id)s umount error "
"from server %(server_id)s.\n"

View File

@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
SERVICE_VERSION = 67
SERVICE_VERSION = 68
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@ -234,7 +234,9 @@ SERVICE_VERSION_HISTORY = (
# Version 67: Compute RPC v6.3:
# Add delete_attachment parameter to remove_volume_connection()
{'compute_rpc': '6.3'},
# Version 68: Compute RPC v6.4:
# Add support for shares
{'compute_rpc': '6.4'},
)
# This is the version after which we can rely on having a persistent

View File

@ -1050,7 +1050,7 @@ class NodeDevice(object):
def attach(self):
pass
def dettach(self):
def detach(self):
pass
def reset(self):

View File

@ -48,6 +48,8 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
os_traits.COMPUTE_ADDRESS_SPACE_EMULATED,
os_traits.COMPUTE_ADDRESS_SPACE_PASSTHROUGH,
os_traits.COMPUTE_SECURITY_STATELESS_FIRMWARE,
os_traits.COMPUTE_STORAGE_VIRTIO_FS,
os_traits.COMPUTE_MEM_BACKING_FILE,
]
])

View File

@ -8659,3 +8659,52 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
self.assertRaises(
exception.ServiceUnavailable,
self.compute_api.detach_volume, self.context, instance, None)
def get_fake_share_mapping(self):
    """Build a minimal inactive NFS ShareMapping fixture."""
    share_mapping = objects.ShareMapping(self.context)
    attrs = {
        'uuid': uuids.share_mapping,
        'instance_uuid': uuids.instance,
        'share_id': uuids.share,
        'status': 'inactive',
        'tag': 'fake_tag',
        'export_location': '192.168.122.152:/manila/share',
        'share_proto': 'NFS',
    }
    for name, value in attrs.items():
        setattr(share_mapping, name, value)
    return share_mapping
@mock.patch('oslo_messaging.rpc.client._BaseCallContext.cast')
def test_allow_share(self, mock_cast):
    """allow_share casts to the compute manager with the share mapping."""
    instance = self._create_instance_obj(
        params=dict(vm_state=vm_states.STOPPED))
    self.assertEqual(instance.vm_state, vm_states.STOPPED)
    self.assertIsNone(instance.task_state)
    share_mapping = self.get_fake_share_mapping()
    self.compute_api.allow_share(
        self.context, instance, share_mapping=share_mapping
    )
    mock_cast.assert_called_with(
        self.context,
        'allow_share',
        instance=instance,
        share_mapping=share_mapping)
@mock.patch('oslo_messaging.rpc.client._BaseCallContext.cast')
def test_deny_share(self, mock_cast):
    """deny_share casts to the compute manager with the share mapping."""
    instance = self._create_instance_obj(
        params=dict(vm_state=vm_states.STOPPED))
    self.assertEqual(instance.vm_state, vm_states.STOPPED)
    self.assertIsNone(instance.task_state)
    share_mapping = self.get_fake_share_mapping()
    self.compute_api.deny_share(
        self.context, instance, share_mapping=share_mapping
    )
    mock_cast.assert_called_with(
        self.context,
        'deny_share',
        instance=instance,
        share_mapping=share_mapping)

View File

@ -1655,6 +1655,26 @@ class ComputeTestCase(BaseTestCase,
'm1.small')
self.tiny_flavor = objects.Flavor.get_by_name(self.context, 'm1.tiny')
def fake_share_info(self):
    """Return a ShareMappingList holding one inactive NFS mapping."""
    db_mapping = {
        'id': 1,
        'created_at': None,
        'updated_at': None,
        'uuid': uuids.share_mapping,
        'instance_uuid': uuids.instance,
        'share_id': uuids.share,
        'status': 'inactive',
        'tag': 'fake_tag',
        'export_location': 'fake_export_location',
        'share_proto': 'NFS',
    }
    return objects.base.obj_make_list(
        self.context,
        objects.ShareMappingList(self.context),
        objects.ShareMapping,
        [db_mapping])
def test_wrap_instance_fault(self):
inst = {"uuid": uuids.instance}
@ -2625,7 +2645,8 @@ class ComputeTestCase(BaseTestCase,
called = {'power_on': False}
def fake_driver_power_on(self, context, instance, network_info,
block_device_info, accel_device_info=None):
block_device_info, accel_device_info=None,
share_info=None):
called['power_on'] = True
self.stub_out('nova.virt.fake.FakeDriver.power_on',
@ -2644,13 +2665,76 @@ class ComputeTestCase(BaseTestCase,
self.assertTrue(called['power_on'])
self.compute.terminate_instance(self.context, inst_obj, [])
@mock.patch.object(compute_manager.ComputeManager,
                   '_get_instance_block_device_info')
@mock.patch('nova.network.neutron.API.get_instance_nw_info')
@mock.patch.object(fake.FakeDriver, 'power_on')
@mock.patch('nova.objects.share_mapping.ShareMapping.activate')
@mock.patch('nova.compute.manager.ComputeManager._mount_share')
@mock.patch('nova.compute.manager.ComputeManager._get_share_info')
def test_power_on_with_share(self, mock_share, mock_mount, mock_activate,
        mock_power_on, mock_nw_info, mock_blockdev):
    """_power_on mounts, passes and activates shares of the instance."""
    instance = self._create_fake_instance_obj()
    share_info = self.fake_share_info()
    mock_share.return_value = share_info
    mock_nw_info.return_value = 'nw_info'
    mock_blockdev.return_value = 'blockdev_info'
    self.compute._power_on(self.context, instance)
    mock_share.assert_called_once_with(self.context, instance)
    # share_info is forwarded to the driver after accel_info ([]).
    mock_power_on.assert_called_once_with(
        self.context,
        instance,
        'nw_info',
        'blockdev_info',
        [],
        share_info
    )
    mock_mount.assert_called_once_with(
        self.context, instance, share_info[0]
    )
    mock_activate.assert_called_once()
@mock.patch.object(compute_manager.ComputeManager,
                   '_get_instance_block_device_info')
@mock.patch('nova.network.neutron.API.get_instance_nw_info')
@mock.patch.object(fake.FakeDriver, 'power_on')
@mock.patch('nova.objects.share_mapping.ShareMapping.activate')
@mock.patch('nova.compute.manager.ComputeManager._mount_share')
@mock.patch('nova.compute.manager.ComputeManager._get_share_info')
def test_power_on_with_no_share(self, mock_shares, mock_mount,
        mock_activate, mock_power_on, mock_nw_info, mock_blockdev):
    """_power_on with an empty share list neither mounts nor activates."""
    instance = self._create_fake_instance_obj()
    share_info = objects.ShareMappingList()
    mock_shares.return_value = share_info
    mock_nw_info.return_value = 'nw_info'
    mock_blockdev.return_value = 'blockdev_info'
    self.compute._power_on(self.context, instance)
    mock_shares.assert_called_once_with(self.context, instance)
    mock_power_on.assert_called_once_with(
        self.context,
        instance,
        'nw_info',
        'blockdev_info',
        [],
        share_info
    )
    mock_mount.assert_not_called()
    mock_activate.assert_not_called()
@mock.patch('nova.compute.manager.ComputeManager._get_share_info')
@mock.patch.object(compute_manager.ComputeManager,
'_get_instance_block_device_info')
@mock.patch('nova.network.neutron.API.get_instance_nw_info')
@mock.patch.object(fake.FakeDriver, 'power_on')
@mock.patch('nova.accelerator.cyborg._CyborgClient.get_arqs_for_instance')
def test_power_on_with_accels(self, mock_get_arqs,
mock_power_on, mock_nw_info, mock_blockdev):
mock_power_on, mock_nw_info, mock_blockdev, mock_shares):
share_info = objects.ShareMappingList()
mock_shares.return_value = share_info
instance = self._create_fake_instance_obj()
instance.flavor.extra_specs = {'accel:device_profile': 'mydp'}
accel_info = [{'k1': 'v1', 'k2': 'v2'}]
@ -2660,8 +2744,14 @@ class ComputeTestCase(BaseTestCase,
self.compute._power_on(self.context, instance)
mock_get_arqs.assert_called_once_with(instance['uuid'])
mock_power_on.assert_called_once_with(self.context,
instance, 'nw_info', 'blockdev_info', accel_info)
mock_power_on.assert_called_once_with(
self.context,
instance,
'nw_info',
'blockdev_info',
accel_info,
share_info
)
def test_power_off(self):
# Ensure instance can be powered off.
@ -2689,6 +2779,60 @@ class ComputeTestCase(BaseTestCase,
self.assertTrue(called['power_off'])
self.compute.terminate_instance(self.context, inst_obj, [])
@mock.patch.object(compute_manager.ComputeManager,
                   '_get_instance_block_device_info')
@mock.patch('nova.network.neutron.API.get_instance_nw_info')
@mock.patch.object(fake.FakeDriver, 'power_off')
@mock.patch('nova.objects.share_mapping.ShareMapping.deactivate')
@mock.patch('nova.compute.manager.ComputeManager._umount_share')
@mock.patch('nova.compute.manager.ComputeManager._get_share_info')
def test_power_off_with_share(self, mock_share, mock_umount,
        mock_deactivate, mock_power_off, mock_nw_info, mock_blockdev):
    """_power_off_instance unmounts and deactivates attached shares."""
    instance = self._create_fake_instance_obj()
    share_info = self.fake_share_info()
    mock_share.return_value = share_info
    mock_nw_info.return_value = 'nw_info'
    mock_blockdev.return_value = 'blockdev_info'
    self.compute._power_off_instance(self.context, instance)
    mock_share.assert_called_once_with(self.context, instance)
    # 60/10 are the default shutdown timeout / retry interval values.
    mock_power_off.assert_called_once_with(
        instance,
        60,
        10
    )
    mock_umount.assert_called_once_with(
        self.context, instance, share_info[0]
    )
    mock_deactivate.assert_called_once()
@mock.patch.object(compute_manager.ComputeManager,
                   '_get_instance_block_device_info')
@mock.patch('nova.network.neutron.API.get_instance_nw_info')
@mock.patch.object(fake.FakeDriver, 'power_off')
@mock.patch('nova.objects.share_mapping.ShareMapping.deactivate')
@mock.patch('nova.compute.manager.ComputeManager._umount_share')
@mock.patch('nova.compute.manager.ComputeManager._get_share_info')
def test_power_off_with_no_share(self, mock_share, mock_umount,
        mock_deactivate, mock_power_off, mock_nw_info, mock_blockdev):
    """_power_off_instance with no shares neither unmounts nor deactivates."""
    instance = self._create_fake_instance_obj()
    share_info = objects.ShareMappingList()
    mock_share.return_value = share_info
    mock_nw_info.return_value = 'nw_info'
    mock_blockdev.return_value = 'blockdev_info'
    self.compute._power_off_instance(self.context, instance)
    mock_share.assert_called_once_with(self.context, instance)
    # 60/10 are the default shutdown timeout / retry interval values.
    mock_power_off.assert_called_once_with(
        instance,
        60,
        10
    )
    mock_umount.assert_not_called()
    mock_deactivate.assert_not_called()
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch.object(nova.context.RequestContext, 'elevated')
def test_pause(self, mock_context, mock_notify):
@ -7113,7 +7257,8 @@ class ComputeTestCase(BaseTestCase,
mock_get.assert_called_once_with(ctxt,
{'deleted': True,
'soft_deleted': False})
mock_power.assert_has_calls([mock.call(inst1), mock.call(inst2)])
mock_power.assert_has_calls(
[mock.call(ctxt, inst1), mock.call(ctxt, inst2)])
@mock.patch.object(compute_manager.ComputeManager,
'_get_instances_on_driver')
@ -7131,7 +7276,8 @@ class ComputeTestCase(BaseTestCase,
mock_get.assert_called_once_with(ctxt,
{'deleted': True,
'soft_deleted': False})
mock_power.assert_has_calls([mock.call(inst1), mock.call(inst2)])
mock_power.assert_has_calls(
[mock.call(ctxt, inst1), mock.call(ctxt, inst2)])
@mock.patch.object(compute_manager.ComputeManager,
'_get_instances_on_driver')

View File

@ -26,6 +26,7 @@ from eventlet import event as eventlet_event
from eventlet import timeout as eventlet_timeout
from keystoneauth1 import exceptions as keystone_exception
import netaddr
from openstack import exceptions as sdk_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
@ -116,6 +117,63 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
}
}
def get_fake_share_mapping(self):
    """Build a minimal inactive NFS ShareMapping fixture."""
    share_mapping = objects.ShareMapping(self.context)
    attrs = {
        'uuid': uuids.uuid,
        'instance_uuid': uuids.instance,
        'share_id': uuids.share_id,
        'status': 'inactive',
        'tag': 'fake_tag',
        'export_location': '192.168.122.152:/manila/share',
        'share_proto': 'NFS',
    }
    for name, value in attrs.items():
        setattr(share_mapping, name, value)
    return share_mapping
def get_fake_share_access(self):
    """Return an active ip-type 'rw' Access object, as manila would."""
    return nova.share.manila.Access.from_dict({
        "access_level": "rw",
        "state": "active",
        "id": "507bf114-36f2-4f56-8cf4-857985ca87c1",
        "access_type": "ip",
        "access_to": "192.168.0.1",
        "access_key": None,
    })
def fake_share_info(self):
    """Return a ShareMappingList with one inactive and one error mapping."""
    inactive_mapping = {
        'id': 1,
        'created_at': None,
        'updated_at': None,
        'uuid': uuids.share_mapping,
        'instance_uuid': '386dbea6-0338-4104-8eb9-42b214b40311',
        'share_id': '232a4b40-306b-4cce-8bf4-689d2e671552',
        'status': 'inactive',
        'tag': 'fake_tag',
        'export_location': 'fake_export_location',
        'share_proto': 'NFS',
    }
    # Second mapping is in error state so tests can exercise the
    # error-handling paths of _get_share_info.
    error_mapping = {
        'id': 2,
        'created_at': None,
        'updated_at': None,
        'uuid': uuids.share_mapping2,
        'instance_uuid': '386dbea6-0338-4104-8eb9-42b214b40312',
        'share_id': '232a4b40-306b-4cce-8bf4-689d2e671553',
        'status': 'error',
        'tag': 'fake_tag2',
        'export_location': 'fake_export_location2',
        'share_proto': 'NFS',
    }
    return objects.base.obj_make_list(
        self.context,
        objects.ShareMappingList(self.context),
        objects.ShareMapping,
        [inactive_mapping, error_mapping])
@mock.patch.object(manager.ComputeManager, '_get_power_state')
@mock.patch.object(manager.ComputeManager, '_sync_instance_power_state')
@mock.patch.object(objects.Instance, 'get_by_uuid')
@ -2117,22 +2175,632 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_instance_save.assert_called_once_with()
self.assertIsNone(instance.task_state)
@mock.patch('nova.compute.manager.ComputeManager._get_share_info')
@mock.patch('nova.virt.fake.FakeDriver.power_off')
@mock.patch.object(compute_utils, 'get_value_from_system_metadata',
return_value=CONF.shutdown_timeout)
def test_power_off_values(self, mock_get_metadata, mock_power_off):
def test_power_off_values(
    self, mock_get_metadata, mock_power_off, mock_share
):
    """_power_off_instance forwards the configured timeout/retry values."""
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    self.compute._power_off_instance(
        self.context, instance, clean_shutdown=True
    )
    mock_power_off.assert_called_once_with(
        instance,
        CONF.shutdown_timeout,
        20)
@mock.patch('nova.objects.ShareMappingList.get_by_instance_uuid')
def test_get_share_info(self, mock_db):
    """_get_share_info returns the instance's mappings unchanged."""
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_db.return_value = self.fake_share_info()
    # Delete the second share_mapping which is in error state
    del mock_db.return_value.objects[1]
    share_info = self.compute._get_share_info(self.context, instance)
    self.assertIsInstance(
        share_info, objects.share_mapping.ShareMappingList)
    self.assertEqual(len(share_info), 1)
    self.assertIsInstance(
        share_info[0], objects.share_mapping.ShareMapping)
    self.assertEqual(share_info[0].id, 1)
    self.assertEqual(
        share_info[0].instance_uuid,
        '386dbea6-0338-4104-8eb9-42b214b40311')
    self.assertEqual(
        share_info[0].share_id, '232a4b40-306b-4cce-8bf4-689d2e671552')
    self.assertEqual(share_info[0].status, 'inactive')
    self.assertEqual(share_info[0].tag, 'fake_tag')
    self.assertEqual(share_info[0].export_location, 'fake_export_location')
    self.assertEqual(share_info[0].share_proto, 'NFS')
@mock.patch('nova.compute.manager.LOG', autospec=True)
@mock.patch('nova.objects.instance.Instance.save')
@mock.patch('nova.objects.ShareMappingList.get_by_instance_uuid')
def test_get_share_info_warning_share_mapping_in_error(
    self, mock_db, mock_instance, mock_log
):
    """A mapping in error state is warned about but still returned."""
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_db.return_value = self.fake_share_info()
    output = self.compute._get_share_info(self.context, instance)
    mock_log.warning.assert_called_once_with(
        "Share id '%s' attached to server id '%s' is in " "error state.",
        "232a4b40-306b-4cce-8bf4-689d2e671553",
        instance.id,
    )
    # Both mappings are returned, including the one in error.
    self.assertEqual(
        output[0].share_id, "232a4b40-306b-4cce-8bf4-689d2e671552"
    )
    self.assertEqual(
        output[1].share_id, "232a4b40-306b-4cce-8bf4-689d2e671553"
    )
@mock.patch('nova.compute.manager.LOG', autospec=True)
@mock.patch('nova.objects.instance.Instance.save')
@mock.patch('nova.objects.ShareMappingList.get_by_instance_uuid')
def test_get_share_info_fails_share_mapping_in_attaching(
    self, mock_db, mock_instance, mock_log
):
    """A mapping stuck in 'attaching' makes _get_share_info raise
    ShareErrorUnexpectedStatus and log an error.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    share_mappings = self.fake_share_info()
    # Force the transient state the code must reject.
    share_mappings.objects[0].status = "attaching"
    mock_db.return_value = share_mappings
    self.assertRaises(
        exception.ShareErrorUnexpectedStatus,
        self.compute._get_share_info,
        self.context,
        instance
    )
    mock_log.error.assert_called_once_with(
        "Share id '%s' attached to server id '%s' is "
        "still in '%s' state. Setting the instance "
        "in error.",
        share_mappings.objects[0].share_id,
        instance.id,
        "attaching",
    )
@mock.patch('nova.compute.manager.LOG', autospec=True)
@mock.patch('nova.objects.instance.Instance.save')
@mock.patch('nova.objects.ShareMappingList.get_by_instance_uuid')
def test_get_share_info_fails_share_mapping_in_detaching(
    self, mock_db, mock_instance, mock_log
):
    """A mapping stuck in 'detaching' makes _get_share_info raise
    ShareErrorUnexpectedStatus and log an error.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    share_mappings = self.fake_share_info()
    # Force the transient state the code must reject.
    share_mappings.objects[0].status = "detaching"
    mock_db.return_value = share_mappings
    self.assertRaises(
        exception.ShareErrorUnexpectedStatus,
        self.compute._get_share_info,
        self.context,
        instance
    )
    mock_log.error.assert_called_once_with(
        "Share id '%s' attached to server id '%s' is "
        "still in '%s' state. Setting the instance "
        "in error.",
        share_mappings.objects[0].share_id,
        instance.id,
        "detaching",
    )
@mock.patch('nova.share.manila.API.allow')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMapping.save')
def test_allow_share(
    self, mock_db, mock_get_access, mock_allow
):
    """allow_share grants manila 'rw' IP access using the configured
    [DEFAULT]my_shared_fs_storage_ip (not my_ip when both are set).
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    # First get_access finds nothing so allow() must be called; the
    # second returns the created access.
    mock_get_access.side_effect = [None, self.get_fake_share_access()]
    # Ensure CONF.my_shared_fs_storage_ip default is my_ip
    self.flags(my_ip="10.0.0.2")
    self.assertEqual(CONF.my_shared_fs_storage_ip, '10.0.0.2')
    # Set CONF.my_shared_fs_storage_ip to ensure it is used by the code
    self.flags(my_shared_fs_storage_ip="192.168.0.1")
    compute_ip = CONF.my_shared_fs_storage_ip
    self.assertEqual(compute_ip, '192.168.0.1')
    share_mapping = self.get_fake_share_mapping()
    self.compute.allow_share(self.context, instance, share_mapping)
    mock_get_access.assert_called_with(
        self.context, share_mapping.share_id, 'ip', compute_ip)
    mock_allow.assert_called_once_with(
        mock.ANY, share_mapping.share_id, 'ip', compute_ip, 'rw')
@mock.patch('nova.share.manila.API.allow')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMapping.save')
def test_allow_share_fails_share_not_found(
    self, mock_db, mock_get_access, mock_allow
):
    """allow_share propagates ShareNotFound raised by manila allow()."""
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_get_access.side_effect = [None, self.get_fake_share_access()]
    # Ensure CONF.my_shared_fs_storage_ip default is my_ip
    self.flags(my_ip="10.0.0.2")
    self.assertEqual(CONF.my_shared_fs_storage_ip, '10.0.0.2')
    # Set CONF.my_shared_fs_storage_ip to ensure it is used by the code
    self.flags(my_shared_fs_storage_ip="192.168.0.1")
    compute_ip = CONF.my_shared_fs_storage_ip
    self.assertEqual(compute_ip, '192.168.0.1')
    share_mapping = self.get_fake_share_mapping()
    mock_allow.side_effect = exception.ShareNotFound(
        share_id=share_mapping.share_id
    )
    self.assertRaises(
        exception.ShareNotFound,
        self.compute.allow_share,
        self.context,
        instance,
        share_mapping
    )
    mock_get_access.assert_called_with(
        self.context, share_mapping.share_id, 'ip', compute_ip)
    mock_allow.assert_called_once_with(
        mock.ANY, share_mapping.share_id, 'ip', compute_ip, 'rw')
@mock.patch('nova.share.manila.API.allow')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMapping.save')
def test_allow_share_fails_share_access_grant_error(
    self, mock_db, mock_get_access, mock_allow
):
    """allow_share propagates ShareAccessGrantError raised by manila
    allow().
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_get_access.side_effect = [None, self.get_fake_share_access()]
    # Ensure CONF.my_shared_fs_storage_ip default is my_ip
    self.flags(my_ip="10.0.0.2")
    self.assertEqual(CONF.my_shared_fs_storage_ip, '10.0.0.2')
    # Set CONF.my_shared_fs_storage_ip to ensure it is used by the code
    self.flags(my_shared_fs_storage_ip="192.168.0.1")
    compute_ip = CONF.my_shared_fs_storage_ip
    self.assertEqual(compute_ip, '192.168.0.1')
    share_mapping = self.get_fake_share_mapping()
    mock_allow.side_effect = exception.ShareAccessGrantError(
        share_id=share_mapping.share_id,
        reason="fake_reason"
    )
    self.assertRaises(
        exception.ShareAccessGrantError,
        self.compute.allow_share,
        self.context,
        instance,
        share_mapping
    )
    mock_get_access.assert_called_with(
        self.context, share_mapping.share_id, 'ip', compute_ip)
    mock_allow.assert_called_once_with(
        mock.ANY, share_mapping.share_id, 'ip', compute_ip, 'rw')
@mock.patch('nova.share.manila.API.allow')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMapping.save')
def test_allow_share_fails_bad_request_exception(
    self, mock_db, mock_get_access, mock_allow
):
    """allow_share propagates an SDK BadRequestException from manila."""
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_get_access.side_effect = [None, self.get_fake_share_access()]
    # Ensure CONF.my_shared_fs_storage_ip default is my_ip
    self.flags(my_ip="10.0.0.2")
    self.assertEqual(CONF.my_shared_fs_storage_ip, '10.0.0.2')
    # Set CONF.my_shared_fs_storage_ip to ensure it is used by the code
    self.flags(my_shared_fs_storage_ip="192.168.0.1")
    compute_ip = CONF.my_shared_fs_storage_ip
    self.assertEqual(compute_ip, '192.168.0.1')
    share_mapping = self.get_fake_share_mapping()
    mock_allow.side_effect = sdk_exc.BadRequestException()
    self.assertRaises(
        sdk_exc.BadRequestException,
        self.compute.allow_share,
        self.context,
        instance,
        share_mapping
    )
    mock_get_access.assert_called_with(
        self.context, share_mapping.share_id, 'ip', compute_ip)
    mock_allow.assert_called_once_with(
        mock.ANY, share_mapping.share_id, 'ip', compute_ip, 'rw')
@mock.patch('nova.share.manila.API.allow')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMapping.save')
def test_allow_share_fails_keystone_exception(
    self, mock_db, mock_get_access, mock_allow
):
    """allow_share propagates a keystone Unauthorized error from
    manila.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_get_access.side_effect = [None, self.get_fake_share_access()]
    # Ensure CONF.my_shared_fs_storage_ip default is my_ip
    self.flags(my_ip="10.0.0.2")
    self.assertEqual(CONF.my_shared_fs_storage_ip, '10.0.0.2')
    # Set CONF.my_shared_fs_storage_ip to ensure it is used by the code
    self.flags(my_shared_fs_storage_ip="192.168.0.1")
    compute_ip = CONF.my_shared_fs_storage_ip
    self.assertEqual(compute_ip, '192.168.0.1')
    share_mapping = self.get_fake_share_mapping()
    mock_allow.side_effect = keystone_exception.http.Unauthorized(
        message="Unauthorized"
    )
    self.assertRaises(
        keystone_exception.http.Unauthorized,
        self.compute.allow_share,
        self.context,
        instance,
        share_mapping
    )
    mock_get_access.assert_called_with(
        self.context, share_mapping.share_id, 'ip', compute_ip)
    mock_allow.assert_called_once_with(
        mock.ANY, share_mapping.share_id, 'ip', compute_ip, 'rw')
@mock.patch('nova.objects.share_mapping.ShareMapping.delete')
@mock.patch('nova.share.manila.API.deny')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMappingList.get_by_share_id')
def test_deny_share(
    self, mock_db_get_share, mock_get_access, mock_deny, mock_db_delete
):
    """Ensure we can deny the instance share.

    Manila deny() is called with the compute storage IP and the
    share mapping DB record is deleted.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_db_get_share.return_value = (
        objects.share_mapping.ShareMappingList()
    )
    share_mapping = self.get_fake_share_mapping()
    mock_db_get_share.return_value.objects.append(share_mapping)
    # Ensure CONF.my_shared_fs_storage_ip default is my_ip
    self.flags(my_ip="10.0.0.2")
    self.assertEqual(CONF.my_shared_fs_storage_ip, '10.0.0.2')
    # Set CONF.my_shared_fs_storage_ip to ensure it is used by the code
    self.flags(my_shared_fs_storage_ip="192.168.0.1")
    compute_ip = CONF.my_shared_fs_storage_ip
    self.assertEqual(compute_ip, '192.168.0.1')
    self.compute.deny_share(self.context, instance, share_mapping)
    mock_deny.assert_called_once_with(
        mock.ANY, share_mapping.share_id, 'ip', compute_ip)
    mock_db_delete.assert_called_once()
@mock.patch('nova.objects.share_mapping.ShareMapping.delete')
@mock.patch('nova.share.manila.API.deny')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMappingList.get_by_share_id')
def test_deny_share_in_use(
    self, mock_db_get_share, mock_get_access, mock_deny, mock_db_delete
):
    """Ensure we cannot deny a share used by an instance.

    With the mapping in 'active' state the manila access is kept
    (deny() not called) while the mapping record is still deleted.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_db_get_share.return_value = (
        objects.share_mapping.ShareMappingList()
    )
    share_mapping = self.get_fake_share_mapping()
    share_mapping.status = 'active'
    mock_db_get_share.return_value.objects.append(share_mapping)
    self.compute.deny_share(self.context, instance, share_mapping)
    mock_deny.assert_not_called()
    mock_db_delete.assert_called_once()
@mock.patch('nova.objects.share_mapping.ShareMapping.delete')
@mock.patch('nova.share.manila.API.deny')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMappingList.get_by_share_id')
def test_deny_share_in_error(
    self, mock_db_get_share, mock_get_access, mock_deny, mock_db_delete
):
    """Ensure we can deny a share in error on the instance detaching the
    share.

    Manila deny() is still called and the mapping record is deleted.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_db_get_share.return_value = (
        objects.share_mapping.ShareMappingList()
    )
    share_mapping = self.get_fake_share_mapping()
    share_mapping.status = 'error'
    mock_db_get_share.return_value.objects.append(share_mapping)
    # Ensure CONF.my_shared_fs_storage_ip default is my_ip
    self.flags(my_ip="10.0.0.2")
    self.assertEqual(CONF.my_shared_fs_storage_ip, '10.0.0.2')
    # Set CONF.my_shared_fs_storage_ip to ensure it is used by the code
    self.flags(my_shared_fs_storage_ip="192.168.0.1")
    compute_ip = CONF.my_shared_fs_storage_ip
    self.assertEqual(compute_ip, '192.168.0.1')
    self.compute.deny_share(self.context, instance, share_mapping)
    mock_deny.assert_called_once_with(
        mock.ANY, share_mapping.share_id, 'ip', compute_ip)
    mock_db_delete.assert_called_once()
@mock.patch('nova.objects.share_mapping.ShareMapping.delete')
@mock.patch('nova.share.manila.API.deny')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMappingList.get_by_share_id')
def test_deny_share_access_not_found_in_manila(
    self, mock_db_get_share, mock_get_access, mock_deny, mock_db_delete
):
    """Ensure we can deny a share even if access is not found in manila.

    get_access raising ShareNotFound does not abort the deny flow:
    deny() is still attempted and the mapping record is deleted.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_db_get_share.return_value = (
        objects.share_mapping.ShareMappingList()
    )
    share_mapping = self.get_fake_share_mapping()
    share_mapping.status = 'detaching'
    compute_ip = CONF.my_shared_fs_storage_ip
    mock_db_get_share.return_value.objects.append(share_mapping)
    mock_get_access.side_effect = exception.ShareNotFound(
        share_id=share_mapping.share_id
    )
    self.compute.deny_share(self.context, instance, share_mapping)
    mock_deny.assert_called_once_with(
        mock.ANY, share_mapping.share_id, 'ip', compute_ip)
    mock_db_delete.assert_called_once()
@mock.patch('nova.objects.share_mapping.ShareMapping.delete')
@mock.patch('nova.share.manila.API.deny')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMappingList.get_by_share_id')
def test_deny_share_not_found_in_manila(
    self, mock_db_get_share, mock_get_access, mock_deny, mock_db_delete
):
    """Ensure we can deny a share even if the share is not found in manila.

    deny() raising ShareNotFound is tolerated and the mapping record
    is still deleted.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_db_get_share.return_value = (
        objects.share_mapping.ShareMappingList()
    )
    share_mapping = self.get_fake_share_mapping()
    share_mapping.status = 'detaching'
    compute_ip = CONF.my_shared_fs_storage_ip
    mock_db_get_share.return_value.objects.append(share_mapping)
    mock_deny.side_effect = exception.ShareNotFound(
        share_id=share_mapping.share_id
    )
    self.compute.deny_share(self.context, instance, share_mapping)
    mock_deny.assert_called_once_with(
        mock.ANY, share_mapping.share_id, 'ip', compute_ip)
    mock_db_delete.assert_called_once()
@mock.patch('nova.objects.share_mapping.ShareMapping.save')
@mock.patch('nova.objects.share_mapping.ShareMapping.delete')
@mock.patch('nova.share.manila.API.deny')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMappingList.get_by_share_id')
def test_deny_share_fails_access_removal_error(
    self, mock_db_get_share, mock_get_access, mock_deny, mock_db_delete,
    mock_db_save
):
    """Ensure we have an exception if the access cannot be removed
    by manila.

    The mapping record is not deleted and its status is moved to
    'error'.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_db_get_share.return_value = (
        objects.share_mapping.ShareMappingList()
    )
    share_mapping = self.get_fake_share_mapping()
    share_mapping.status = 'detaching'
    mock_db_get_share.return_value.objects.append(share_mapping)
    mock_deny.side_effect = exception.ShareAccessRemovalError(
        share_id=share_mapping.share_id,
        reason="fake_reason"
    )
    self.assertRaises(
        exception.ShareAccessRemovalError,
        self.compute.deny_share,
        self.context,
        instance,
        share_mapping
    )
    mock_db_delete.assert_not_called()
    self.assertEqual(share_mapping.status, 'error')
@mock.patch('nova.objects.share_mapping.ShareMapping.save')
@mock.patch('nova.objects.share_mapping.ShareMapping.delete')
@mock.patch('nova.share.manila.API.deny')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMappingList.get_by_share_id')
def test_deny_share_fails_keystone_unauthorized(
    self, mock_db_get_share, mock_get_access, mock_deny, mock_db_delete,
    mock_db_save
):
    """Ensure we have an exception if the access cannot be removed
    by manila.

    A keystone Unauthorized error propagates, the mapping record is
    not deleted and its status is moved to 'error'.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_db_get_share.return_value = (
        objects.share_mapping.ShareMappingList()
    )
    share_mapping = self.get_fake_share_mapping()
    share_mapping.status = 'detaching'
    mock_db_get_share.return_value.objects.append(share_mapping)
    mock_deny.side_effect = keystone_exception.http.Unauthorized(
        message="Unauthorized"
    )
    self.assertRaises(
        keystone_exception.http.Unauthorized,
        self.compute.deny_share,
        self.context,
        instance,
        share_mapping
    )
    mock_db_delete.assert_not_called()
    self.assertEqual(share_mapping.status, 'error')
@mock.patch('nova.objects.share_mapping.ShareMapping.delete')
@mock.patch('nova.share.manila.API.deny')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMappingList.get_by_share_id')
def test_deny_share_in_use_by_another_instance(
    self, mock_db_get_share, mock_get_access, mock_deny, mock_db_delete
):
    """Ensure we do not deny a share used by another instance.

    Manila access is kept (deny() not called) while this instance's
    mapping record is deleted.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_db_get_share.return_value = (
        objects.share_mapping.ShareMappingList()
    )
    share_mapping1 = self.get_fake_share_mapping()
    share_mapping2 = self.get_fake_share_mapping()
    # Same share attached to a different instance.
    share_mapping2.instance_uuid = uuidutils.generate_uuid()
    mock_db_get_share.return_value.objects.append(share_mapping1)
    mock_db_get_share.return_value.objects.append(share_mapping2)
    self.compute.deny_share(self.context, instance, share_mapping1)
    mock_deny.assert_not_called()
    mock_db_delete.assert_called_once()
@mock.patch('nova.objects.share_mapping.ShareMapping.delete')
@mock.patch('nova.share.manila.API.deny')
@mock.patch('nova.share.manila.API.get_access')
@mock.patch('nova.objects.share_mapping.ShareMappingList.get_by_share_id')
def test_deny_share_in_error_on_another_instance(
    self, mock_db_get_share, mock_get_access, mock_deny, mock_db_delete
):
    """Ensure we cannot deny a share in error state on another instance.
    If the other instance is hard rebooted, it might need the share.
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    mock_db_get_share.return_value = (
        objects.share_mapping.ShareMappingList()
    )
    share_mapping1 = self.get_fake_share_mapping()
    share_mapping2 = self.get_fake_share_mapping()
    # Same share attached to a different instance, in error state.
    share_mapping2.instance_uuid = uuidutils.generate_uuid()
    share_mapping2.status = 'error'
    mock_db_get_share.return_value.objects.append(share_mapping1)
    mock_db_get_share.return_value.objects.append(share_mapping2)
    self.compute.deny_share(self.context, instance, share_mapping1)
    mock_deny.assert_not_called()
    mock_db_delete.assert_called_once()
@mock.patch('nova.objects.share_mapping.ShareMapping.save')
@mock.patch('nova.virt.fake.FakeDriver.mount_share')
def test_mount_nfs_share(
    self, mock_drv, mock_db
):
    """_mount_share delegates the mount to the virt driver's
    mount_share().
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    share_mapping = self.get_fake_share_mapping()
    self.compute._mount_share(self.context, instance, share_mapping)
    mock_drv.assert_called_once_with(self.context, instance, share_mapping)
@mock.patch('nova.share.manila.API.deny')
@mock.patch('nova.virt.fake.FakeDriver.umount_share', return_value=False)
def test_umount_share(
    self, mock_drv, mock_deny):
    """_umount_share delegates the umount to the virt driver's
    umount_share().
    """
    self.flags(shutdown_retry_interval=20, group='compute')
    instance = fake_instance.fake_instance_obj(
        self.context,
        uuid=uuids.instance,
        vm_state=vm_states.ACTIVE,
        task_state=task_states.POWERING_OFF)
    share_mapping = self.get_fake_share_mapping()
    self.compute._umount_share(self.context, instance, share_mapping)
    mock_drv.assert_called_once_with(self.context, instance, share_mapping)
@mock.patch('nova.compute.manager.ComputeManager._get_share_info',
return_value=[])
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.objects.Instance.get_network_info')
@mock.patch(
@ -2142,9 +2810,17 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch(
'nova.compute.manager.ComputeManager._notify_about_instance_usage')
def test_shutdown_instance_versioned_notifications(self,
mock_notify_unversioned, mock_notify, mock_connector,
mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
def test_shutdown_instance_versioned_notifications(
self,
mock_notify_unversioned,
mock_notify,
mock_connector,
mock_destroy,
mock_blk_device_info,
mock_nw_info,
mock_elevated,
mock_share,
):
mock_elevated.return_value = self.context
instance = fake_instance.fake_instance_obj(
self.context,
@ -2160,14 +2836,24 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.call(self.context, instance, 'fake-mini',
action='shutdown', phase='end', bdms=bdms)])
@mock.patch('nova.compute.manager.ComputeManager._get_share_info',
return_value=[])
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.objects.Instance.get_network_info')
@mock.patch(
'nova.compute.manager.ComputeManager._get_instance_block_device_info')
@mock.patch('nova.virt.driver.ComputeDriver.destroy')
@mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
def _test_shutdown_instance_exception(self, exc, mock_connector,
mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
def _test_shutdown_instance_exception(
self,
exc,
mock_connector,
mock_destroy,
mock_blk_device_info,
mock_nw_info,
mock_elevated,
mock_share,
):
mock_connector.side_effect = exc
mock_elevated.return_value = self.context
instance = fake_instance.fake_instance_obj(
@ -4856,7 +5542,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
]
notify_instance_usage.assert_has_calls(notify_calls)
power_off_instance.assert_called_once_with(instance,
power_off_instance.assert_called_once_with(self.context, instance,
clean_shutdown)
driver_rescue.assert_called_once_with(
@ -5450,7 +6136,10 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_detach.assert_called_once_with(
self.context, bdms[0], instance, destroy_bdm=False)
def test_stop_instance_task_state_none_power_state_shutdown(self):
@mock.patch('nova.compute.manager.ComputeManager._get_share_info',
return_value=[])
def test_stop_instance_task_state_none_power_state_shutdown(self,
mock_get_share_info):
# Tests that stop_instance doesn't puke when the instance power_state
# is shutdown and the task_state is None.
instance = fake_instance.fake_instance_obj(
@ -5479,7 +6168,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.call(self.context, instance, 'fake-mini',
action='power_off', phase='end'),
])
power_off_mock.assert_called_once_with(instance, True)
power_off_mock.assert_called_once_with(
self.context, instance, True)
save_mock.assert_called_once_with(
expected_task_state=[task_states.POWERING_OFF, None])
self.assertEqual(power_state.SHUTDOWN, instance.power_state)
@ -5893,7 +6583,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_destroy.assert_called_once_with(
self.context, instance,
network_info=None, block_device_info=fake_block_device_info)
mock_power_off.assert_called_once_with(
mock_power_off.assert_called_once_with(self.context,
instance, clean_shutdown=True)
if is_vol_backed and reimage_boot_vol:
mock_rebuild_vol_backed_inst.assert_called_once_with(
@ -12132,7 +12822,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
mock.call(self.context, self.instance, get_bdms.return_value,
get_instance_nw_info.return_value,
fields.NotificationPhase.END)])
_power_off_instance.assert_called_once_with(self.instance)
_power_off_instance.assert_called_once_with(
self.context, self.instance)
self.assertEqual(power_state.SHUTDOWN, self.instance.power_state)
if snapshot_id is None:
_snapshot_for_resize.assert_not_called()
@ -12184,7 +12875,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
exception.InstancePowerOffFailure,
self.compute._prep_snapshot_based_resize_at_source,
self.context, self.instance, self.migration)
_power_off_instance.assert_called_once_with(self.instance)
_power_off_instance.assert_called_once_with(
self.context, self.instance)
@mock.patch('nova.objects.Instance.get_bdms',
return_value=objects.BlockDeviceMappingList())

View File

@ -1327,3 +1327,30 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
'access the database. You should run this '
'service without the [api_database]/connection '
'config option.')
def get_fake_share_mapping(self):
    """Build a fake 'inactive' NFS ShareMapping for the RPC API tests."""
    share_mapping = objects.ShareMapping(self.context)
    share_mapping.uuid = uuids.share_mapping
    share_mapping.instance_uuid = uuids.instance
    share_mapping.share_id = uuids.share
    share_mapping.status = 'inactive'
    share_mapping.tag = 'fake_tag'
    share_mapping.export_location = '192.168.122.152:/manila/share'
    share_mapping.share_proto = 'NFS'
    return share_mapping
def test_allow_share(self):
    """allow_share is cast over RPC at compute API version 6.4."""
    self._test_compute_api(
        'allow_share',
        'cast',
        instance=self.fake_instance_obj,
        share_mapping=self.get_fake_share_mapping(),
        version='6.4')
def test_deny_share(self):
    """deny_share is cast over RPC at compute API version 6.4."""
    self._test_compute_api(
        'deny_share',
        'cast',
        instance=self.fake_instance_obj,
        share_mapping=self.get_fake_share_mapping(),
        version='6.4')

View File

@ -831,6 +831,24 @@ class LibvirtConnTestCase(test.NoDBTestCase,
" is invalid",
)
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_register_all_undefined_instance_details',
                   new=mock.Mock())
def test_driver_capabilities_mem_backing_file(self):
    """supports_mem_backing_file is False by default and True once
    [libvirt]file_backed_memory is configured (kvm, ratio 1.0).
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.init_host("dummyhost")
    self.assertFalse(drvr.capabilities['supports_mem_backing_file'],
                     'Driver capabilities for '
                     '\'supports_mem_backing_file\' '
                     'is invalid when \'file_backed_memory is not set\'')
    self.flags(file_backed_memory=1024, group='libvirt')
    self.flags(virt_type='kvm', group='libvirt')
    self.flags(ram_allocation_ratio=1.0)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.init_host("dummyhost")
    self.assertTrue(drvr.capabilities['supports_mem_backing_file'])
def test_driver_capabilities_qcow2_with_rbd(self):
self.flags(images_type='rbd', group='libvirt')
self.flags(force_raw_images=False)
@ -1243,7 +1261,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock())
def test_file_backed_memory_support_called(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(drvr,
with mock.patch.object(drvr._host,
'_check_file_backed_memory_support') as mock_check_fb_support:
drvr.init_host("dummyhost")
self.assertTrue(mock_check_fb_support.called)
@ -1252,14 +1270,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(file_backed_memory=1024, group='libvirt')
self.flags(ram_allocation_ratio=1.0)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._check_file_backed_memory_support()
drvr._host._check_file_backed_memory_support()
def test_min_version_file_backed_bad_ram_allocation_ratio(self):
self.flags(file_backed_memory=1024, group="libvirt")
self.flags(ram_allocation_ratio=1.5)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.InternalError,
drvr._check_file_backed_memory_support)
drvr._host._check_file_backed_memory_support)
def test__check_file_backed_memory_support__total_lt_reserved(self):
"""Ensure an error is raised if total memory < reserved.
@ -1271,10 +1289,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(ram_allocation_ratio=1.0, reserved_host_memory_mb=4096)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(
exception.InternalError, drvr._check_file_backed_memory_support,
exception.InternalError,
drvr._host._check_file_backed_memory_support,
)
@mock.patch.object(libvirt_driver.LOG, 'warning')
@mock.patch.object(host.LOG, 'warning')
def test__check_file_backed_memory_support__has_reserved(self, mock_log):
"""Ensure a warning is issued if memory is reserved.
@ -1286,7 +1305,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# we don't need to configure '[DEFAULT] reserved_host_memory_mb' since
# it defaults to 512 (MB)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._check_file_backed_memory_support()
drvr._host._check_file_backed_memory_support()
mock_log.assert_called_once()
self.assertIn(
"Reserving memory via '[DEFAULT] reserved_host_memory_mb' is not "
@ -16359,7 +16378,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
new=mock.Mock())
def test_spawn_with_pci_devices(self):
class FakeLibvirtPciDevice(object):
def dettach(self):
def detach(self):
return None
def reset(self):
@ -17567,8 +17586,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr.resume_state_on_host_boot(self.context, instance, network_info=[],
block_device_info=None)
mock_hard_reboot.assert_called_once_with(self.context,
instance, [], None)
mock_hard_reboot.assert_called_once_with(
self.context, instance, [], mock.ANY, None
)
@mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_guest_with_network')
@ -17610,8 +17630,119 @@ class LibvirtConnTestCase(test.NoDBTestCase,
backend = self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
accel_info = [{'k1': 'v1', 'k2': 'v2'}]
share_info = objects.ShareMappingList()
with mock.patch('os.path.exists', return_value=True):
drvr._hard_reboot(self.context, instance, network_info,
drvr._hard_reboot(
self.context,
instance,
network_info,
share_info,
block_device_info,
accel_info=accel_info,
)
disks = backend.disks
# NOTE(mdbooth): _create_images_and_backing() passes a full path in
# 'disk_name' when creating a disk. This is wrong, but happens to
# work due to handling by each individual backend. This will be
# fixed in a subsequent commit.
#
# We translate all the full paths into disk names here to make the
# test readable
disks = {os.path.basename(name): value
for name, value in disks.items()}
# We should have called cache() on the root and ephemeral disks
for name in ('disk', 'disk.local'):
self.assertTrue(disks[name].cache.called)
mock_get_mdev.assert_called_once_with(instance)
mock_destroy.assert_called_once_with(self.context, instance,
network_info, destroy_disks=False,
block_device_info=block_device_info,
destroy_secrets=False)
mock_get_guest_xml.assert_called_once_with(self.context, instance,
network_info, mock.ANY, mock.ANY,
block_device_info=block_device_info, mdevs=[uuids.mdev1],
accel_info=accel_info, share_info=share_info)
mock_create_guest_with_network.assert_called_once_with(
self.context, dummyxml, instance, network_info,
block_device_info, vifs_already_plugged=True,
external_events=[])
@mock.patch('nova.objects.instance.Instance.save',
return_value=None)
@mock.patch('nova.virt.libvirt.LibvirtDriver._build_device_metadata',
return_value=None)
@mock.patch('nova.objects.share_mapping.ShareMapping.save')
@mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_guest_with_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
'_get_instance_disk_info_from_config')
@mock.patch('nova.virt.libvirt.LibvirtDriver.destroy')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
'_get_all_assigned_mediated_devices')
def test_hard_reboot_with_share_info(
self, mock_get_mdev, mock_destroy, mock_get_disk_info,
mock_get_guest_xml, mock_create_guest_with_network,
mock_get_info, mock_attach, mock_metadata, mock_save
):
self.context.auth_token = True # any non-None value will suffice
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self)
block_device_info = None
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
mock_get_mdev.return_value = {uuids.mdev1: uuids.inst1}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN),
hardware.InstanceInfo(state=power_state.RUNNING)]
mock_get_info.side_effect = return_values
mock_get_guest_xml.return_value = dummyxml
mock_get_disk_info.return_value = \
fake_disk_info_byname(instance).values()
backend = self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
accel_info = [{'k1': 'v1', 'k2': 'v2'}]
# Input object
share_mapping = {}
share_mapping['id'] = 1
share_mapping['created_at'] = None
share_mapping['updated_at'] = None
share_mapping['uuid'] = uuids.share_mapping
share_mapping['instance_uuid'] = uuids.instance
share_mapping['share_id'] = uuids.share
share_mapping['status'] = 'inactive'
share_mapping['tag'] = 'fake_tag'
share_mapping['export_location'] = 'fake_export_location'
share_mapping['share_proto'] = 'NFS'
share_info = objects.base.obj_make_list(
self.context,
objects.ShareMappingList(self.context),
objects.ShareMapping,
[share_mapping])
with mock.patch(
'os.path.exists', return_value=True
):
drvr._hard_reboot(self.context, instance, network_info, share_info,
block_device_info, accel_info=accel_info)
disks = backend.disks
@ -17639,10 +17770,67 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_guest_xml.assert_called_once_with(self.context, instance,
network_info, mock.ANY, mock.ANY,
block_device_info=block_device_info, mdevs=[uuids.mdev1],
accel_info=accel_info)
mock_create_guest_with_network.assert_called_once_with(self.context,
dummyxml, instance, network_info, block_device_info,
vifs_already_plugged=True, external_events=[])
accel_info=accel_info, share_info=share_info)
mock_create_guest_with_network.assert_called_once_with(
self.context, dummyxml, instance, network_info, block_device_info,
vifs_already_plugged=True,
external_events=[])
def _mount_or_umount_share(self, func, side_effect=False):
    """Drive LibvirtDriver.mount_share()/umount_share() against a mocked
    NFS volume driver, checking both success and failure paths.

    :param func: name of the driver method under test, either
        'mount_share' or 'umount_share'.
    :param side_effect: optional exception raised by the mocked NFS
        driver call; when set, the matching Share*Error is expected to
        be raised by the driver method instead of a normal return.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    class_object = getattr(drvr, func)
    base_class = 'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver'
    if func == 'umount_share':
        mock_class_object = base_class + '.disconnect_volume'
        exc = exception.ShareUmountError
    elif func == 'mount_share':
        exc = exception.ShareMountError
        mock_class_object = base_class + '.connect_volume'
    else:
        # Fail loudly on a typo in the caller. Unlike a bare 'assert',
        # self.fail() is not stripped when running under 'python -O'.
        self.fail('Unsupported driver method: %s' % func)

    instance = objects.Instance(**self.test_instance)
    share_mapping = objects.ShareMapping(self.context)
    share_mapping.uuid = uuids.share_mapping
    share_mapping.instance_uuid = uuids.instance
    share_mapping.share_id = uuids.share
    share_mapping.status = 'inactive'
    share_mapping.tag = 'fake_tag'
    share_mapping.export_location = '192.168.122.152:/manila/share'
    share_mapping.share_proto = 'NFS'

    with mock.patch(mock_class_object) as mock_nfsdrv, mock.patch(
            'nova.objects.share_mapping.ShareMapping.save'):
        if not side_effect:
            class_object(self.context, instance, share_mapping)
            # The NFS driver must receive the connection info derived
            # from the share mapping.
            mock_nfsdrv.assert_called_once_with(
                {'data': {
                    'export': share_mapping.export_location,
                    'name': share_mapping.share_id},
                },
                instance)
        else:
            mock_nfsdrv.side_effect = side_effect
            self.assertRaises(
                exc,
                class_object,
                self.context,
                instance,
                share_mapping
            )
def test_mount_share(self):
    # Nominal case: the mount command succeeds.
    self._mount_or_umount_share(func='mount_share')
def test_mount_share_fails(self):
    # A failing mount command must surface as ShareMountError.
    self._mount_or_umount_share(
        func='mount_share',
        side_effect=processutils.ProcessExecutionError)
def test_umount_share(self):
    # Nominal case: the umount command succeeds.
    self._mount_or_umount_share(func='umount_share')
def test_umount_share_fails(self):
    # A failing umount command must surface as ShareUmountError.
    self._mount_or_umount_share(
        func='umount_share',
        side_effect=processutils.ProcessExecutionError)
@mock.patch('oslo_utils.fileutils.ensure_tree', new=mock.Mock())
@mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
@ -28001,15 +28189,25 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
image_ref=uuids.image,
flavor=objects.Flavor(extra_specs={'resources:VGPU': 1}))
drvr._hard_reboot(ctxt, instance, mock.sentinel.network_info)
share_info = objects.ShareMappingList()
drvr._hard_reboot(
ctxt, instance, mock.sentinel.network_info, share_info
)
(mock_compute.reportclient.get_allocations_for_consumer.
assert_called_once_with(ctxt, instance.uuid))
mock_allocate_mdevs.assert_called_once_with(mock.sentinel.allocations)
mock_get_xml.assert_called_once_with(
ctxt, instance, mock.sentinel.network_info,
mock.sentinel.disk_info, mock_image_meta, block_device_info=None,
mdevs=mock_allocate_mdevs.return_value, accel_info=None)
ctxt,
instance,
mock.sentinel.network_info,
mock.sentinel.disk_info,
mock_image_meta,
block_device_info=None,
mdevs=mock_allocate_mdevs.return_value,
accel_info=None,
share_info=share_info,
)
return ctxt, mock_get_xml, instance

View File

@ -221,6 +221,8 @@ CAPABILITY_TRAITS_MAP = {
os_traits.COMPUTE_ADDRESS_SPACE_EMULATED,
"supports_stateless_firmware":
os_traits.COMPUTE_SECURITY_STATELESS_FIRMWARE,
"supports_virtio_fs": os_traits.COMPUTE_STORAGE_VIRTIO_FS,
"supports_mem_backing_file": os_traits.COMPUTE_MEM_BACKING_FILE,
}
@ -292,6 +294,8 @@ class ComputeDriver(object):
"supports_remote_managed_ports": False,
"supports_address_space_passthrough": False,
"supports_address_space_emulated": False,
"supports_virtio_fs": False,
"supports_mem_backing_file": False,
# Ephemeral encryption support flags
"supports_ephemeral_encryption": False,
@ -1038,14 +1042,37 @@ class ComputeDriver(object):
raise NotImplementedError()
def power_on(self, context, instance, network_info,
block_device_info=None, accel_info=None):
block_device_info=None, accel_info=None, share_info=None):
"""Power on the specified instance.
:param context: security context
:param instance: nova.objects.instance.Instance
:param network_info: instance network information
:param block_device_info: instance volume block device info
:param accel_info: List of accelerator request dicts. The exact
data struct is doc'd in nova/virt/driver.py::spawn().
:param share_info: a ShareMappingList containing the attached shares.
"""
raise NotImplementedError()
def mount_share(self, context, instance, share_mapping):
    """Mount a manila share to the compute node.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :param share_mapping: nova.objects.share_mapping.ShareMapping object
        that defines the share
    :raises NotImplementedError: always; virt drivers that support
        shares must override this method.
    """
    raise NotImplementedError()
def umount_share(self, context, instance, share_mapping):
    """Unmount a manila share from the compute node.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :param share_mapping: nova.objects.share_mapping.ShareMapping object
        that defines the share
    :returns: True if the mountpoint is still in use by another instance
    :raises NotImplementedError: always; virt drivers that support
        shares must override this method.
    """
    raise NotImplementedError()

View File

@ -122,6 +122,8 @@ class FakeDriver(driver.ComputeDriver):
"supports_address_space_passthrough": True,
"supports_address_space_emulated": True,
"supports_stateless_firmware": True,
"supports_virtio_fs": True,
"supports_mem_backing_file": True,
# Supported image types
"supports_image_type_raw": True,
@ -286,12 +288,21 @@ class FakeDriver(driver.ComputeDriver):
raise exception.InstanceNotFound(instance_id=instance.uuid)
def power_on(self, context, instance, network_info,
block_device_info=None, accel_info=None):
block_device_info=None, accel_info=None, share_info=None):
if instance.uuid in self.instances:
self.instances[instance.uuid].state = power_state.RUNNING
if share_info:
for share in share_info:
share.activate()
else:
raise exception.InstanceNotFound(instance_id=instance.uuid)
def mount_share(self, context, instance, share_mapping):
    """No-op: the fake driver does not mount anything on the host."""
    pass
def umount_share(self, context, instance, share_mapping):
    """No-op: the fake driver has nothing mounted to tear down."""
    pass
def trigger_crash_dump(self, instance):
    """No-op: crash dump injection is not simulated by the fake driver."""
    pass

View File

@ -164,6 +164,8 @@ class IronicDriver(virt_driver.ComputeDriver):
"supports_address_space_passthrough": False,
"supports_address_space_emulated": False,
"supports_stateless_firmware": False,
"supports_virtio_fs": False,
"supports_mem_backing_file": False,
# Image type support flags
"supports_image_type_aki": False,
@ -1402,6 +1404,8 @@ class IronicDriver(virt_driver.ComputeDriver):
encountered. Ignored by this driver.
:param accel_info: List of accelerator request dicts. The exact
data struct is doc'd in nova/virt/driver.py::spawn().
:param share_info: share mapping information used to mount Manila
shares on the compute and then on the instance using virtiofs.
"""
LOG.debug('Reboot(type %s) called for instance',
reboot_type, instance=instance)
@ -1496,7 +1500,7 @@ class IronicDriver(virt_driver.ComputeDriver):
node.id, instance=instance)
def power_on(self, context, instance, network_info,
block_device_info=None, accel_info=None):
block_device_info=None, accel_info=None, share_info=None):
"""Power on the specified instance.
NOTE: Unlike the libvirt driver, this method does not delete
@ -1510,6 +1514,7 @@ class IronicDriver(virt_driver.ComputeDriver):
information. Ignored by this driver.
:param accel_info: List of accelerator requests for this instance.
Ignored by this driver.
:param share_info: instance share attached list.
"""
LOG.debug('Power on called for instance', instance=instance)
node = self._validate_instance_and_node(instance)

View File

@ -129,6 +129,7 @@ from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt.libvirt.volume import fs
from nova.virt.libvirt.volume import mount
from nova.virt.libvirt.volume import nfs
from nova.virt.libvirt.volume import remotefs
from nova.virt.libvirt.volume import volume
from nova.virt import netutils
@ -790,8 +791,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._supported_perf_events = self._get_supported_perf_events()
self._check_file_backed_memory_support()
self._check_my_ip()
# TODO(ykarel) This can be dropped when MIN_LIBVIRT_VERSION>=8.0.0
@ -911,7 +910,9 @@ class LibvirtDriver(driver.ComputeDriver):
self.capabilities.update({
'supports_secure_boot': self._host.supports_secure_boot,
'supports_remote_managed_ports':
self._host.supports_remote_managed_ports
self._host.supports_remote_managed_ports,
'supports_virtio_fs': self._host.supports_virtio_fs,
'supports_mem_backing_file': self._host.supports_mem_backing_file
})
supports_maxphysaddr = self._host.has_min_version(
@ -1220,45 +1221,6 @@ class LibvirtDriver(driver.ComputeDriver):
raise exception.InvalidLibvirtMdevConfig(reason=msg)
self._create_new_mediated_device(parent, uuid=mdev_uuid)
def _check_file_backed_memory_support(self):
if not CONF.libvirt.file_backed_memory:
return
# file_backed_memory is only compatible with qemu/kvm virts
if CONF.libvirt.virt_type not in ("qemu", "kvm"):
raise exception.InternalError(
_('Running Nova with file_backed_memory and virt_type '
'%(type)s is not supported. file_backed_memory is only '
'supported with qemu and kvm types.') %
{'type': CONF.libvirt.virt_type})
# file-backed memory doesn't work with memory overcommit.
# Block service startup if file-backed memory is enabled and
# ram_allocation_ratio is not 1.0
if CONF.ram_allocation_ratio != 1.0:
raise exception.InternalError(
'Running Nova with file_backed_memory requires '
'ram_allocation_ratio configured to 1.0')
if CONF.reserved_host_memory_mb:
# this is a hard failure as placement won't allow total < reserved
if CONF.reserved_host_memory_mb >= CONF.libvirt.file_backed_memory:
msg = _(
"'[libvirt] file_backed_memory', which represents total "
"memory reported to placement, must be greater than "
"reserved memory configured via '[DEFAULT] "
"reserved_host_memory_mb'"
)
raise exception.InternalError(msg)
# TODO(stephenfin): Change this to an exception in W or later
LOG.warning(
"Reserving memory via '[DEFAULT] reserved_host_memory_mb' "
"is not compatible with file-backed memory. Consider "
"setting '[DEFAULT] reserved_host_memory_mb' to 0. This will "
"be an error in a future release."
)
def _check_my_ip(self):
ips = compute_utils.get_machine_ips()
if CONF.my_ip not in ips:
@ -4057,7 +4019,8 @@ class LibvirtDriver(driver.ComputeDriver):
"Trying hard reboot.",
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info, accel_info)
objects.ShareMappingList(), block_device_info, accel_info
)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
@ -4105,7 +4068,7 @@ class LibvirtDriver(driver.ComputeDriver):
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
def _hard_reboot(self, context, instance, network_info, share_info,
block_device_info=None, accel_info=None):
"""Reboot a virtual machine, given an instance reference.
@ -4159,10 +4122,12 @@ class LibvirtDriver(driver.ComputeDriver):
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self._get_guest_xml(context, instance, network_info, disk_info,
instance.image_meta,
block_device_info=block_device_info,
mdevs=mdevs, accel_info=accel_info)
mdevs=mdevs, accel_info=accel_info,
share_info=share_info)
# NOTE(mdbooth): context.auth_token will not be set when we call
# _hard_reboot from resume_state_on_host_boot()
@ -4337,13 +4302,66 @@ class LibvirtDriver(driver.ComputeDriver):
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None, accel_info=None):
block_device_info=None, accel_info=None, share_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info,
accel_info)
self._hard_reboot(context, instance, network_info, share_info,
block_device_info, accel_info)
def _get_share_driver_manager(self, host, protocol):
    """Return the volume driver able to mount shares of ``protocol``.

    :param host: connection host handed to the volume driver.
    :param protocol: a fields.ShareMappingProto value.
    :raises NotImplementedError: for CEPHFS, which is not wired up yet.
    :raises exception.ShareProtocolUnknown: for any other unsupported
        protocol.
    """
    if protocol == fields.ShareMappingProto.CEPHFS:
        # CephFS shares are recognized but not implemented yet.
        raise NotImplementedError()
    if protocol != fields.ShareMappingProto.NFS:
        raise exception.ShareProtocolUnknown(share_proto=protocol)
    return nfs.LibvirtNFSVolumeDriver(host)
def _get_share_connection_info(self, share_mapping):
connection_info = {'data': {'export': share_mapping.export_location,
'name': share_mapping.share_id}}
return connection_info
def _get_share_mount_path(self, instance, share_mapping):
    """Return the host-local path where the share is (to be) mounted."""
    drv = self._get_share_driver_manager(
        instance.host, share_mapping.share_proto)
    connection_info = self._get_share_connection_info(share_mapping)
    return drv._get_mount_path(connection_info)
def mount_share(self, context, instance, share_mapping):
    """Mount a manila share on this compute host.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :param share_mapping: ShareMapping object describing the share
    :raises exception.ShareMountError: if the underlying mount command
        fails.
    """
    connection_info = self._get_share_connection_info(share_mapping)
    drv = self._get_share_driver_manager(
        instance.host, share_mapping.share_proto)
    try:
        drv.connect_volume(connection_info, instance)
    except processutils.ProcessExecutionError as exc:
        raise exception.ShareMountError(
            share_id=share_mapping.share_id,
            server_id=share_mapping.instance_uuid,
            reason=exc,
        )
def umount_share(self, context, instance, share_mapping):
    """Unmount a manila share from this compute host.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :param share_mapping: ShareMapping object describing the share
    :returns: the result of the underlying disconnect_volume() call,
        i.e. whether the mountpoint is still used by another instance
    :raises exception.ShareUmountError: if the umount command fails.
    """
    connection_info = self._get_share_connection_info(share_mapping)
    drv = self._get_share_driver_manager(
        instance.host, share_mapping.share_proto)
    try:
        return drv.disconnect_volume(connection_info, instance)
    except processutils.ProcessExecutionError as exc:
        raise exception.ShareUmountError(
            share_id=share_mapping.share_id,
            server_id=share_mapping.instance_uuid,
            reason=exc,
        )
def trigger_crash_dump(self, instance):
"""Trigger crash dump by injecting an NMI to the specified instance."""
@ -4427,7 +4445,9 @@ class LibvirtDriver(driver.ComputeDriver):
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
self._hard_reboot(context, instance, network_info,
objects.ShareMappingList(), block_device_info
)
def rescue(self, context, instance, network_info, image_meta,
rescue_password, block_device_info):
@ -7365,7 +7385,8 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None,
context=None, mdevs=None, accel_info=None):
context=None, mdevs=None, accel_info=None,
share_info=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
@ -7374,6 +7395,7 @@ class LibvirtDriver(driver.ComputeDriver):
:param mdevs: optional list of mediated devices to assign to the guest.
:param accel_info: optional list of accelerator requests (ARQs)
:param share_info: optional list of share_mapping
"""
flavor = instance.flavor
inst_path = libvirt_utils.get_instance_path(instance)
@ -7498,6 +7520,8 @@ class LibvirtDriver(driver.ComputeDriver):
self._guest_add_accel_pci_devices(guest, pci_arq_list)
self._guest_add_virtiofs_for_share(guest, instance, share_info)
self._guest_add_watchdog_action(guest, flavor, image_meta)
self._guest_add_memory_balloon(guest)
@ -7916,7 +7940,8 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta, rescue=None,
block_device_info=None,
mdevs=None, accel_info=None):
mdevs=None, accel_info=None,
share_info=None):
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
@ -7925,16 +7950,18 @@ class LibvirtDriver(driver.ComputeDriver):
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s' %
'block_device_info=%(block_device_info)s'
'share_info=%(share_info)s' %
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
'block_device_info': block_device_info,
'share_info': share_info, })
# NOTE(mriedem): block_device_info can contain auth_password so we
# need to sanitize the password in the message.
LOG.debug(strutils.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info,
context, mdevs, accel_info)
context, mdevs, accel_info, share_info)
xml = conf.to_xml()
LOG.debug('End _get_guest_xml xml=%(xml)s',
@ -13229,3 +13256,15 @@ class LibvirtDriver(driver.ComputeDriver):
' of host capabilities: %(error)s',
{'uri': self._host._uri, 'error': ex})
return None
def _guest_add_virtiofs_for_share(self, guest, instance, share_info):
    """Add one virtiofs filesystem device per attached share."""
    if not share_info:
        return
    for share in share_info:
        fs = vconfig.LibvirtConfigGuestFilesys()
        fs.source_type = 'mount'
        fs.access_mode = 'passthrough'
        fs.driver_type = 'virtiofs'
        # The share is mounted on the host at this path and exposed to
        # the guest under the mapping's tag.
        fs.source_dir = self._get_share_mount_path(instance, share)
        fs.target_dir = share.tag
        guest.add_device(fs)

View File

@ -62,6 +62,7 @@ from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver
from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import migration as libvirt_migrate
@ -1766,6 +1767,46 @@ class Host(object):
msg = _('Invalid machine type: %s')
raise exception.InternalError(msg % machine)
def _check_file_backed_memory_support(self):
    """Validate the '[libvirt] file_backed_memory' configuration.

    :returns: True when file-backed memory is enabled and its related
        configuration is consistent, False when it is disabled.
    :raises exception.InternalError: when the feature is enabled with an
        unsupported virt_type, with memory overcommit, or with a
        reserved memory amount placement would reject.
    """
    if not CONF.libvirt.file_backed_memory:
        return False

    # file_backed_memory is only compatible with qemu/kvm virts
    virt_type = CONF.libvirt.virt_type
    if virt_type not in ("qemu", "kvm"):
        raise exception.InternalError(
            _('Running Nova with file_backed_memory and virt_type '
              '%(type)s is not supported. file_backed_memory is only '
              'supported with qemu and kvm types.') % {'type': virt_type})

    # file-backed memory doesn't work with memory overcommit, so refuse
    # to start unless ram_allocation_ratio is exactly 1.0.
    if CONF.ram_allocation_ratio != 1.0:
        raise exception.InternalError(
            'Running Nova with file_backed_memory requires '
            'ram_allocation_ratio configured to 1.0')

    reserved_mb = CONF.reserved_host_memory_mb
    if reserved_mb:
        if reserved_mb >= CONF.libvirt.file_backed_memory:
            # Hard failure: placement won't allow total < reserved.
            msg = _(
                "'[libvirt] file_backed_memory', which represents total "
                "memory reported to placement, must be greater than "
                "reserved memory configured via '[DEFAULT] "
                "reserved_host_memory_mb'"
            )
            raise exception.InternalError(msg)
        # TODO(stephenfin): Change this to an exception in W or later
        LOG.warning(
            "Reserving memory via '[DEFAULT] reserved_host_memory_mb' "
            "is not compatible with file-backed memory. Consider "
            "setting '[DEFAULT] reserved_host_memory_mb' to 0. This will "
            "be an error in a future release."
        )

    return True
@property
def has_hyperthreading(self) -> bool:
"""Determine if host CPU has SMT, a.k.a. HyperThreading.
@ -1814,6 +1855,16 @@ class Host(object):
self._supports_uefi = False
return False
@property
def supports_virtio_fs(self) -> bool:
    """Whether this host meets the driver's minimum libvirt/QEMU versions.

    NOTE(review): this only checks the global MIN_LIBVIRT_VERSION /
    MIN_QEMU_VERSION constants — confirm those minima actually imply
    virtiofs support.
    """
    return self.has_min_version(
        lv_ver=driver.MIN_LIBVIRT_VERSION,
        hv_ver=driver.MIN_QEMU_VERSION)
@property
def supports_mem_backing_file(self) -> bool:
    """Whether file-backed memory is enabled and consistently configured.

    Delegates to _check_file_backed_memory_support(), which returns
    False when the feature is disabled and may raise InternalError on an
    inconsistent configuration.
    """
    return self._check_file_backed_memory_support()
@property
def supports_secure_boot(self) -> bool:
"""Determine if the host supports UEFI Secure Boot for guests.

View File

@ -121,7 +121,7 @@ class LibvirtMountedFileSystemVolumeDriver(LibvirtBaseFileSystemVolumeDriver,
vol_name = connection_info['data']['name']
mountpoint = self._get_mount_path(connection_info)
mount.umount(vol_name, mountpoint, instance)
return mount.umount(vol_name, mountpoint, instance)
@abc.abstractmethod
def _mount_options(self, connection_info):

View File

@ -344,6 +344,7 @@ class _HostMountState(object):
:param mountpoint: The directory where the filesystem is be
mounted on the local compute host.
:param instance: The instance the volume was attached to.
:returns: True if the mountpoint is still in used by another instance
"""
LOG.debug('_HostMountState.umount(vol_name=%(vol_name)s, '
'mountpoint=%(mountpoint)s) generation %(gen)s',
@ -373,6 +374,8 @@ class _HostMountState(object):
{'mountpoint': mountpoint, 'gen': self.generation},
instance=instance)
return mount.in_use()
def _real_umount(self, mountpoint):
# Unmount and delete a mountpoint.
# Return mount state after umount (i.e. True means still mounted)
@ -417,4 +420,4 @@ def umount(vol_name, mountpoint, instance):
_HostMountStateManager singleton.
"""
with __manager__.get_state() as mount_state:
mount_state.umount(vol_name, mountpoint, instance)
return mount_state.umount(vol_name, mountpoint, instance)

View File

@ -76,6 +76,8 @@ class VMwareVCDriver(driver.ComputeDriver):
"supports_address_space_passthrough": False,
"supports_address_space_emulated": False,
"supports_stateless_firmware": False,
"supports_virtio_fs": False,
"supports_mem_backing_file": False,
# Image type support flags
"supports_image_type_aki": False,
@ -664,7 +666,7 @@ class VMwareVCDriver(driver.ComputeDriver):
self._vmops.power_off(instance, timeout, retry_interval)
def power_on(self, context, instance, network_info,
block_device_info=None, accel_info=None):
block_device_info=None, accel_info=None, share_info=None):
"""Power on the specified instance."""
self._vmops.power_on(instance)

View File

@ -50,6 +50,8 @@ class ZVMDriver(driver.ComputeDriver):
"supports_address_space_passthrough": False,
"supports_address_space_emulated": False,
"supports_stateless_firmware": False,
"supports_virtio_fs": False,
"supports_mem_backing_file": False,
# Image type support flags
"supports_image_type_aki": False,
@ -398,7 +400,7 @@ class ZVMDriver(driver.ComputeDriver):
self._hypervisor.guest_softstop(instance.name)
def power_on(self, context, instance, network_info,
block_device_info=None, accel_info=None):
block_device_info=None, accel_info=None, share_info=None):
self._hypervisor.guest_start(instance.name)
def pause(self, instance):