Add cinder's new attachment support

Cinder introduced a new attachment API flow in microversion 3.27
(attachment_complete was added in microversion 3.44 and support for
passing the attach mode in 3.54). It provides a clean interface for
managing attachments and is also required for multiattach volume
support (related future work). Nova has used this flow for a long time
and it has proven stable; this patch implements the same for glance.

The create volume and delete volume calls are also moved to the
cinder_utils file so that they use the generic exception handler and to
keep similar code together for consistency.

Partially Implements: blueprint attachment-api-and-multiattach-support

Change-Id: I2758ed1d5b8e0981faa3eff6f83e1ce5975a01d2
Commit 1178f113c4 (parent 98b4a0d4e7)
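For orientation, the flow the commit message describes looks roughly like this at the cinderclient level. This is an illustrative sketch, not code from the patch: `session` is an assumed pre-built keystoneauth1 session, `volume_id` and `connector_properties` are placeholders, and error handling is omitted.

    # Hedged sketch of the new attachment flow, written directly against
    # python-cinderclient; the patch wraps these calls in cinder_utils.API.
    from cinderclient import api_versions
    from cinderclient.v3 import client as cinderclient


    def attach_and_detach(session, volume_id, connector_properties):
        # 3.54 covers attachment create (3.27), complete (3.44) and mode (3.54).
        client = cinderclient.Client(
            session=session, api_version=api_versions.APIVersion('3.54'))

        # 1. Reserve the volume for this consumer (read/write here).
        attachment = client.attachments.create(volume_id, None, mode='rw')
        attachment_id = attachment['id']

        # 2. Hand cinder the host connector so the backend exports the volume.
        client.attachments.update(attachment_id, connector_properties)

        # 3. Tell cinder the host side is connected; the volume goes 'in-use'.
        client.attachments.complete(attachment_id)

        # 4. On detach there is no separate terminate_connection call:
        #    deleting the attachment tears the connection down.
        client.attachments.delete(attachment_id)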
doc/source/conf.py

@@ -95,4 +95,9 @@ latex_documents = [
 # It would never happen in a real scenario as it is only imported
 # from cinder store after the config are loaded but to handle doc
 # failures, we mock it here.
-autodoc_mock_imports = ['glance_store.common.fs_mount']
+# The cinder_utils module imports external dependencies like
+# cinderclient, retrying etc which are not recognized by
+# autodoc, hence, are mocked here. These dependencies are installed
+# during an actual deployment and won't cause any issue during usage.
+autodoc_mock_imports = ['glance_store.common.fs_mount',
+                        'glance_store.common.cinder_utils']
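As the comment in the hunk above notes, autodoc cannot import modules whose runtime dependencies are missing from the docs environment; `autodoc_mock_imports` tells Sphinx to substitute mocks for them at build time. A minimal illustrative conf.py fragment (essentially what the hunk adds):

    # Modules listed here are replaced with mocks while building the docs,
    # so autodoc can import glance_store even when cinderclient, retrying,
    # os-brick etc. are not installed in the docs virtualenv.
    autodoc_mock_imports = [
        'glance_store.common.fs_mount',
        'glance_store.common.cinder_utils',
    ]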
glance_store/_drivers/cinder.py

@@ -33,6 +33,7 @@ from oslo_config import cfg
 from oslo_utils import units

 from glance_store import capabilities
+from glance_store.common import cinder_utils
 from glance_store.common import utils
 import glance_store.driver
 from glance_store import exceptions
@@ -40,6 +41,7 @@ from glance_store.i18n import _, _LE, _LI, _LW
 import glance_store.location

 try:
+    from cinderclient import api_versions
     from cinderclient import exceptions as cinder_exception
     from cinderclient.v3 import client as cinderclient
     from os_brick.initiator import connector
@@ -476,6 +478,7 @@ class Store(glance_store.driver.Store):
             self.store_conf = getattr(self.conf, self.backend_group)
         else:
             self.store_conf = self.conf.glance_store
+        self.volume_api = cinder_utils.API()

     def _set_url_prefix(self):
         self._url_prefix = "cinder://"
@@ -545,7 +548,8 @@ class Store(glance_store.driver.Store):
                          for key in ['user_name', 'password',
                                      'project_name', 'auth_address']])

-    def get_cinderclient(self, context=None, legacy_update=False):
+    def get_cinderclient(self, context=None, legacy_update=False,
+                         version='3.0'):
         # NOTE: For legacy image update from single store to multiple
         # stores we need to use admin context rather than user provided
         # credentials
@@ -588,10 +592,12 @@ class Store(glance_store.driver.Store):
                     reason=reason)
             auth = ksa_token_endpoint.Token(endpoint=url, token=token)

+        api_version = api_versions.APIVersion(version)
         c = cinderclient.Client(
             session=session, auth=auth,
             region_name=self.store_conf.cinder_os_region_name,
-            retries=self.store_conf.cinder_http_retries)
+            retries=self.store_conf.cinder_http_retries,
+            api_version=api_version)

         LOG.debug(
             'Cinderclient connection created for user %(user)s using URL: '
@@ -688,52 +694,49 @@ class Store(glance_store.driver.Store):
         use_multipath = self.store_conf.cinder_use_multipath
         enforce_multipath = self.store_conf.cinder_enforce_multipath
         mount_point_base = self.store_conf.cinder_mount_point_base
+        volume_id = volume.id

-        properties = connector.get_connector_properties(
+        connector_prop = connector.get_connector_properties(
             root_helper, host, use_multipath, enforce_multipath)

-        try:
-            volume.reserve(volume)
-        except cinder_exception.ClientException as e:
-            msg = (_('Failed to reserve volume %(volume_id)s: %(error)s')
-                   % {'volume_id': volume.id, 'error': e})
-            LOG.error(msg)
-            raise exceptions.BackendException(msg)
+        attachment = self.volume_api.attachment_create(client, volume_id,
+                                                       mode=attach_mode)
+        attachment = self.volume_api.attachment_update(
+            client, attachment['id'], connector_prop,
+            mountpoint='glance_store')
+        self.volume_api.attachment_complete(client, attachment.id)
+        volume = volume.manager.get(volume_id)
+        connection_info = attachment.connection_info

         try:
-            connection_info = volume.initialize_connection(volume, properties)
             conn = connector.InitiatorConnector.factory(
                 connection_info['driver_volume_type'], root_helper,
                 conn=connection_info, use_multipath=use_multipath)
             if connection_info['driver_volume_type'] == 'nfs':
                 if volume.encrypted:
-                    volume.unreserve(volume)
                     volume.delete()
+                    self.volume_api.attachment_delete(client, attachment.id)
                     msg = (_('Encrypted volume creation for cinder nfs is not '
                              'supported from glance_store. Failed to create '
                              'volume %(volume_id)s')
-                           % {'volume_id': volume.id})
+                           % {'volume_id': volume_id})
                     LOG.error(msg)
                     raise exceptions.BackendException(msg)

-                @utils.synchronized(connection_info['data']['export'])
+                @utils.synchronized(connection_info['export'])
                 def connect_volume_nfs():
-                    data = connection_info['data']
-                    export = data['export']
-                    vol_name = data['name']
+                    export = connection_info['export']
+                    vol_name = connection_info['name']
                     mountpoint = self._get_mount_path(
                         export,
                         os.path.join(mount_point_base, 'nfs'))
-                    options = data['options']
+                    options = connection_info['options']
                     self.mount.mount(
                         'nfs', export, vol_name, mountpoint, host,
                         root_helper, options)
                     return {'path': os.path.join(mountpoint, vol_name)}
                 device = connect_volume_nfs()
             else:
-                device = conn.connect_volume(connection_info['data'])
-            volume.attach(None, 'glance_store', attach_mode, host_name=host)
-            volume = self._wait_volume_status(volume, 'attaching', 'in-use')
+                device = conn.connect_volume(connection_info)
             if (connection_info['driver_volume_type'] == 'rbd' and
                not conn.do_local_attach):
                 yield device['path']
@@ -746,38 +749,23 @@ class Store(glance_store.driver.Store):
                               '%(volume_id)s.'), {'volume_id': volume.id})
             raise
         finally:
-            if volume.status == 'in-use':
-                volume.begin_detaching(volume)
-            elif volume.status == 'attaching':
-                volume.unreserve(volume)
-
             if device:
                 try:
                     if connection_info['driver_volume_type'] == 'nfs':
-                        @utils.synchronized(connection_info['data']['export'])
+                        @utils.synchronized(connection_info['export'])
                         def disconnect_volume_nfs():
                             path, vol_name = device['path'].rsplit('/', 1)
                             self.mount.umount(vol_name, path, host,
                                               root_helper)
                         disconnect_volume_nfs()
                     else:
-                        conn.disconnect_volume(connection_info['data'], device)
+                        conn.disconnect_volume(connection_info, device)
                 except Exception:
                     LOG.exception(_LE('Failed to disconnect volume '
                                       '%(volume_id)s.'),
                                   {'volume_id': volume.id})

-            try:
-                volume.terminate_connection(volume, properties)
-            except Exception:
-                LOG.exception(_LE('Failed to terminate connection of volume '
-                                  '%(volume_id)s.'), {'volume_id': volume.id})
-
-            try:
-                client.volumes.detach(volume)
-            except Exception:
-                LOG.exception(_LE('Failed to detach volume %(volume_id)s.'),
-                              {'volume_id': volume.id})
+            self.volume_api.attachment_delete(client, attachment.id)

     def _cinder_volume_data_iterator(self, client, volume, max_size, offset=0,
                                      chunk_size=None, partial_length=None):
@@ -824,7 +812,7 @@ class Store(glance_store.driver.Store):
         loc = location.store_location
         self._check_context(context)
         try:
-            client = self.get_cinderclient(context)
+            client = self.get_cinderclient(context, version='3.54')
             volume = client.volumes.get(loc.volume_id)
             size = int(volume.metadata.get('image_size',
                                            volume.size * units.Gi))
@@ -892,7 +880,7 @@ class Store(glance_store.driver.Store):
         """

         self._check_context(context, require_tenant=True)
-        client = self.get_cinderclient(context)
+        client = self.get_cinderclient(context, version='3.54')
         os_hash_value = utils.get_hasher(hashing_algo, False)
         checksum = utils.get_hasher('md5', False)
         bytes_written = 0
@@ -914,9 +902,9 @@ class Store(glance_store.driver.Store):
                           "resize-before-write for each GB which "
                           "will be considerably slower than normal."))
         try:
-            volume = client.volumes.create(size_gb, name=name,
-                                           metadata=metadata,
-                                           volume_type=volume_type)
+            volume = self.volume_api.create(client, size_gb, name=name,
+                                            metadata=metadata,
+                                            volume_type=volume_type)
         except cinder_exception.NotFound:
             LOG.error(_LE("Invalid volume type %s configured. Please check "
                           "the `cinder_volume_type` configuration parameter."
@@ -1025,9 +1013,9 @@ class Store(glance_store.driver.Store):
         """
         loc = location.store_location
         self._check_context(context)
+        client = self.get_cinderclient(context)
         try:
-            volume = self.get_cinderclient(context).volumes.get(loc.volume_id)
-            volume.delete()
+            self.volume_api.delete(client, loc.volume_id)
         except cinder_exception.NotFound:
             raise exceptions.NotFound(image=loc.volume_id)
         except cinder_exception.ClientException as e:
glance_store/common/cinder_utils.py (new file, 194 lines)
@@ -0,0 +1,194 @@
# Copyright 2021 RedHat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from cinderclient.apiclient import exceptions as apiclient_exception
from cinderclient import exceptions as cinder_exception
from keystoneauth1 import exceptions as keystone_exc
from oslo_utils import excutils
import retrying

from glance_store import exceptions
from glance_store.i18n import _LE

LOG = logging.getLogger(__name__)


def handle_exceptions(method):
    """Transforms the exception for the volume but keeps its traceback intact.
    """
    def wrapper(self, ctx, volume_id, *args, **kwargs):
        try:
            res = method(self, ctx, volume_id, *args, **kwargs)
        except (keystone_exc.NotFound,
                cinder_exception.NotFound,
                cinder_exception.OverLimit) as e:
            raise exceptions.BackendException(str(e))
        return res
    return wrapper


def _retry_on_internal_server_error(e):
    if isinstance(e, apiclient_exception.InternalServerError):
        return True
    return False

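For illustration only (this block is not part of the new file): the decorator above re-raises cinderclient and keystoneauth "not found" and over-limit errors as the store's own BackendException. FakeVolumesClient below is invented for the example.

    from cinderclient import exceptions as cinder_exception

    from glance_store.common import cinder_utils
    from glance_store import exceptions


    class FakeVolumesClient(object):
        class volumes(object):
            @staticmethod
            def delete(volume_id):
                # Pretend the volume does not exist.
                raise cinder_exception.NotFound(404)


    try:
        # API.delete is wrapped by handle_exceptions, so the cinderclient
        # NotFound surfaces as a glance_store BackendException.
        cinder_utils.API().delete(FakeVolumesClient(), 'fake-volume-id')
    except exceptions.BackendException as e:
        print('translated error: %s' % e)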
class API(object):
    """API for interacting with cinder."""

    @handle_exceptions
    def create(self, client, size, name,
               volume_type=None, metadata=None):

        kwargs = dict(volume_type=volume_type,
                      metadata=metadata,
                      name=name)

        volume = client.volumes.create(size, **kwargs)
        return volume

    @handle_exceptions
    def delete(self, client, volume_id):
        client.volumes.delete(volume_id)

    @handle_exceptions
    def attachment_create(self, client, volume_id, connector=None,
                          mountpoint=None, mode=None):
        """Create a volume attachment. This requires microversion >= 3.54.

        The attachment_create call was introduced in microversion 3.27. We
        need 3.54 as the minimum here since we need attachment_complete to
        finish the attaching process, which was introduced in version 3.44,
        and we also pass the attach mode, which was introduced in version
        3.54.

        :param client: cinderclient object
        :param volume_id: UUID of the volume on which to create the
            attachment.
        :param connector: host connector dict; if None, the attachment will
            be 'reserved' but not yet attached.
        :param mountpoint: Optional mount device name for the attachment,
            e.g. "/dev/vdb". This is only used if a connector is provided.
        :param mode: The mode in which the attachment is made, i.e.
            read only (ro) or read/write (rw)
        :returns: a dict created from the
            cinderclient.v3.attachments.VolumeAttachment object with a
            backward compatible connection_info dict
        """
        if connector and mountpoint and 'mountpoint' not in connector:
            connector['mountpoint'] = mountpoint

        try:
            attachment_ref = client.attachments.create(
                volume_id, connector, mode=mode)
            return attachment_ref
        except cinder_exception.ClientException as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Create attachment failed for volume '
                              '%(volume_id)s. Error: %(msg)s Code: %(code)s'),
                          {'volume_id': volume_id,
                           'msg': str(ex),
                           'code': getattr(ex, 'code', None)})

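Again for illustration only (not part of the new file): with a mocked client you can see the behaviour the attachment_create docstring describes. It folds the optional mountpoint into the connector, and omitting the connector simply reserves the volume. The IDs and connector values are placeholders.

    from unittest import mock

    from glance_store.common import cinder_utils

    volume_api = cinder_utils.API()
    fake_client = mock.MagicMock()

    # No connector yet: the attachment is only "reserved" in cinder.
    volume_api.attachment_create(fake_client, 'fake-volume-id', mode='rw')
    fake_client.attachments.create.assert_called_with(
        'fake-volume-id', None, mode='rw')

    # With a connector and a mountpoint, the mountpoint rides inside the
    # connector dict before the call is made.
    conn = {'host': 'fake-host', 'initiator': 'fake-iqn'}
    volume_api.attachment_create(fake_client, 'fake-volume-id',
                                 connector=conn, mountpoint='/dev/vdb')
    fake_client.attachments.create.assert_called_with(
        'fake-volume-id', conn, mode=None)
    assert conn['mountpoint'] == '/dev/vdb'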
    @handle_exceptions
    def attachment_get(self, client, attachment_id):
        """Gets a volume attachment.

        :param client: cinderclient object
        :param attachment_id: UUID of the volume attachment to get.
        :returns: a dict created from the
            cinderclient.v3.attachments.VolumeAttachment object with a
            backward compatible connection_info dict
        """
        try:
            attachment_ref = client.attachments.show(
                attachment_id)
            return attachment_ref
        except cinder_exception.ClientException as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Show attachment failed for attachment '
                              '%(id)s. Error: %(msg)s Code: %(code)s'),
                          {'id': attachment_id,
                           'msg': str(ex),
                           'code': getattr(ex, 'code', None)})

    @handle_exceptions
    def attachment_update(self, client, attachment_id, connector,
                          mountpoint=None):
        """Updates the connector on the volume attachment. An attachment
        without a connector is considered reserved but not fully attached.

        :param client: cinderclient object
        :param attachment_id: UUID of the volume attachment to update.
        :param connector: host connector dict. This is required when updating
            a volume attachment. To terminate a connection, the volume
            attachment for that connection must be deleted.
        :param mountpoint: Optional mount device name for the attachment,
            e.g. "/dev/vdb". Theoretically this is optional per volume
            backend, but in practice it's normally required so it's best to
            always provide a value.
        :returns: a dict created from the
            cinderclient.v3.attachments.VolumeAttachment object with a
            backward compatible connection_info dict
        """
        if mountpoint and 'mountpoint' not in connector:
            connector['mountpoint'] = mountpoint

        try:
            attachment_ref = client.attachments.update(
                attachment_id, connector)
            return attachment_ref
        except cinder_exception.ClientException as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Update attachment failed for attachment '
                              '%(id)s. Error: %(msg)s Code: %(code)s'),
                          {'id': attachment_id,
                           'msg': str(ex),
                           'code': getattr(ex, 'code', None)})

    @handle_exceptions
    def attachment_complete(self, client, attachment_id):
        """Marks a volume attachment complete.

        This call should be used to inform Cinder that a volume attachment is
        fully connected on the host so Cinder can apply the necessary state
        changes to the volume info in its database.

        :param client: cinderclient object
        :param attachment_id: UUID of the volume attachment to update.
        """
        try:
            client.attachments.complete(attachment_id)
        except cinder_exception.ClientException as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Complete attachment failed for attachment '
                              '%(id)s. Error: %(msg)s Code: %(code)s'),
                          {'id': attachment_id,
                           'msg': str(ex),
                           'code': getattr(ex, 'code', None)})

    @handle_exceptions
    @retrying.retry(stop_max_attempt_number=5,
                    retry_on_exception=_retry_on_internal_server_error)
    def attachment_delete(self, client, attachment_id):
        try:
            client.attachments.delete(attachment_id)
        except cinder_exception.ClientException as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Delete attachment failed for attachment '
                              '%(id)s. Error: %(msg)s Code: %(code)s'),
                          {'id': attachment_id,
                           'msg': str(ex),
                           'code': getattr(ex, 'code', None)})
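The retrying decorator on attachment_delete re-invokes the call only while the predicate returns True and gives up after five attempts. A self-contained sketch of that behaviour (not part of the patch; TransientError stands in for apiclient_exception.InternalServerError):

    import retrying


    class TransientError(Exception):
        pass


    def _retry_on_transient_error(exc):
        # Mirrors _retry_on_internal_server_error: only this error is retried.
        return isinstance(exc, TransientError)


    attempts = {'count': 0}


    @retrying.retry(stop_max_attempt_number=5,
                    retry_on_exception=_retry_on_transient_error)
    def flaky_delete():
        attempts['count'] += 1
        if attempts['count'] < 3:
            raise TransientError()  # first two calls fail, then succeed
        return 'deleted'


    print(flaky_delete(), 'after', attempts['count'], 'attempts')
    # -> deleted after 3 attempts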
glance_store/tests/unit/common/test_cinder_utils.py (new file, 157 lines)
@@ -0,0 +1,157 @@
# Copyright 2021 RedHat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock
import uuid

from cinderclient.apiclient import exceptions as apiclient_exception
from cinderclient import exceptions as cinder_exception
from oslo_config import cfg
from oslotest import base

from glance_store.common import cinder_utils

CONF = cfg.CONF


class FakeObject(object):
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)


class CinderUtilsTestCase(base.BaseTestCase):

    def setUp(self):
        super(CinderUtilsTestCase, self).setUp()
        CONF.register_opt(cfg.DictOpt('enabled_backends'))
        CONF.set_override('enabled_backends', 'fake:cinder')
        self.volume_api = cinder_utils.API()
        self.fake_client = FakeObject(attachments=FakeObject(
            create=mock.MagicMock(), delete=mock.MagicMock(),
            complete=mock.MagicMock(), update=mock.MagicMock(),
            show=mock.MagicMock()))
        self.fake_vol_id = uuid.uuid4()
        self.fake_attach_id = uuid.uuid4()
        self.fake_connector = {
            'platform': 'x86_64', 'os_type': 'linux', 'ip': 'fake_ip',
            'host': 'fake_host', 'multipath': False,
            'initiator': 'fake_initiator', 'do_local_attach': False,
            'uuid': '3e1a7217-104e-41c1-b177-a37c491129a0',
            'system uuid': '98755544-c749-40ed-b30a-a1cb27b2a46d',
            'nqn': 'fake_nqn'}

    def test_attachment_create(self):
        self.volume_api.attachment_create(self.fake_client, self.fake_vol_id)
        self.fake_client.attachments.create.assert_called_once_with(
            self.fake_vol_id, None, mode=None)

    def test_attachment_create_with_connector_and_mountpoint(self):
        self.volume_api.attachment_create(
            self.fake_client, self.fake_vol_id,
            connector=self.fake_connector, mountpoint='fake_mountpoint')
        self.fake_connector['mountpoint'] = 'fake_mountpoint'
        self.fake_client.attachments.create.assert_called_once_with(
            self.fake_vol_id, self.fake_connector, mode=None)

    def test_attachment_create_client_exception(self):
        self.fake_client.attachments.create.side_effect = (
            cinder_exception.ClientException(code=1))
        self.assertRaises(
            cinder_exception.ClientException,
            self.volume_api.attachment_create,
            self.fake_client, self.fake_vol_id)

    def test_attachment_get(self):
        self.volume_api.attachment_get(self.fake_client, self.fake_attach_id)
        self.fake_client.attachments.show.assert_called_once_with(
            self.fake_attach_id)

    def test_attachment_get_client_exception(self):
        self.fake_client.attachments.show.side_effect = (
            cinder_exception.ClientException(code=1))
        self.assertRaises(
            cinder_exception.ClientException,
            self.volume_api.attachment_get,
            self.fake_client, self.fake_attach_id)

    def test_attachment_update(self):
        self.volume_api.attachment_update(self.fake_client,
                                          self.fake_attach_id,
                                          self.fake_connector)
        self.fake_client.attachments.update.assert_called_once_with(
            self.fake_attach_id, self.fake_connector)

    def test_attachment_update_with_connector_and_mountpoint(self):
        self.volume_api.attachment_update(
            self.fake_client, self.fake_attach_id, self.fake_connector,
            mountpoint='fake_mountpoint')
        self.fake_connector['mountpoint'] = 'fake_mountpoint'
        self.fake_client.attachments.update.assert_called_once_with(
            self.fake_attach_id, self.fake_connector)

    def test_attachment_update_client_exception(self):
        self.fake_client.attachments.update.side_effect = (
            cinder_exception.ClientException(code=1))
        self.assertRaises(
            cinder_exception.ClientException,
            self.volume_api.attachment_update,
            self.fake_client, self.fake_attach_id, self.fake_connector)

    def test_attachment_complete(self):
        self.volume_api.attachment_complete(self.fake_client,
                                            self.fake_attach_id)
        self.fake_client.attachments.complete.assert_called_once_with(
            self.fake_attach_id)

    def test_attachment_complete_client_exception(self):
        self.fake_client.attachments.complete.side_effect = (
            cinder_exception.ClientException(code=1))
        self.assertRaises(
            cinder_exception.ClientException,
            self.volume_api.attachment_complete,
            self.fake_client, self.fake_attach_id)

    def test_attachment_delete(self):
        self.volume_api.attachment_delete(self.fake_client,
                                          self.fake_attach_id)
        self.fake_client.attachments.delete.assert_called_once_with(
            self.fake_attach_id)

    def test_attachment_delete_client_exception(self):
        self.fake_client.attachments.delete.side_effect = (
            cinder_exception.ClientException(code=1))
        self.assertRaises(
            cinder_exception.ClientException,
            self.volume_api.attachment_delete,
            self.fake_client, self.fake_attach_id)

    def test_attachment_delete_retries(self):
        # Make delete fail two times and succeed on the third attempt.
        self.fake_client.attachments.delete.side_effect = [
            apiclient_exception.InternalServerError(),
            apiclient_exception.InternalServerError(),
            lambda aid: 'foo']

        # Make sure we get a clean result.
        self.assertIsNone(self.volume_api.attachment_delete(
            self.fake_client, self.fake_attach_id))

        # Assert that we called delete three times due to the retry
        # decorator.
        self.fake_client.attachments.delete.assert_has_calls([
            mock.call(self.fake_attach_id),
            mock.call(self.fake_attach_id),
            mock.call(self.fake_attach_id)])
Changes to the existing cinder store tests (TestCinderStore):

@@ -31,6 +31,7 @@ from oslo_concurrency import processutils
 from oslo_utils.secretutils import md5
 from oslo_utils import units

+from glance_store.common import cinder_utils
 from glance_store import exceptions
 from glance_store import location
 from glance_store.tests import base
@@ -154,8 +155,11 @@ class TestCinderStore(base.StoreBaseTest,
                                  encrypted_nfs=False):
         self.config(cinder_mount_point_base=None)
         fake_volume = mock.MagicMock(id=str(uuid.uuid4()), status='available')
-        fake_volumes = FakeObject(get=lambda id: fake_volume,
-                                  detach=mock.Mock())
+        fake_volumes = FakeObject(get=lambda id: fake_volume)
+        fake_attachment_id = str(uuid.uuid4())
+        fake_attachment_create = {'id': fake_attachment_id}
+        fake_attachment_update = mock.MagicMock(id=fake_attachment_id)
+        fake_conn_info = mock.MagicMock(connector={})
         fake_client = FakeObject(volumes=fake_volumes)
         _, fake_dev_path = tempfile.mkstemp(dir=self.test_dir)
         fake_devinfo = {'path': fake_dev_path}
@@ -174,8 +178,6 @@ class TestCinderStore(base.StoreBaseTest,
                 raise error

         def fake_factory(protocol, root_helper, **kwargs):
-            self.assertEqual(fake_volume.initialize_connection.return_value,
-                             kwargs['conn'])
             return fake_connector

         root_helper = "sudo glance-rootwrap /etc/glance/rootwrap.conf"
@@ -187,10 +189,22 @@ class TestCinderStore(base.StoreBaseTest,
                 mock.patch.object(cinder.Store, 'get_root_helper',
                                   return_value=root_helper), \
                 mock.patch.object(connector.InitiatorConnector, 'factory',
-                                  side_effect=fake_factory) as fake_conn_obj:
+                                  side_effect=fake_factory
+                                  ) as fake_conn_obj, \
+                mock.patch.object(cinder_utils.API, 'attachment_create',
+                                  return_value=fake_attachment_create
+                                  ) as attach_create, \
+                mock.patch.object(cinder_utils.API, 'attachment_update',
+                                  return_value=fake_attachment_update
+                                  ) as attach_update, \
+                mock.patch.object(cinder_utils.API,
+                                  'attachment_delete') as attach_delete, \
+                mock.patch.object(cinder_utils.API,
+                                  'attachment_complete') as attach_complete:

             with mock.patch.object(connector,
-                                   'get_connector_properties') as mock_conn:
+                                   'get_connector_properties',
+                                   return_value=fake_conn_info) as mock_conn:
                 if error:
                     self.assertRaises(error, do_open)
                 elif encrypted_nfs:
@@ -218,13 +232,18 @@ class TestCinderStore(base.StoreBaseTest,
                         mock.ANY)
                     fake_connector.disconnect_volume.assert_called_once_with(
                         mock.ANY, fake_devinfo)
-                    fake_volume.attach.assert_called_once_with(
-                        None, 'glance_store', attach_mode,
-                        host_name=socket.gethostname())
-                    fake_volumes.detach.assert_called_once_with(fake_volume)
                     fake_conn_obj.assert_called_once_with(
                         mock.ANY, root_helper, conn=mock.ANY,
                         use_multipath=multipath_supported)
+                    attach_create.assert_called_once_with(
+                        fake_client, fake_volume.id, mode=attach_mode)
+                    attach_update.assert_called_once_with(
+                        fake_client, fake_attachment_id,
+                        fake_conn_info, mountpoint='glance_store')
+                    attach_complete.assert_called_once_with(
+                        fake_client, fake_attachment_id)
+                    attach_delete.assert_called_once_with(
+                        fake_client, fake_attachment_id)

     def test_open_cinder_volume_rw(self):
         self._test_open_cinder_volume('wb', 'rw', None)
@@ -400,8 +419,7 @@ class TestCinderStore(base.StoreBaseTest,
     def test_cinder_delete(self):
         fake_client = FakeObject(auth_token=None, management_url=None)
         fake_volume_uuid = str(uuid.uuid4())
-        fake_volume = FakeObject(delete=mock.Mock())
-        fake_volumes = {fake_volume_uuid: fake_volume}
+        fake_volumes = FakeObject(delete=mock.Mock())

         with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
             mocked_cc.return_value = FakeObject(client=fake_client,
@@ -410,7 +428,7 @@ class TestCinderStore(base.StoreBaseTest,
             uri = 'cinder://%s' % fake_volume_uuid
             loc = location.get_location_from_uri(uri, conf=self.conf)
             self.store.delete(loc, context=self.context)
-            fake_volume.delete.assert_called_once_with()
+            fake_volumes.delete.assert_called_once_with(fake_volume_uuid)

     def test_set_url_prefix(self):
         self.assertEqual('cinder://', self.store._url_prefix)
Changes to the existing multi-store cinder tests (TestMultiCinderStore):

@@ -33,6 +33,7 @@ from oslo_utils.secretutils import md5
 from oslo_utils import units

 import glance_store as store
+from glance_store.common import cinder_utils
 from glance_store import exceptions
 from glance_store import location
 from glance_store.tests import base
@@ -185,8 +186,11 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
                                  enforce_multipath=False):
         self.config(cinder_mount_point_base=None, group='cinder1')
         fake_volume = mock.MagicMock(id=str(uuid.uuid4()), status='available')
-        fake_volumes = FakeObject(get=lambda id: fake_volume,
-                                  detach=mock.Mock())
+        fake_attachment_id = str(uuid.uuid4())
+        fake_attachment_create = {'id': fake_attachment_id}
+        fake_attachment_update = mock.MagicMock(id=fake_attachment_id)
+        fake_conn_info = mock.MagicMock(connector={})
+        fake_volumes = FakeObject(get=lambda id: fake_volume)
         fake_client = FakeObject(volumes=fake_volumes)
         _, fake_dev_path = tempfile.mkstemp(dir=self.test_dir)
         fake_devinfo = {'path': fake_dev_path}
@@ -205,8 +209,6 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
                 raise error

         def fake_factory(protocol, root_helper, **kwargs):
-            self.assertEqual(fake_volume.initialize_connection.return_value,
-                             kwargs['conn'])
             return fake_connector

         root_helper = "sudo glance-rootwrap /etc/glance/rootwrap.conf"
@@ -218,10 +220,24 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
                 mock.patch.object(cinder.Store, 'get_root_helper',
                                   return_value=root_helper), \
                 mock.patch.object(connector.InitiatorConnector, 'factory',
-                                  side_effect=fake_factory) as fake_conn_obj:
+                                  side_effect=fake_factory
+                                  ) as fake_conn_obj, \
+                mock.patch.object(cinder_utils.API,
+                                  'attachment_create',
+                                  return_value=fake_attachment_create
+                                  ) as attach_create, \
+                mock.patch.object(cinder_utils.API,
+                                  'attachment_update',
+                                  return_value=fake_attachment_update
+                                  ) as attach_update, \
+                mock.patch.object(cinder_utils.API,
+                                  'attachment_delete') as attach_delete, \
+                mock.patch.object(cinder_utils.API,
+                                  'attachment_complete') as attach_complete:

             with mock.patch.object(connector,
-                                   'get_connector_properties') as mock_conn:
+                                   'get_connector_properties',
+                                   return_value=fake_conn_info) as mock_conn:
                 if error:
                     self.assertRaises(error, do_open)
                 else:
@@ -233,13 +249,18 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
                     fake_connector.connect_volume.assert_called_once_with(mock.ANY)
                     fake_connector.disconnect_volume.assert_called_once_with(
                         mock.ANY, fake_devinfo)
-                    fake_volume.attach.assert_called_once_with(
-                        None, 'glance_store', attach_mode,
-                        host_name=socket.gethostname())
-                    fake_volumes.detach.assert_called_once_with(fake_volume)
                     fake_conn_obj.assert_called_once_with(
                         mock.ANY, root_helper, conn=mock.ANY,
                         use_multipath=multipath_supported)
+                    attach_create.assert_called_once_with(
+                        fake_client, fake_volume.id, mode=attach_mode)
+                    attach_update.assert_called_once_with(
+                        fake_client, fake_attachment_id,
+                        fake_conn_info, mountpoint='glance_store')
+                    attach_complete.assert_called_once_with(fake_client,
+                                                            fake_attachment_id)
+                    attach_delete.assert_called_once_with(fake_client,
+                                                          fake_attachment_id)

     def test_open_cinder_volume_rw(self):
         self._test_open_cinder_volume('wb', 'rw', None)
@@ -499,8 +520,7 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
     def test_cinder_delete(self):
         fake_client = FakeObject(auth_token=None, management_url=None)
         fake_volume_uuid = str(uuid.uuid4())
-        fake_volume = FakeObject(delete=mock.Mock())
-        fake_volumes = {fake_volume_uuid: fake_volume}
+        fake_volumes = FakeObject(delete=mock.Mock())

         with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
             mocked_cc.return_value = FakeObject(client=fake_client,
@@ -511,7 +531,7 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
                                                  "cinder1",
                                                  conf=self.conf)
             self.store.delete(loc, context=self.context)
-            fake_volume.delete.assert_called_once_with()
+            fake_volumes.delete.assert_called_once_with(fake_volume_uuid)

     def test_cinder_add_different_backend(self):
         self.store = cinder.Store(self.conf, backend="cinder2")
lower-constraints.txt

@@ -72,6 +72,7 @@ requests==2.14.2
 requestsexceptions==1.4.0
 requests-mock==1.2.0
 restructuredtext-lint==1.1.3
+retrying==1.3.3
 rfc3986==1.1.0
 six==1.11.0
 smmap2==2.0.3
test-requirements.txt

@@ -14,6 +14,7 @@ coverage!=4.4,>=4.0 # Apache-2.0
 fixtures>=3.0.0 # Apache-2.0/BSD
 python-subunit>=1.0.0 # Apache-2.0/BSD
 requests-mock>=1.2.0 # Apache-2.0
+retrying>=1.3.3
 stestr>=2.0.0 # Apache-2.0
 testscenarios>=0.4 # Apache-2.0/BSD
 testtools>=2.2.0 # MIT