Refactor/restructure glance cinder store
This is an effort to decouple some of the cinder backend specific code (like nfs, scaleio) from the generic logic. The purpose is to make the code modular so that a change for a particular cinder backend does not affect the code path of other backends, thereby reducing regressions. This is also required for another use case, supporting extend of attached volumes, added in [1]. The following are the major changes done in this patch: 1) Move the cinder store to a new directory 'cinder' and rename 'cinder.py' to 'store.py' (similar to swift). 2) Create new files for the nfs and scaleio backends, moving code specific to these backends into their own separate files. This also fixes a bug with sparse files in nfs: we wait for the file size to become equal to the volume size (a check originally added for the scaleio/powerflex backend), but this will never happen for nfs sparse files. See bug: 2000584. 3) Move cinder tests to the 'tests/unit/cinder' directory and add tests for the base, nfs and scaleio files. 4) Modify/fix existing tests. Closes-Bug: #2000584 [1] https://review.opendev.org/c/openstack/glance_store/+/868742 Depends-On: https://review.opendev.org/c/openstack/glance/+/869021 Change-Id: I26c272b6c503e98fbbafca411d3eec47283bd6fc
This commit is contained in:
parent
1a01fe7fd7
commit
d0733a0f4f
16
glance_store/_drivers/cinder/__init__.py
Normal file
16
glance_store/_drivers/cinder/__init__.py
Normal file
@ -0,0 +1,16 @@
|
||||
# Copyright 2023 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from glance_store._drivers.cinder.store import * # noqa
|
63
glance_store/_drivers/cinder/base.py
Normal file
63
glance_store/_drivers/cinder/base.py
Normal file
@ -0,0 +1,63 @@
|
||||
# Copyright 2023 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_utils import importutils
|
||||
|
||||
from os_brick.initiator import connector
|
||||
|
||||
NFS = 'nfs'
|
||||
SCALEIO = "scaleio"
|
||||
|
||||
BASE = 'glance_store._drivers.cinder.base.BaseBrickConnectorInterface'
|
||||
|
||||
_connector_mapping = {
|
||||
NFS: 'glance_store._drivers.cinder.nfs.NfsBrickConnector',
|
||||
SCALEIO: 'glance_store._drivers.cinder.scaleio.ScaleIOBrickConnector',
|
||||
}
|
||||
|
||||
|
||||
def factory(*args, **kwargs):
    """Instantiate the brick connector matching the volume's protocol.

    Reads ``driver_volume_type`` from the supplied ``connection_info``
    and imports the backend-specific connector class registered in
    ``_connector_mapping`` (nfs, scaleio), falling back to the generic
    ``BaseBrickConnectorInterface`` for every other protocol.

    :param kwargs: must contain ``connection_info`` with a
        ``driver_volume_type`` key; all arguments are forwarded to the
        connector's constructor.
    :returns: an instance of the selected connector class.
    """
    protocol = kwargs.get('connection_info')['driver_volume_type']
    class_path = _connector_mapping.get(protocol, BASE)
    conn_cls = importutils.import_class(class_path)
    return conn_cls(*args, **kwargs)
|
||||
|
||||
|
||||
class BaseBrickConnectorInterface(object):
    """Generic connector that delegates attach/detach to os-brick.

    Backend-specific subclasses (nfs, scaleio) override whichever
    methods need custom handling; everything else falls through to the
    os-brick ``InitiatorConnector`` built here.
    """

    def __init__(self, *args, **kwargs):
        # Keyword-only wiring: the store passes connection_info,
        # root_helper and use_multipath through base.factory().
        self.connection_info = kwargs.get('connection_info')
        self.root_helper = kwargs.get('root_helper')
        self.use_multipath = kwargs.get('use_multipath')
        self.conn = connector.InitiatorConnector.factory(
            self.connection_info['driver_volume_type'], self.root_helper,
            conn=self.connection_info, use_multipath=self.use_multipath)

    def connect_volume(self, volume):
        """Attach the volume via os-brick and return the device info dict."""
        return self.conn.connect_volume(self.connection_info)

    def disconnect_volume(self, device):
        """Detach the device previously returned by connect_volume."""
        self.conn.disconnect_volume(self.connection_info, device)

    def yield_path(self, volume, volume_path):
        """
        This method returns the volume file path.

        The reason for it's implementation is to fix Bug#2000584. More
        information is added in the ScaleIO connector which makes actual
        use of it's implementation.
        """
        return volume_path
|
103
glance_store/_drivers/cinder/nfs.py
Normal file
103
glance_store/_drivers/cinder/nfs.py
Normal file
@ -0,0 +1,103 @@
|
||||
# Copyright 2023 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
import socket
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from glance_store._drivers.cinder import base
|
||||
from glance_store.common import cinder_utils
|
||||
from glance_store.common import fs_mount as mount
|
||||
from glance_store.common import utils
|
||||
from glance_store import exceptions
|
||||
from glance_store.i18n import _
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NfsBrickConnector(base.BaseBrickConnectorInterface):
    """Connector for cinder NFS volumes.

    NFS volumes are attached by mounting the export locally rather than
    through an os-brick InitiatorConnector, so this class deliberately
    does not call ``super().__init__()``.
    """

    def __init__(self, *args, **kwargs):
        self.volume = kwargs.get('volume')
        self.connection_info = kwargs.get('connection_info')
        self.root_helper = kwargs.get('root_helper')
        self.mount_point_base = kwargs.get('mountpoint_base')
        self.attachment_obj = kwargs.get('attachment_obj')
        self.client = kwargs.get('client')
        self.host = socket.gethostname()
        self.volume_api = cinder_utils.API()

    def _get_mount_path(self, share, mount_point_base):
        """Returns the mount path prefix using the mount point base and share.

        :param share: the NFS export string; hashed to produce a unique
            per-share directory name.
        :param mount_point_base: directory under which the share is
            mounted (callers pass ``<cinder_mount_point_base>/nfs``).
        :returns: The mount path prefix.
        """
        # Fix: use the mount_point_base argument instead of ignoring it
        # in favor of self.mount_point_base. The caller joins the 'nfs'
        # subdirectory into the argument (as the pre-refactor store code
        # did), so ignoring it would mount shares one directory higher.
        return os.path.join(mount_point_base,
                            NfsBrickConnector.get_hash_str(share))

    @staticmethod
    def get_hash_str(base_str):
        """Returns string that represents SHA256 hash of base_str (in hex format).

        If base_str is a Unicode string, encode it to UTF-8.
        """
        if isinstance(base_str, str):
            base_str = base_str.encode('utf-8')
        return hashlib.sha256(base_str).hexdigest()

    def connect_volume(self, volume):
        """Mount the NFS export and return the volume's file path.

        :raises exceptions.BackendException: for encrypted or qcow2
            volumes, which are not supported over cinder NFS.
        """
        # The format info of nfs volumes is exposed via attachment_get
        # API hence it is not available in the connection info of
        # attachment object received from attachment_update and we
        # need to do this call
        vol_attachment = self.volume_api.attachment_get(
            self.client, self.attachment_obj.id)
        if (volume.encrypted or
                vol_attachment.connection_info['format'] == 'qcow2'):
            issue_type = 'Encrypted' if volume.encrypted else 'qcow2'
            msg = (_('%(issue_type)s volume creation for cinder nfs '
                     'is not supported from glance_store. Failed to '
                     'create volume %(volume_id)s')
                   % {'issue_type': issue_type,
                      'volume_id': volume.id})
            LOG.error(msg)
            raise exceptions.BackendException(msg)

        # Serialize mounts of the same export across greenthreads.
        @utils.synchronized(self.connection_info['export'])
        def connect_volume_nfs():
            export = self.connection_info['export']
            vol_name = self.connection_info['name']
            mountpoint = self._get_mount_path(
                export, os.path.join(self.mount_point_base, 'nfs'))
            options = self.connection_info['options']
            mount.mount(
                'nfs', export, vol_name, mountpoint, self.host,
                self.root_helper, options)
            return {'path': os.path.join(mountpoint, vol_name)}

        device = connect_volume_nfs()
        return device

    def disconnect_volume(self, device):
        """Unmount the export backing the given device path."""
        @utils.synchronized(self.connection_info['export'])
        def disconnect_volume_nfs():
            path, vol_name = device['path'].rsplit('/', 1)
            mount.umount(vol_name, path, self.host,
                         self.root_helper)
        disconnect_volume_nfs()
|
79
glance_store/_drivers/cinder/scaleio.py
Normal file
79
glance_store/_drivers/cinder/scaleio.py
Normal file
@ -0,0 +1,79 @@
|
||||
# Copyright 2023 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import time
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import units
|
||||
|
||||
from glance_store._drivers.cinder import base
|
||||
from glance_store import exceptions
|
||||
from glance_store.i18n import _
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ScaleIOBrickConnector(base.BaseBrickConnectorInterface):
    """Connector for ScaleIO/PowerFlex volumes.

    Adds a wait-for-resize step in ``yield_path`` because extended LUNs
    can take time to reflect their new size on the attached device.
    """

    @staticmethod
    def _get_device_size(device_file):
        """Return the size of the open device file, rounded up to GiB."""
        # The seek position is corrected after every extend operation
        # with the bytes written (which is after this wait call) so we
        # don't need to worry about setting it back to original position
        device_file.seek(0, os.SEEK_END)
        # There are other ways to determine the file size like os.stat
        # or os.path.getsize but it requires file name attribute which
        # we don't have for the RBD file wrapper RBDVolumeIOWrapper
        device_size = device_file.tell()
        device_size = int(math.ceil(float(device_size) / units.Gi))
        return device_size

    @staticmethod
    def _wait_resize_device(volume, device_file):
        """Poll with exponential backoff until the device reaches volume.size.

        :raises exceptions.BackendException: if the device has not grown
            to the volume size within the timeout.
        """
        timeout = 20
        max_recheck_wait = 10
        tries = 0
        elapsed = 0
        while ScaleIOBrickConnector._get_device_size(
                device_file) < volume.size:
            wait = min(0.5 * 2 ** tries, max_recheck_wait)
            time.sleep(wait)
            tries += 1
            elapsed += wait
            if elapsed >= timeout:
                # Fix: original message read "Timeout while waiting
                # while volume ... to resize", which is ungrammatical.
                msg = (_('Timeout while waiting for volume %(volume_id)s '
                         'to resize the device in %(tries)s tries.')
                       % {'volume_id': volume.id, 'tries': tries})
                LOG.error(msg)
                raise exceptions.BackendException(msg)

    def yield_path(self, volume, volume_path):
        """
        This method waits for the LUN size to match the volume size.

        This method is created to fix Bug#2000584 where NFS sparse volumes
        timeout waiting for the file size to match the volume.size field.
        The reason is that the volume is sparse and only takes up space of
        data which is written to it (similar to thin provisioned volumes).
        """
        # Sometimes the extended LUN on storage side takes time
        # to reflect in the device so we wait until the device
        # size is equal to the extended volume size.
        ScaleIOBrickConnector._wait_resize_device(volume, volume_path)
        return volume_path
|
@ -14,7 +14,6 @@
|
||||
|
||||
import contextlib
|
||||
import errno
|
||||
import hashlib
|
||||
import importlib
|
||||
import logging
|
||||
import math
|
||||
@ -33,6 +32,7 @@ from oslo_config import cfg
|
||||
from oslo_utils import strutils
|
||||
from oslo_utils import units
|
||||
|
||||
from glance_store._drivers.cinder import base
|
||||
from glance_store import capabilities
|
||||
from glance_store.common import attachment_state_manager
|
||||
from glance_store.common import cinder_utils
|
||||
@ -644,37 +644,6 @@ class Store(glance_store.driver.Store):
|
||||
raise exceptions.BadStoreConfiguration(store_name="cinder",
|
||||
reason=reason)
|
||||
|
||||
@staticmethod
|
||||
def _get_device_size(device_file):
|
||||
# The seek position is corrected after every extend operation
|
||||
# with the bytes written (which is after this wait call) so we
|
||||
# don't need to worry about setting it back to original position
|
||||
device_file.seek(0, os.SEEK_END)
|
||||
# There are other ways to determine the file size like os.stat
|
||||
# or os.path.getsize but it requires file name attribute which
|
||||
# we don't have for the RBD file wrapper RBDVolumeIOWrapper
|
||||
device_size = device_file.tell()
|
||||
device_size = int(math.ceil(float(device_size) / units.Gi))
|
||||
return device_size
|
||||
|
||||
@staticmethod
|
||||
def _wait_resize_device(volume, device_file):
|
||||
timeout = 20
|
||||
max_recheck_wait = 10
|
||||
tries = 0
|
||||
elapsed = 0
|
||||
while Store._get_device_size(device_file) < volume.size:
|
||||
wait = min(0.5 * 2 ** tries, max_recheck_wait)
|
||||
time.sleep(wait)
|
||||
tries += 1
|
||||
elapsed += wait
|
||||
if elapsed >= timeout:
|
||||
msg = (_('Timeout while waiting while volume %(volume_id)s '
|
||||
'to resize the device in %(tries)s tries.')
|
||||
% {'volume_id': volume.id, 'tries': tries})
|
||||
LOG.error(msg)
|
||||
raise exceptions.BackendException(msg)
|
||||
|
||||
def _wait_volume_status(self, volume, status_transition, status_expected):
|
||||
max_recheck_wait = 15
|
||||
timeout = self.store_conf.cinder_state_transition_timeout
|
||||
@ -703,22 +672,6 @@ class Store(glance_store.driver.Store):
|
||||
raise exceptions.BackendException(msg)
|
||||
return volume
|
||||
|
||||
def get_hash_str(self, base_str):
|
||||
"""Returns string that represents SHA256 hash of base_str (in hex format).
|
||||
|
||||
If base_str is a Unicode string, encode it to UTF-8.
|
||||
"""
|
||||
if isinstance(base_str, str):
|
||||
base_str = base_str.encode('utf-8')
|
||||
return hashlib.sha256(base_str).hexdigest()
|
||||
|
||||
def _get_mount_path(self, share, mount_point_base):
|
||||
"""Returns the mount path prefix using the mount point base and share.
|
||||
|
||||
:returns: The mount path prefix.
|
||||
"""
|
||||
return os.path.join(mount_point_base, self.get_hash_str(share))
|
||||
|
||||
def _get_host_ip(self, host):
|
||||
try:
|
||||
return socket.getaddrinfo(host, None, socket.AF_INET6)[0][4][0]
|
||||
@ -735,7 +688,6 @@ class Store(glance_store.driver.Store):
|
||||
my_ip = self._get_host_ip(host)
|
||||
use_multipath = self.store_conf.cinder_use_multipath
|
||||
enforce_multipath = self.store_conf.cinder_enforce_multipath
|
||||
mount_point_base = self.store_conf.cinder_mount_point_base
|
||||
volume_id = volume.id
|
||||
|
||||
connector_prop = connector.get_connector_properties(
|
||||
@ -762,42 +714,16 @@ class Store(glance_store.driver.Store):
|
||||
connection_info = attachment.connection_info
|
||||
|
||||
try:
|
||||
conn = connector.InitiatorConnector.factory(
|
||||
connection_info['driver_volume_type'], root_helper,
|
||||
conn=connection_info, use_multipath=use_multipath)
|
||||
if connection_info['driver_volume_type'] == 'nfs':
|
||||
# The format info of nfs volumes is exposed via attachment_get
|
||||
# API hence it is not available in the connection info of
|
||||
# attachment object received from attachment_update and we
|
||||
# need to do this call
|
||||
vol_attachment = self.volume_api.attachment_get(
|
||||
client, attachment.id)
|
||||
if (volume.encrypted or
|
||||
vol_attachment.connection_info['format'] == 'qcow2'):
|
||||
issue_type = 'Encrypted' if volume.encrypted else 'qcow2'
|
||||
msg = (_('%(issue_type)s volume creation for cinder nfs '
|
||||
'is not supported from glance_store. Failed to '
|
||||
'create volume %(volume_id)s')
|
||||
% {'issue_type': issue_type,
|
||||
'volume_id': volume_id})
|
||||
LOG.error(msg)
|
||||
raise exceptions.BackendException(msg)
|
||||
|
||||
@utils.synchronized(connection_info['export'])
|
||||
def connect_volume_nfs():
|
||||
export = connection_info['export']
|
||||
vol_name = connection_info['name']
|
||||
mountpoint = self._get_mount_path(
|
||||
export,
|
||||
os.path.join(mount_point_base, 'nfs'))
|
||||
options = connection_info['options']
|
||||
self.mount.mount(
|
||||
'nfs', export, vol_name, mountpoint, host,
|
||||
root_helper, options)
|
||||
return {'path': os.path.join(mountpoint, vol_name)}
|
||||
device = connect_volume_nfs()
|
||||
else:
|
||||
device = conn.connect_volume(connection_info)
|
||||
conn = base.factory(
|
||||
connection_info['driver_volume_type'],
|
||||
volume=volume,
|
||||
connection_info=connection_info,
|
||||
root_helper=root_helper,
|
||||
use_multipath=use_multipath,
|
||||
mountpoint_base=self.store_conf.cinder_mount_point_base,
|
||||
attachment_obj=attachment,
|
||||
client=client)
|
||||
device = conn.connect_volume(volume)
|
||||
|
||||
# Complete the attachment (marking the volume "in-use") after
|
||||
# the connection with os-brick is complete
|
||||
@ -805,12 +731,12 @@ class Store(glance_store.driver.Store):
|
||||
LOG.debug('Attachment %(attachment_id)s completed successfully.',
|
||||
{'attachment_id': attachment.id})
|
||||
if (connection_info['driver_volume_type'] == 'rbd' and
|
||||
not conn.do_local_attach):
|
||||
not conn.conn.do_local_attach):
|
||||
yield device['path']
|
||||
else:
|
||||
with self.temporary_chown(
|
||||
device['path']), open(device['path'], mode) as f:
|
||||
yield f
|
||||
yield conn.yield_path(volume, f)
|
||||
except Exception:
|
||||
LOG.exception(_LE('Exception while accessing to cinder volume '
|
||||
'%(volume_id)s.'), {'volume_id': volume.id})
|
||||
@ -818,20 +744,13 @@ class Store(glance_store.driver.Store):
|
||||
finally:
|
||||
if device:
|
||||
try:
|
||||
if connection_info['driver_volume_type'] == 'nfs':
|
||||
@utils.synchronized(connection_info['export'])
|
||||
def disconnect_volume_nfs():
|
||||
path, vol_name = device['path'].rsplit('/', 1)
|
||||
self.mount.umount(vol_name, path, host,
|
||||
root_helper)
|
||||
disconnect_volume_nfs()
|
||||
if volume.multiattach:
|
||||
attachment_state_manager.detach(
|
||||
client, attachment.id, volume_id, host, conn,
|
||||
connection_info, device)
|
||||
else:
|
||||
if volume.multiattach:
|
||||
attachment_state_manager.detach(
|
||||
client, attachment.id, volume_id, host, conn,
|
||||
connection_info, device)
|
||||
else:
|
||||
conn.disconnect_volume(connection_info, device)
|
||||
conn.disconnect_volume(device)
|
||||
|
||||
except Exception:
|
||||
LOG.exception(_LE('Failed to disconnect volume '
|
||||
'%(volume_id)s.'),
|
||||
@ -988,10 +907,6 @@ class Store(glance_store.driver.Store):
|
||||
try:
|
||||
while need_extend:
|
||||
with self._open_cinder_volume(client, volume, 'wb') as f:
|
||||
# Sometimes the extended LUN on storage side takes time
|
||||
# to reflect in the device so we wait until the device
|
||||
# size is equal to the extended volume size.
|
||||
Store._wait_resize_device(volume, f)
|
||||
f.seek(bytes_written)
|
||||
if buf:
|
||||
f.write(buf)
|
@ -230,7 +230,7 @@ class _AttachmentState(object):
|
||||
{'volume_id': volume_id, 'host': host})
|
||||
|
||||
if not vol_attachment.in_use():
|
||||
conn.disconnect_volume(connection_info, device)
|
||||
conn.disconnect_volume(device)
|
||||
del self.volumes[volume_id]
|
||||
self.volume_api.attachment_delete(client, attachment_id)
|
||||
|
||||
|
111
glance_store/tests/unit/cinder/test_base.py
Normal file
111
glance_store/tests/unit/cinder/test_base.py
Normal file
@ -0,0 +1,111 @@
|
||||
# Copyright 2023 RedHat Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sys
|
||||
from unittest import mock
|
||||
|
||||
import ddt
|
||||
|
||||
from glance_store._drivers.cinder import base
|
||||
from glance_store._drivers.cinder import scaleio
|
||||
from glance_store.tests import base as test_base
|
||||
|
||||
sys.modules['glance_store.common.fs_mount'] = mock.Mock()
|
||||
from glance_store._drivers.cinder import store as cinder # noqa
|
||||
from glance_store._drivers.cinder import nfs # noqa
|
||||
|
||||
|
||||
@ddt.ddt
class TestConnectorBase(test_base.StoreBaseTest):
    """Verify base.factory maps each protocol to its connector class."""

    @ddt.data(
        ('iscsi', base.BaseBrickConnectorInterface),
        ('nfs', nfs.NfsBrickConnector),
        ('scaleio', scaleio.ScaleIOBrickConnector),
    )
    @ddt.unpack
    def test_factory(self, protocol, expected_class):
        conn = base.factory(
            connection_info={'driver_volume_type': protocol})
        self.assertIsInstance(conn, expected_class)
|
||||
|
||||
|
||||
class TestBaseBrickConnectorInterface(test_base.StoreBaseTest):
    """Tests for the generic os-brick backed connector.

    Protocol-specific test classes reuse this by overriding setUp with
    their own connection_info and extra factory kwargs.
    """

    def get_connection_info(self):
        """Return iSCSI connection information"""
        return {
            'target_discovered': False,
            'target_portal': '0.0.0.0:3260',
            'target_iqn': 'iqn.2010-10.org.openstack:volume-fake-vol',
            'target_lun': 0,
            'volume_id': '007dedb8-ddc0-445c-88f1-d07acbe4efcb',
            'auth_method': 'CHAP',
            'auth_username': '2ttANgVaDRqxtMNK3hUj',
            'auth_password': 'fake-password',
            'encrypted': False,
            'qos_specs': None,
            'access_mode': 'rw',
            'cacheable': False,
            'driver_volume_type': 'iscsi',
            'attachment_id': '7f45b2fe-111a-42df-be3e-f02b312ad8ea'}

    # Fix: avoid a mutable default argument ({}); None keeps the same
    # "fall back to get_connection_info()" behavior for subclasses.
    def setUp(self, connection_info=None, **kwargs):
        super().setUp()
        self.connection_info = connection_info or self.get_connection_info()
        self.root_helper = 'fake_rootwrap'
        self.use_multipath = False
        self.properties = {
            'connection_info': self.connection_info,
            'root_helper': self.root_helper,
            'use_multipath': self.use_multipath}
        self.properties.update(kwargs)
        # Stub out os-brick so no real initiator is created.
        self.mock_object(base.connector.InitiatorConnector, 'factory')
        self.connector = base.factory(**self.properties)

    def mock_object(self, obj, attr_name, *args, **kwargs):
        """Use python mock to mock an object attribute

        Mocks the specified objects attribute with the given value.
        Automatically performs 'addCleanup' for the mock.
        """
        patcher = mock.patch.object(obj, attr_name, *args, **kwargs)
        result = patcher.start()
        self.addCleanup(patcher.stop)
        return result

    def test_connect_volume(self):
        if self.connection_info['driver_volume_type'] == 'nfs':
            # Fix: use the standard unittest skipTest API instead of the
            # deprecated/non-standard self.skip().
            self.skipTest(
                'NFS tests have custom implementation of this method.')
        fake_vol = mock.MagicMock()
        fake_path = {'path': 'fake_dev_path'}
        self.mock_object(self.connector.conn, 'connect_volume',
                         return_value=fake_path)
        fake_dev_path = self.connector.connect_volume(fake_vol)
        self.connector.conn.connect_volume.assert_called_once_with(
            self.connector.connection_info)
        self.assertEqual(fake_path['path'], fake_dev_path['path'])

    def test_disconnect_volume(self):
        fake_device = 'fake_dev_path'
        self.mock_object(self.connector.conn, 'disconnect_volume')
        self.connector.disconnect_volume(fake_device)
        self.connector.conn.disconnect_volume.assert_called_once_with(
            self.connection_info, fake_device)

    def test_yield_path(self):
        fake_vol = mock.MagicMock()
        fake_device = 'fake_dev_path'
        fake_dev_path = self.connector.yield_path(fake_vol, fake_device)
        self.assertEqual(fake_device, fake_dev_path)
|
@ -32,13 +32,15 @@ from oslo_concurrency import processutils
|
||||
from oslo_utils.secretutils import md5
|
||||
from oslo_utils import units
|
||||
|
||||
from glance_store._drivers.cinder import scaleio
|
||||
from glance_store.common import attachment_state_manager
|
||||
from glance_store.common import cinder_utils
|
||||
from glance_store import exceptions
|
||||
from glance_store import location
|
||||
|
||||
sys.modules['glance_store.common.fs_mount'] = mock.Mock()
|
||||
from glance_store._drivers import cinder # noqa
|
||||
from glance_store._drivers.cinder import store as cinder # noqa
|
||||
from glance_store._drivers.cinder import nfs # noqa
|
||||
|
||||
|
||||
class TestCinderStoreBase(object):
|
||||
@ -216,7 +218,9 @@ class TestCinderStoreBase(object):
|
||||
id=fake_attachment_id,
|
||||
connection_info={'driver_volume_type': 'nfs'})
|
||||
else:
|
||||
fake_attachment_update = mock.MagicMock(id=fake_attachment_id)
|
||||
fake_attachment_update = mock.MagicMock(
|
||||
id=fake_attachment_id,
|
||||
connection_info={'driver_volume_type': 'fake'})
|
||||
fake_conn_info = mock.MagicMock(connector={})
|
||||
fake_volumes = mock.MagicMock(get=lambda id: fake_volume)
|
||||
fake_client = mock.MagicMock(volumes=fake_volumes)
|
||||
@ -335,9 +339,6 @@ class TestCinderStoreBase(object):
|
||||
host=fake_host)
|
||||
fake_connector.connect_volume.assert_not_called()
|
||||
fake_connector.disconnect_volume.assert_not_called()
|
||||
fake_conn_obj.assert_called_once_with(
|
||||
mock.ANY, root_helper, conn=mock.ANY,
|
||||
use_multipath=multipath_supported)
|
||||
attach_create.assert_called_once_with(
|
||||
fake_client, fake_volume.id, mode=attach_mode)
|
||||
attach_update.assert_called_once_with(
|
||||
@ -516,7 +517,7 @@ class TestCinderStoreBase(object):
|
||||
|
||||
def _test_cinder_add(self, fake_volume, volume_file, size_kb=5,
|
||||
verifier=None, backend='glance_store',
|
||||
fail_resize=False, is_multi_store=False):
|
||||
is_multi_store=False):
|
||||
expected_image_id = str(uuid.uuid4())
|
||||
expected_size = size_kb * units.Ki
|
||||
expected_file_contents = b"*" * expected_size
|
||||
@ -547,11 +548,7 @@ class TestCinderStoreBase(object):
|
||||
|
||||
with mock.patch.object(cinder.Store, 'get_cinderclient') as mock_cc, \
|
||||
mock.patch.object(self.store, '_open_cinder_volume',
|
||||
side_effect=fake_open), \
|
||||
mock.patch.object(
|
||||
cinder.Store, '_wait_resize_device') as mock_wait_resize:
|
||||
if fail_resize:
|
||||
mock_wait_resize.side_effect = exceptions.BackendException()
|
||||
side_effect=fake_open):
|
||||
mock_cc.return_value = mock.MagicMock(client=fake_client,
|
||||
volumes=fake_volumes)
|
||||
loc, size, checksum, multihash, metadata = self.store.add(
|
||||
@ -636,7 +633,6 @@ class TestCinderStoreBase(object):
|
||||
with mock.patch.object(cinder.Store, 'get_cinderclient') as mock_cc, \
|
||||
mock.patch.object(self.store, '_open_cinder_volume',
|
||||
side_effect=fake_open), \
|
||||
mock.patch.object(cinder.Store, '_wait_resize_device'), \
|
||||
mock.patch.object(cinder.utils, 'get_hasher') as fake_hasher, \
|
||||
mock.patch.object(cinder.Store, '_wait_volume_status',
|
||||
return_value=fake_volume) as mock_wait:
|
||||
@ -693,7 +689,6 @@ class TestCinderStoreBase(object):
|
||||
|
||||
with mock.patch.object(cinder.Store, 'get_cinderclient') as mock_cc, \
|
||||
mock.patch.object(self.store, '_open_cinder_volume'), \
|
||||
mock.patch.object(cinder.Store, '_wait_resize_device'), \
|
||||
mock.patch.object(cinder.utils, 'get_hasher'), \
|
||||
mock.patch.object(
|
||||
cinder.Store, '_wait_volume_status') as mock_wait:
|
||||
@ -735,7 +730,6 @@ class TestCinderStoreBase(object):
|
||||
|
||||
with mock.patch.object(cinder.Store, 'get_cinderclient') as mock_cc, \
|
||||
mock.patch.object(self.store, '_open_cinder_volume'), \
|
||||
mock.patch.object(cinder.Store, '_wait_resize_device'), \
|
||||
mock.patch.object(cinder.utils, 'get_hasher'), \
|
||||
mock.patch.object(
|
||||
cinder.Store, '_wait_volume_status') as mock_wait:
|
||||
@ -780,7 +774,7 @@ class TestCinderStoreBase(object):
|
||||
fake_data = b"fake binary data"
|
||||
fake_len = int(math.ceil(float(len(fake_data)) / units.Gi))
|
||||
fake_file = io.BytesIO(fake_data)
|
||||
dev_size = cinder.Store._get_device_size(fake_file)
|
||||
dev_size = scaleio.ScaleIOBrickConnector._get_device_size(fake_file)
|
||||
self.assertEqual(fake_len, dev_size)
|
||||
|
||||
@mock.patch.object(time, 'sleep')
|
||||
@ -789,9 +783,11 @@ class TestCinderStoreBase(object):
|
||||
fake_vol.size = 2
|
||||
fake_file = io.BytesIO(b"fake binary data")
|
||||
with mock.patch.object(
|
||||
cinder.Store, '_get_device_size') as mock_get_dev_size:
|
||||
scaleio.ScaleIOBrickConnector,
|
||||
'_get_device_size') as mock_get_dev_size:
|
||||
mock_get_dev_size.side_effect = [1, 2]
|
||||
cinder.Store._wait_resize_device(fake_vol, fake_file)
|
||||
scaleio.ScaleIOBrickConnector._wait_resize_device(
|
||||
fake_vol, fake_file)
|
||||
|
||||
@mock.patch.object(time, 'sleep')
|
||||
def test__wait_resize_device_fails(self, mock_sleep):
|
||||
@ -799,11 +795,11 @@ class TestCinderStoreBase(object):
|
||||
fake_vol.size = 2
|
||||
fake_file = io.BytesIO(b"fake binary data")
|
||||
with mock.patch.object(
|
||||
cinder.Store, '_get_device_size',
|
||||
scaleio.ScaleIOBrickConnector, '_get_device_size',
|
||||
return_value=1):
|
||||
self.assertRaises(
|
||||
exceptions.BackendException,
|
||||
cinder.Store._wait_resize_device,
|
||||
scaleio.ScaleIOBrickConnector._wait_resize_device,
|
||||
fake_vol, fake_file)
|
||||
|
||||
def test_process_specs(self):
|
||||
@ -827,20 +823,23 @@ class TestCinderStoreBase(object):
|
||||
self.assertEqual(expected, res)
|
||||
|
||||
def test_get_hash_str(self):
|
||||
nfs_conn = nfs.NfsBrickConnector()
|
||||
test_str = 'test_str'
|
||||
with mock.patch.object(cinder.hashlib, 'sha256') as fake_hashlib:
|
||||
self.store.get_hash_str(test_str)
|
||||
with mock.patch.object(nfs.hashlib, 'sha256') as fake_hashlib:
|
||||
nfs_conn.get_hash_str(test_str)
|
||||
test_str = test_str.encode('utf-8')
|
||||
fake_hashlib.assert_called_once_with(test_str)
|
||||
|
||||
def test__get_mount_path(self):
|
||||
nfs_conn = nfs.NfsBrickConnector(mountpoint_base='fake_mount_path')
|
||||
fake_hex = 'fake_hex_digest'
|
||||
fake_share = 'fake_share'
|
||||
fake_path = 'fake_mount_path'
|
||||
expected_path = os.path.join(fake_path, fake_hex)
|
||||
with mock.patch.object(self.store, 'get_hash_str') as fake_hash:
|
||||
with mock.patch.object(
|
||||
nfs.NfsBrickConnector, 'get_hash_str') as fake_hash:
|
||||
fake_hash.return_value = fake_hex
|
||||
res = self.store._get_mount_path(fake_share, fake_path)
|
||||
res = nfs_conn._get_mount_path(fake_share, fake_path)
|
||||
self.assertEqual(expected_path, res)
|
||||
|
||||
def test__get_host_ip_v6(self):
|
@ -24,11 +24,11 @@ from oslo_utils import units
|
||||
|
||||
from glance_store import exceptions
|
||||
from glance_store.tests import base
|
||||
from glance_store.tests.unit import test_cinder_base
|
||||
from glance_store.tests.unit.cinder import test_cinder_base
|
||||
from glance_store.tests.unit import test_store_capabilities
|
||||
|
||||
sys.modules['glance_store.common.fs_mount'] = mock.Mock()
|
||||
from glance_store._drivers import cinder # noqa
|
||||
from glance_store._drivers.cinder import store as cinder # noqa
|
||||
|
||||
|
||||
class TestCinderStore(base.StoreBaseTest,
|
||||
@ -135,16 +135,6 @@ class TestCinderStore(base.StoreBaseTest,
|
||||
self._test_cinder_add, fake_volume, volume_file)
|
||||
fake_volume.delete.assert_called_once_with()
|
||||
|
||||
def test_cinder_add_fail_resize(self):
|
||||
volume_file = io.BytesIO()
|
||||
fake_volume = mock.MagicMock(id=str(uuid.uuid4()),
|
||||
status='available',
|
||||
size=1)
|
||||
self.assertRaises(exceptions.BackendException,
|
||||
self._test_cinder_add, fake_volume, volume_file,
|
||||
fail_resize=True)
|
||||
fake_volume.delete.assert_called_once()
|
||||
|
||||
def test_cinder_add_extend(self):
|
||||
self._test_cinder_add_extend()
|
||||
|
@ -28,11 +28,11 @@ import glance_store as store
|
||||
from glance_store import exceptions
|
||||
from glance_store import location
|
||||
from glance_store.tests import base
|
||||
from glance_store.tests.unit import test_cinder_base
|
||||
from glance_store.tests.unit.cinder import test_cinder_base
|
||||
from glance_store.tests.unit import test_store_capabilities as test_cap
|
||||
|
||||
sys.modules['glance_store.common.fs_mount'] = mock.Mock()
|
||||
from glance_store._drivers import cinder # noqa
|
||||
from glance_store._drivers.cinder import store as cinder # noqa
|
||||
|
||||
|
||||
class TestMultiCinderStore(base.MultiStoreBaseTest,
|
||||
@ -273,16 +273,6 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
|
||||
self._test_cinder_add(fake_volume, volume_file, backend="cinder2",
|
||||
is_multi_store=True)
|
||||
|
||||
def test_cinder_add_fail_resize(self):
|
||||
volume_file = io.BytesIO()
|
||||
fake_volume = mock.MagicMock(id=str(uuid.uuid4()),
|
||||
status='available',
|
||||
size=1)
|
||||
self.assertRaises(exceptions.BackendException,
|
||||
self._test_cinder_add, fake_volume, volume_file,
|
||||
fail_resize=True, is_multi_store=True)
|
||||
fake_volume.delete.assert_called_once()
|
||||
|
||||
def test_cinder_add_extend(self):
|
||||
self._test_cinder_add_extend(is_multi_store=True)
|
||||
|
93
glance_store/tests/unit/cinder/test_nfs.py
Normal file
93
glance_store/tests/unit/cinder/test_nfs.py
Normal file
@ -0,0 +1,93 @@
|
||||
# Copyright 2023 RedHat Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
from unittest import mock
|
||||
|
||||
import ddt
|
||||
|
||||
from glance_store import exceptions
|
||||
from glance_store.tests.unit.cinder import test_base as test_base_connector
|
||||
|
||||
sys.modules['glance_store.common.fs_mount'] = mock.Mock()
|
||||
from glance_store._drivers.cinder import store as cinder # noqa
|
||||
from glance_store._drivers.cinder import nfs # noqa
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class TestNfsBrickConnector(
|
||||
test_base_connector.TestBaseBrickConnectorInterface):
|
||||
|
||||
def setUp(self):
|
||||
self.connection_info = {
|
||||
'export': 'localhost:/srv/fake-nfs-path',
|
||||
'name': 'volume-1fa96ca8-9e07-4dad-a0ed-990c6e86b938',
|
||||
'options': None,
|
||||
'format': 'raw',
|
||||
'qos_specs': None,
|
||||
'access_mode': 'rw',
|
||||
'encrypted': False,
|
||||
'cacheable': False,
|
||||
'driver_volume_type': 'nfs',
|
||||
'mount_point_base': '/opt/stack/data/cinder/mnt',
|
||||
'attachment_id': '7eb574ce-f32d-4173-a68b-870ead29fd84'}
|
||||
fake_attachment = mock.MagicMock(id='fake_attachment_uuid')
|
||||
self.mountpath = 'fake_mount_path'
|
||||
super().setUp(connection_info=self.connection_info,
|
||||
attachment_obj=fake_attachment,
|
||||
mountpoint_base=self.mountpath)
|
||||
|
||||
@ddt.data(
|
||||
(False, 'raw'),
|
||||
(False, 'qcow2'),
|
||||
(True, 'raw'),
|
||||
(True, 'qcow2'))
|
||||
@ddt.unpack
|
||||
def test_connect_volume(self, encrypted, file_format):
|
||||
fake_vol = mock.MagicMock(id='fake_vol_uuid', encrypted=encrypted)
|
||||
fake_attachment = mock.MagicMock(
|
||||
id='fake_attachment_uuid',
|
||||
connection_info={'format': file_format})
|
||||
self.mock_object(self.connector.volume_api, 'attachment_get',
|
||||
return_value=fake_attachment)
|
||||
if encrypted or file_format == 'qcow2':
|
||||
self.assertRaises(exceptions.BackendException,
|
||||
self.connector.connect_volume,
|
||||
fake_vol)
|
||||
else:
|
||||
fake_hash = 'fake_hash'
|
||||
fake_path = {'path': os.path.join(
|
||||
self.mountpath, fake_hash, self.connection_info['name'])}
|
||||
self.mock_object(nfs.NfsBrickConnector, 'get_hash_str',
|
||||
return_value=fake_hash)
|
||||
fake_dev_path = self.connector.connect_volume(fake_vol)
|
||||
nfs.mount.mount.assert_called_once_with(
|
||||
'nfs', self.connection_info['export'],
|
||||
self.connection_info['name'],
|
||||
os.path.join(self.mountpath, fake_hash),
|
||||
self.connector.host, self.connector.root_helper,
|
||||
self.connection_info['options'])
|
||||
self.assertEqual(fake_path['path'], fake_dev_path['path'])
|
||||
|
||||
def test_disconnect_volume(self):
|
||||
fake_hash = 'fake_hash'
|
||||
fake_path = {'path': os.path.join(
|
||||
self.mountpath, fake_hash, self.connection_info['name'])}
|
||||
mount_path, vol_name = fake_path['path'].rsplit('/', 1)
|
||||
self.connector.disconnect_volume(fake_path)
|
||||
nfs.mount.umount.assert_called_once_with(
|
||||
vol_name, mount_path, self.connector.host,
|
||||
self.connector.root_helper)
|
49
glance_store/tests/unit/cinder/test_scaleio.py
Normal file
49
glance_store/tests/unit/cinder/test_scaleio.py
Normal file
@ -0,0 +1,49 @@
|
||||
# Copyright 2023 RedHat Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import io
|
||||
from unittest import mock
|
||||
|
||||
from glance_store.tests.unit.cinder import test_base as test_base_connector
|
||||
|
||||
|
||||
class TestScaleioBrickConnector(
|
||||
test_base_connector.TestBaseBrickConnectorInterface):
|
||||
|
||||
def setUp(self):
|
||||
connection_info = {
|
||||
'scaleIO_volname': 'TZpPr43ISgmNSgpo0LP2uw==',
|
||||
'hostIP': None, 'serverIP': 'l4-pflex154gw',
|
||||
'serverPort': 443,
|
||||
'serverUsername': 'admin',
|
||||
'iopsLimit': None,
|
||||
'bandwidthLimit': None,
|
||||
'scaleIO_volume_id': '3b2f23b00000000d',
|
||||
'config_group': 'powerflex1',
|
||||
'failed_over': False,
|
||||
'discard': True,
|
||||
'qos_specs': None,
|
||||
'access_mode': 'rw',
|
||||
'encrypted': False,
|
||||
'cacheable': False,
|
||||
'driver_volume_type': 'scaleio',
|
||||
'attachment_id': '22914c3a-5818-4840-9188-2ac9833b9f7b'}
|
||||
super().setUp(connection_info=connection_info)
|
||||
|
||||
def test_yield_path(self):
|
||||
fake_vol = mock.MagicMock(size=1)
|
||||
fake_device = io.BytesIO(b"fake binary data")
|
||||
fake_dev_path = self.connector.yield_path(fake_vol, fake_device)
|
||||
self.assertEqual(fake_device, fake_dev_path)
|
@ -59,8 +59,7 @@ class AttachmentStateTestCase(base.BaseTestCase):
|
||||
self.m = attach_manager._AttachmentState()
|
||||
self.attach_call_1 = [mock.sentinel.client, mock.sentinel.volume_id]
|
||||
self.attach_call_2 = {'mode': mock.sentinel.mode}
|
||||
self.disconnect_vol_call = [mock.sentinel.connection_info,
|
||||
mock.sentinel.device]
|
||||
self.disconnect_vol_call = [mock.sentinel.device]
|
||||
self.detach_call = [mock.sentinel.client, mock.sentinel.attachment_id]
|
||||
self.attachment_dict = {'id': mock.sentinel.attachment_id}
|
||||
|
||||
|
@ -0,0 +1,5 @@
|
||||
---
|
||||
fixes:
|
||||
- |
|
||||
`Bug #2000584 <https://bugs.launchpad.net/glance-store/+bug/2000584>`_:
|
||||
Fixed image create with cinder NFS store when using sparse volumes.
|
Loading…
Reference in New Issue
Block a user