Merge "Continue renaming of volume_utils (drivers)"

commit 1ec89105a3
Zuul, 2019-09-12 16:20:09 +00:00, committed by Gerrit Code Review
25 changed files with 233 additions and 223 deletions
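
Every hunk below makes the same mechanical change: the assorted per-file aliases for cinder.volume.volume_utils (vutils, vol_utils, volutils, and, most confusingly, plain utils, which shadowed cinder.utils in several drivers) are replaced with the unaliased module name, and continuation lines are re-wrapped where the longer name overflows. A minimal before/after sketch of the pattern, based on the CHAP-credential call sites in this change:

    # before: the alias differs from file to file
    from cinder.volume import volume_utils as vutils
    chap_auth = (vutils.generate_username(),
                 vutils.generate_password())

    # after: one canonical name, so mock.patch.object(volume_utils, ...)
    # and grep hit the same identifier everywhere
    from cinder.volume import volume_utils
    chap_auth = (volume_utils.generate_username(),
                 volume_utils.generate_password())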


@@ -16,7 +16,7 @@ from cinder import context
 from cinder.tests.unit.targets import targets_fixture as tf
 from cinder import utils
 from cinder.volume.targets import scst
-from cinder.volume import volume_utils as vutils
+from cinder.volume import volume_utils
 class TestSCSTAdmDriver(tf.TargetDriverFixture):
@@ -163,9 +163,9 @@ class TestSCSTAdmDriver(tf.TargetDriverFixture):
 side_effect=_fake_iscsi_location),\
 mock.patch.object(self.target, 'target_driver',
 return_value='iscsi'),\
-mock.patch.object(vutils, 'generate_username',
+mock.patch.object(volume_utils, 'generate_username',
 side_effect=lambda: 'QZJbisGmn9AL954FNF4D'),\
-mock.patch.object(vutils, 'generate_password',
+mock.patch.object(volume_utils, 'generate_password',
 side_effect=lambda: 'P68eE7u9eFqDGexd28DQ'):
 self.assertEqual(expected_result,
 self.target.create_export(ctxt,


@@ -23,7 +23,7 @@ from cinder import test
 from cinder.tests.unit.targets import targets_fixture as tf
 from cinder import utils
 from cinder.volume.targets import tgt
-from cinder.volume import volume_utils as vutils
+from cinder.volume import volume_utils
 class TestTgtAdmDriver(tf.TargetDriverFixture):
@@ -351,11 +351,11 @@ class TestTgtAdmDriver(tf.TargetDriverFixture):
 side_effect=lambda x, y: True),\
 mock.patch.object(self.target, '_get_target_chap_auth',
 side_effect=lambda x, y: None) as m_chap,\
-mock.patch.object(vutils, 'generate_username',
+mock.patch.object(volume_utils, 'generate_username',
 side_effect=lambda: 'QZJb'),\
 mock.patch('cinder.privsep.targets.tgt.tgtadmin_update',
 return_value=('', '')), \
-mock.patch.object(vutils, 'generate_password',
+mock.patch.object(volume_utils, 'generate_password',
 side_effect=lambda: 'P68e'):
 ctxt = context.get_admin_context()


@@ -27,7 +27,7 @@ from cinder.volume.drivers.macrosan import devop_client
 from cinder.volume.drivers.macrosan import driver
 from cinder.volume import qos_specs
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as volutils
+from cinder.volume import volume_utils
 test_volume = (
@@ -460,7 +460,7 @@ class MacroSANISCSIDriverTestCase(test.TestCase):
 @mock.patch.object(socket, 'gethostname', return_value='controller')
 @mock.patch.object(utils, 'brick_get_connector',
 return_value=DummyBrickGetConnector())
-@mock.patch.object(volutils, 'copy_volume', return_value=None)
+@mock.patch.object(volume_utils, 'copy_volume', return_value=None)
 @mock.patch.object(os.path, 'realpath', return_value=None)
 def test_create_volume_from_snapshot(self, mock_volume_type, mock_qos,
 mock_hostname,
@@ -480,7 +480,7 @@ class MacroSANISCSIDriverTestCase(test.TestCase):
 @mock.patch.object(socket, 'gethostname', return_value='controller')
 @mock.patch.object(utils, 'brick_get_connector',
 return_value=DummyBrickGetConnector())
-@mock.patch.object(volutils, 'copy_volume', return_value=None)
+@mock.patch.object(volume_utils, 'copy_volume', return_value=None)
 @mock.patch.object(os.path, 'realpath', return_value=None)
 def test_create_cloned_volume(self, mock_volume_types, mock_qos,
 mock_hostname,
@@ -567,7 +567,7 @@ class MacroSANISCSIDriverTestCase(test.TestCase):
 @mock.patch.object(socket, 'gethostname', return_value='controller')
 @mock.patch.object(utils, 'brick_get_connector',
 return_value=DummyBrickGetConnector())
-@mock.patch.object(volutils, 'copy_volume', return_value=None)
+@mock.patch.object(volume_utils, 'copy_volume', return_value=None)
 @mock.patch.object(os.path, 'realpath', return_value=None)
 def test_create_volume_from_snapshot_fail(self, mock_volume_type,
 mock_qos, mock_hostname,
@@ -588,7 +588,7 @@ class MacroSANISCSIDriverTestCase(test.TestCase):
 @mock.patch.object(socket, 'gethostname', return_value='controller')
 @mock.patch.object(utils, 'brick_get_connector',
 return_value=DummyBrickGetConnector())
-@mock.patch.object(volutils, 'copy_volume', return_value=None)
+@mock.patch.object(volume_utils, 'copy_volume', return_value=None)
 @mock.patch.object(os.path, 'realpath', return_value=None)
 def test_create_cloned_volume_fail(self, mock_volume_types, mock_qos,
 mock_hostname,
@@ -699,7 +699,7 @@ class MacroSANFCDriverTestCase(test.TestCase):
 @mock.patch.object(socket, 'gethostname', return_value='controller')
 @mock.patch.object(utils, 'brick_get_connector',
 return_value=DummyBrickGetConnector())
-@mock.patch.object(volutils, 'copy_volume', return_value=None)
+@mock.patch.object(volume_utils, 'copy_volume', return_value=None)
 @mock.patch.object(os.path, 'realpath', return_value=None)
 def test_create_volume_from_snapshot(self, mock_volume_types, mock_qos,
 mock_hostname,
@@ -720,7 +720,7 @@ class MacroSANFCDriverTestCase(test.TestCase):
 @mock.patch.object(socket, 'gethostname', return_value='controller')
 @mock.patch.object(utils, 'brick_get_connector',
 return_value=DummyBrickGetConnector())
-@mock.patch.object(volutils, 'copy_volume', return_value=None)
+@mock.patch.object(volume_utils, 'copy_volume', return_value=None)
 @mock.patch.object(os.path, 'realpath', return_value=None)
 def test_create_cloned_volume(self, mock_volume_types, mock_qos,
 mock_hostname,
@@ -741,7 +741,7 @@ class MacroSANFCDriverTestCase(test.TestCase):
 @mock.patch.object(socket, 'gethostname', return_value='controller')
 @mock.patch.object(utils, 'brick_get_connector',
 return_value=DummyBrickGetConnector())
-@mock.patch.object(volutils, 'copy_volume', return_value=None)
+@mock.patch.object(volume_utils, 'copy_volume', return_value=None)
 @mock.patch.object(os.path, 'realpath', return_value=None)
 def test_create_volume_from_snapshot_fail(self, mock_volume_types,
 mock_qos,
@@ -763,7 +763,7 @@ class MacroSANFCDriverTestCase(test.TestCase):
 @mock.patch.object(socket, 'gethostname', return_value='controller')
 @mock.patch.object(utils, 'brick_get_connector',
 return_value=DummyBrickGetConnector())
-@mock.patch.object(volutils, 'copy_volume', return_value=None)
+@mock.patch.object(volume_utils, 'copy_volume', return_value=None)
 @mock.patch.object(os.path, 'realpath', return_value=None)
 def test_create_cloned_volume_fail(self, mock_volume_types, mock_qos,
 mock_hostname,


@@ -378,7 +378,7 @@ class IdMatcher(object):
 @ddt.ddt
 @mock.patch.object(adapter, 'storops_ex', new=ex)
-@mock.patch.object(adapter.vol_utils, 'is_group_a_cg_snapshot_type',
+@mock.patch.object(adapter.volume_utils, 'is_group_a_cg_snapshot_type',
 new=lambda x: True)
 class CommonAdapterTest(test.TestCase):
 def setUp(self):


@@ -32,7 +32,7 @@ from cinder.volume import configuration
 from cinder.volume.drivers.kaminario import kaminario_common
 from cinder.volume.drivers.kaminario import kaminario_fc
 from cinder.volume.drivers.kaminario import kaminario_iscsi
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 CONNECTOR = {'initiator': 'iqn.1993-08.org.debian:01:12aa12aa12aa',
 'ip': '192.168.2.5', 'platform': 'x86_64', 'host': 'test-k2',
@@ -204,7 +204,7 @@ class TestKaminarioCommon(test.TestCase):
 self.driver.delete_snapshot, self.snap)
 @mock.patch.object(utils, 'brick_get_connector_properties')
-@mock.patch.object(vol_utils, 'copy_volume')
+@mock.patch.object(volume_utils, 'copy_volume')
 def test_create_volume_from_snapshot(self, mock_copy_volume,
 mock_brick_get):
 """Test create_volume_from_snapshot."""
@@ -215,7 +215,7 @@ class TestKaminarioCommon(test.TestCase):
 self.assertIsNone(result)
 @mock.patch.object(utils, 'brick_get_connector_properties')
-@mock.patch.object(vol_utils, 'copy_volume')
+@mock.patch.object(volume_utils, 'copy_volume')
 def test_create_volume_from_snapshot_with_exception(self, mock_copy_volume,
 mock_brick_get):
 """Test create_volume_from_snapshot_with_exception."""
@@ -227,7 +227,7 @@ class TestKaminarioCommon(test.TestCase):
 self.snap)
 @mock.patch.object(utils, 'brick_get_connector_properties')
-@mock.patch.object(vol_utils, 'copy_volume')
+@mock.patch.object(volume_utils, 'copy_volume')
 def test_create_cloned_volume(self, mock_copy_volume, mock_brick_get):
 """Test create_cloned_volume."""
 mock_brick_get.return_value = CONNECTOR
@@ -237,7 +237,7 @@ class TestKaminarioCommon(test.TestCase):
 self.assertIsNone(result)
 @mock.patch.object(utils, 'brick_get_connector_properties')
-@mock.patch.object(vol_utils, 'copy_volume')
+@mock.patch.object(volume_utils, 'copy_volume')
 def test_create_cloned_volume_with_exception(self, mock_copy_volume,
 mock_brick_get):
 """Test create_cloned_volume_with_exception."""


@@ -31,7 +31,7 @@ from cinder.tests.unit.volume import test_driver
 from cinder.volume import configuration as conf
 from cinder.volume.drivers import lvm
 import cinder.volume.volume_utils
-from cinder.volume import volume_utils as volutils
+from cinder.volume import volume_utils
 CONF = cfg.CONF
@@ -74,8 +74,8 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
 self.assertRaises(exception.VolumeBackendAPIException,
 lvm_driver._delete_volume, volume)
-@mock.patch.object(volutils, 'clear_volume')
-@mock.patch.object(volutils, 'copy_volume')
+@mock.patch.object(volume_utils, 'clear_volume')
+@mock.patch.object(volume_utils, 'copy_volume')
 @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export')
 def test_delete_volume_thinlvm_snap(self, _mock_create_export,
 mock_copy, mock_clear):
@@ -97,7 +97,7 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
 'size': 123}
 lvm_driver._delete_volume(fake_snapshot, is_snapshot=True)
-@mock.patch.object(volutils, 'get_all_volume_groups',
+@mock.patch.object(volume_utils, 'get_all_volume_groups',
 return_value=[{'name': 'cinder-volumes'}])
 @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lvm_version',
 return_value=(2, 2, 100))
@@ -186,7 +186,7 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
 with mock.patch.object(self.volume.driver, 'vg'), \
 mock.patch.object(self.volume.driver, '_create_volume'), \
-mock.patch.object(volutils, 'copy_volume') as mock_copy:
+mock.patch.object(volume_utils, 'copy_volume') as mock_copy:
 # Test case for thick LVM
 src_volume = tests_utils.create_volume(self.context)
@@ -394,7 +394,7 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
 self.assertFalse(moved)
 self.assertIsNone(model_update)
-@mock.patch.object(volutils, 'get_all_volume_groups',
+@mock.patch.object(volume_utils, 'get_all_volume_groups',
 return_value=[{'name': 'cinder-volumes'}])
 def test_lvm_migrate_volume_same_volume_group(self, vgs):
 hostname = socket.gethostname()
@@ -414,9 +414,9 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
 @mock.patch.object(lvm.LVMVolumeDriver, '_create_volume')
 @mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes')
 @mock.patch.object(brick_lvm.LVM, 'delete')
-@mock.patch.object(volutils, 'copy_volume',
+@mock.patch.object(volume_utils, 'copy_volume',
 side_effect=processutils.ProcessExecutionError)
-@mock.patch.object(volutils, 'get_all_volume_groups',
+@mock.patch.object(volume_utils, 'get_all_volume_groups',
 return_value=[{'name': 'cinder-volumes'}])
 def test_lvm_migrate_volume_volume_copy_error(self, vgs, copy_volume,
 mock_delete, mock_pvs,
@@ -434,7 +434,7 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
 vol, host)
 mock_delete.assert_called_once_with(vol)
-@mock.patch.object(volutils, 'get_all_volume_groups',
+@mock.patch.object(volume_utils, 'get_all_volume_groups',
 return_value=[{'name': 'cinder-volumes-2'}])
 def test_lvm_volume_group_missing(self, vgs):
 hostname = socket.gethostname()
@@ -475,8 +475,8 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
 return_value = [{}]), \
 mock.patch.object(self.volume.driver, '_execute') \
 as mock_execute, \
-mock.patch.object(volutils, 'copy_volume') as mock_copy, \
-mock.patch.object(volutils, 'get_all_volume_groups',
+mock.patch.object(volume_utils, 'copy_volume') as mock_copy, \
+mock.patch.object(volume_utils, 'get_all_volume_groups',
 side_effect = get_all_volume_groups), \
 mock.patch.object(self.volume.driver, '_delete_volume'):
@@ -523,8 +523,8 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
 return_value = [{}]), \
 mock.patch.object(lvm_driver, '_execute') \
 as mock_execute, \
-mock.patch.object(volutils, 'copy_volume') as mock_copy, \
-mock.patch.object(volutils, 'get_all_volume_groups',
+mock.patch.object(volume_utils, 'copy_volume') as mock_copy, \
+mock.patch.object(volume_utils, 'get_all_volume_groups',
 side_effect = get_all_volume_groups), \
 mock.patch.object(lvm_driver, '_delete_volume'):


@@ -28,7 +28,7 @@ from cinder import exception
 from cinder.i18n import _
 from cinder.objects import fields
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 LOG = logging.getLogger(__name__)
@@ -807,9 +807,9 @@ class PowerMaxUtils(object):
 :param new_type_extra_specs: the target type extra specs
 :return: bool
 """
-is_src_multiattach = vol_utils.is_boolean_str(
+is_src_multiattach = volume_utils.is_boolean_str(
 extra_specs.get('multiattach'))
-is_tgt_multiattach = vol_utils.is_boolean_str(
+is_tgt_multiattach = volume_utils.is_boolean_str(
 new_type_extra_specs.get('multiattach'))
 return is_src_multiattach != is_tgt_multiattach


@@ -30,7 +30,7 @@ from cinder.objects import fields
 from cinder import utils as cinder_utils
 from cinder.volume.drivers.dell_emc.unity import client
 from cinder.volume.drivers.dell_emc.unity import utils
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 storops = importutils.try_import('storops')
 if storops:
@@ -139,7 +139,7 @@ class VolumeParams(object):
 def is_in_cg(self):
 if self._is_in_cg is None:
 self._is_in_cg = (self._volume.group and
-vol_utils.is_group_a_cg_snapshot_type(
+volume_utils.is_group_a_cg_snapshot_type(
 self._volume.group))
 return self._is_in_cg
@@ -670,7 +670,7 @@ class CommonAdapter(object):
 size_in_m = utils.byte_to_mib(src_snap.size)
 else:
 size_in_m = utils.byte_to_mib(src_lun.size_total)
-vol_utils.copy_volume(
+volume_utils.copy_volume(
 src_info['device']['path'],
 dest_info['device']['path'],
 size_in_m,


@@ -28,7 +28,7 @@ from cinder import exception
 from cinder.i18n import _
 from cinder.objects import fields
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 from cinder.zonemanager import utils as zm_utils
 LOG = logging.getLogger(__name__)
@@ -177,19 +177,19 @@ def convert_to_itor_tgt_map(zone_mapping):
 def get_pool_name(volume):
-return vol_utils.extract_host(volume.host, 'pool')
+return volume_utils.extract_host(volume.host, 'pool')
 def get_pool_name_from_host(host):
-return vol_utils.extract_host(host['host'], 'pool')
+return volume_utils.extract_host(host['host'], 'pool')
 def get_backend_name_from_volume(volume):
-return vol_utils.extract_host(volume.host, 'backend')
+return volume_utils.extract_host(volume.host, 'backend')
 def get_backend_name_from_host(host):
-return vol_utils.extract_host(host['host'], 'backend')
+return volume_utils.extract_host(host['host'], 'backend')
 def get_extra_spec(volume, spec_key):


@@ -34,7 +34,7 @@ from cinder.volume.drivers.dell_emc.vnx import common
 from cinder.volume.drivers.dell_emc.vnx import replication
 from cinder.volume.drivers.dell_emc.vnx import taskflows as emc_taskflow
 from cinder.volume.drivers.dell_emc.vnx import utils
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 from cinder.zonemanager import utils as zm_utils
 storops = importutils.try_import('storops')
@@ -251,7 +251,7 @@ class CommonAdapter(replication.ReplicationAdapter):
 qos_specs = utils.get_backend_qos_specs(volume)
 if (volume.group and
-vol_utils.is_group_a_cg_snapshot_type(volume.group)):
+volume_utils.is_group_a_cg_snapshot_type(volume.group)):
 cg_id = volume.group_id
 else:
 cg_id = None


@@ -28,7 +28,7 @@ from cinder.objects import fields
 from cinder.volume.drivers.dell_emc.vnx import common
 from cinder.volume.drivers.san.san import san_opts
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 storops = importutils.try_import('storops')
@@ -120,7 +120,7 @@ def update_remote_provider_location(volume, client):
 def get_pool_from_host(host):
-return vol_utils.extract_host(host, 'pool')
+return volume_utils.extract_host(host, 'pool')
 def wait_until(condition, timeout=None, interval=common.INTERVAL_5_SEC,
@@ -412,7 +412,7 @@ def is_volume_smp(volume):
 def require_consistent_group_snapshot_enabled(func):
 @six.wraps(func)
 def inner(self, *args, **kwargs):
-if not vol_utils.is_group_a_cg_snapshot_type(args[1]):
+if not volume_utils.is_group_a_cg_snapshot_type(args[1]):
 raise NotImplementedError
 return func(self, *args, **kwargs)
 return inner


@@ -55,7 +55,7 @@ from cinder import utils
 from cinder.volume import configuration
 from cinder.volume import driver
 from cinder.volume.drivers.san import san
-from cinder.volume import volume_utils as vutils
+from cinder.volume import volume_utils
 from cinder.zonemanager import utils as fczm_utils
@@ -437,9 +437,10 @@ class XtremIOVolumeDriver(san.SanDriver):
 or self.driver_name)
 self.cluster_id = (self.configuration.safe_get('xtremio_cluster_name')
 or '')
-self.provisioning_factor = vutils.get_max_over_subscription_ratio(
-self.configuration.max_over_subscription_ratio,
-supports_auto=False)
+self.provisioning_factor = \
+volume_utils.get_max_over_subscription_ratio(
+self.configuration.max_over_subscription_ratio,
+supports_auto=False)
 self.clean_ig = (self.configuration.safe_get('xtremio_clean_unused_ig')
 or False)
@@ -784,7 +785,7 @@ class XtremIOVolumeDriver(san.SanDriver):
 LOG.warning('Failed to clean IG %d without mappings', idx)
 def _get_password(self):
-return vutils.generate_password(
+return volume_utils.generate_password(
 length=12,
 symbolgroups=(string.ascii_uppercase + string.digits))


@@ -52,7 +52,7 @@ from cinder.volume import configuration
 from cinder.volume import driver
 from cinder.volume.drivers.san import san
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 import math
 import re
@@ -489,7 +489,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver):
 def create_group(self, context, group):
 """Creates a group."""
 LOG.debug("Creating group.")
-if not utils.is_group_a_cg_snapshot_type(group):
+if not volume_utils.is_group_a_cg_snapshot_type(group):
 raise NotImplementedError()
 for vol_type_id in group.volume_type_ids:
 replication_type = self._volume_of_replicated_type(
@@ -510,7 +510,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver):
 """Creates a group from a source"""
 msg = _("Creating a group from a source is not "
 "supported when consistent_group_snapshot_enabled to true.")
-if not utils.is_group_a_cg_snapshot_type(group):
+if not volume_utils.is_group_a_cg_snapshot_type(group):
 raise NotImplementedError()
 else:
 raise exception.VolumeBackendAPIException(data=msg)
@@ -518,7 +518,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver):
 @cinder_utils.trace
 def delete_group(self, context, group, volumes):
 """Deletes a group."""
-if not utils.is_group_a_cg_snapshot_type(group):
+if not volume_utils.is_group_a_cg_snapshot_type(group):
 raise NotImplementedError()
 volume_model_updates = []
@@ -551,7 +551,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver):
 add/remove volumes from the group.
 """
 LOG.debug("Updating group.")
-if not utils.is_group_a_cg_snapshot_type(group):
+if not volume_utils.is_group_a_cg_snapshot_type(group):
 raise NotImplementedError()
 return None, None, None
@@ -559,7 +559,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver):
 @cinder_utils.trace
 def create_group_snapshot(self, context, group_snapshot, snapshots):
 """Creates a group snapshot."""
-if not utils.is_group_a_cg_snapshot_type(group_snapshot):
+if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 raise NotImplementedError()
 client = self._login()
 try:
@@ -616,7 +616,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver):
 @cinder_utils.trace
 def delete_group_snapshot(self, context, group_snapshot, snapshots):
 """Deletes a group snapshot."""
-if not utils.is_group_a_cg_snapshot_type(group_snapshot):
+if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 raise NotImplementedError()
 client = self._login()
 snap_name_base = "snapshot-" + group_snapshot.id
@@ -981,7 +981,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver):
 optional = None
 if chap_enabled:
-chap_secret = utils.generate_password()
+chap_secret = volume_utils.generate_password()
 optional = {'chapName': connector['initiator'],
 'chapTargetSecret': chap_secret,
 'chapAuthenticationRequired': True


@@ -83,7 +83,7 @@ from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient
 from cinder.volume.drivers.ibm.ibm_storage import proxy
 from cinder.volume.drivers.ibm.ibm_storage import strings
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 LOG = logging.getLogger(__name__)
@@ -363,13 +363,13 @@ class Group(object):
 self.id = group.id
 self.host = group.host
 self.consisgroup_snapshot_enabled = (
-utils.is_group_a_cg_snapshot_type(group))
+volume_utils.is_group_a_cg_snapshot_type(group))
 self.group_replication_enabled = (
-utils.is_group_a_type(group,
-"group_replication_enabled"))
+volume_utils.is_group_a_type(
+group, "group_replication_enabled"))
 self.consisgroup_replication_enabled = (
-utils.is_group_a_type(group,
-"consistent_group_replication_enabled"))
+volume_utils.is_group_a_type(
+group, "consistent_group_replication_enabled"))
 if is_snapshot:
 self.snapshots = group.snapshots
 else:
@@ -1151,7 +1151,7 @@ class DS8KProxy(proxy.IBMStorageProxy):
 if (grp.group_replication_enabled or
 grp.consisgroup_replication_enabled):
 for volume_type in group.volume_types:
-replication_type = utils.is_replicated_spec(
+replication_type = volume_utils.is_replicated_spec(
 volume_type.extra_specs)
 self._assert(replication_type,
 'Unable to create group: group %(grp)s '


@@ -43,7 +43,7 @@ from cinder.volume.drivers.ibm.ibm_storage import xiv_replication as repl
 from cinder.volume import group_types
 from cinder.volume import qos_specs
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 OPENSTACK_PRODUCT_NAME = "OpenStack"
@@ -485,7 +485,8 @@ class XIVProxy(proxy.IBMStorageProxy):
 LOG.debug('checking replication_info %(rep)s',
 {'rep': replication_info})
 volume_update['replication_status'] = 'disabled'
-cg = volume.group and utils.is_group_a_cg_snapshot_type(volume.group)
+cg = volume.group and volume_utils.is_group_a_cg_snapshot_type(
+volume.group)
 if replication_info['enabled']:
 try:
 repl.VolumeReplication(self).create_replication(
@@ -588,7 +589,7 @@ class XIVProxy(proxy.IBMStorageProxy):
 # Add this field to adjust it to generic replication (for volumes)
 replication_info = self._get_replication_info(group_specs)
-if utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 # take every vol out of cg - we can't mirror the cg otherwise.
 if volumes:
 self._update_consistencygroup(context, group,
@@ -647,7 +648,7 @@ class XIVProxy(proxy.IBMStorageProxy):
 replication_info = self._get_replication_info(group_specs)
 updated_volumes = []
-if utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 # one call deletes replication for cgs and volumes together.
 group_name = self._cg_name_from_group(group)
 repl.GroupReplication(self).delete_replication(group_name,
@@ -784,7 +785,7 @@ class XIVProxy(proxy.IBMStorageProxy):
 failback = (secondary_backend_id == strings.PRIMARY_BACKEND_ID)
 result = False
 details = ""
-if utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 result, details = repl.GroupReplication(self).failover(group,
 failback)
 else:
@@ -1782,7 +1783,7 @@ class XIVProxy(proxy.IBMStorageProxy):
 def create_group(self, context, group):
 """Creates a group."""
-if utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 cgname = self._cg_name_from_group(group)
 return self._create_consistencygroup(context, cgname)
 # For generic group, create is executed by manager
@@ -1868,7 +1869,7 @@ class XIVProxy(proxy.IBMStorageProxy):
 sorted_snapshots, source_group,
 sorted_source_vols):
 """Create volume group from volume group or volume group snapshot."""
-if utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 return self._create_consistencygroup_from_src(context, group,
 volumes,
 group_snapshot,
@@ -1984,7 +1985,7 @@ class XIVProxy(proxy.IBMStorageProxy):
 LOG.error(msg)
 raise self._get_exception()(msg)
-if utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 return self._delete_consistencygroup(context, group, volumes)
 else:
 # For generic group delete the volumes only - executed by manager
@@ -2069,7 +2070,7 @@ class XIVProxy(proxy.IBMStorageProxy):
 def update_group(self, context, group,
 add_volumes=None, remove_volumes=None):
 """Updates a group."""
-if utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 return self._update_consistencygroup(context, group, add_volumes,
 remove_volumes)
 else:
@@ -2155,7 +2156,7 @@ class XIVProxy(proxy.IBMStorageProxy):
 def create_group_snapshot(self, context, group_snapshot, snapshots):
 """Create volume group snapshot."""
-if utils.is_group_a_cg_snapshot_type(group_snapshot):
+if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 return self._create_cgsnapshot(context, group_snapshot, snapshots)
 else:
 # For generic group snapshot create executed by manager
@@ -2229,7 +2230,7 @@ class XIVProxy(proxy.IBMStorageProxy):
 @proxy._trace_time
 def delete_group_snapshot(self, context, group_snapshot, snapshots):
 """Delete volume group snapshot."""
-if utils.is_group_a_cg_snapshot_type(group_snapshot):
+if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 return self._delete_cgsnapshot(context, group_snapshot, snapshots)
 else:
 # For generic group snapshot delete is executed by manager


@@ -49,7 +49,7 @@ from cinder.volume.drivers.ibm.storwize_svc import storwize_const
 from cinder.volume.drivers.san import san
 from cinder.volume import qos_specs
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 INTERVAL_1_SEC = 1
@@ -975,7 +975,7 @@ class StorwizeHelpers(object):
 def add_chap_secret_to_host(self, host_name):
 """Generate and store a randomly-generated CHAP secret for the host."""
-chap_secret = utils.generate_password()
+chap_secret = volume_utils.generate_password()
 self.ssh.add_chap_secret(chap_secret, host_name)
 return chap_secret
@@ -1800,7 +1800,7 @@ class StorwizeHelpers(object):
 % {"id": snapshot.id})
 LOG.error(msg)
 raise exception.VolumeBackendAPIException(data=msg)
-pool = utils.extract_host(volume.host, 'pool')
+pool = volume_utils.extract_host(volume.host, 'pool')
 self.create_flashcopy_to_consistgrp(snapshot['volume_name'],
 snapshot['name'],
 fc_consistgrp,
@@ -1887,7 +1887,7 @@ class StorwizeHelpers(object):
 for source, target in zip(sources, targets):
 opts = self.get_vdisk_params(config, state,
 source['volume_type_id'])
-pool = utils.extract_host(target['host'], 'pool')
+pool = volume_utils.extract_host(target['host'], 'pool')
 self.create_flashcopy_to_consistgrp(source['name'],
 target['name'],
 fc_consistgrp,
@@ -2998,7 +2998,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 def _check_if_group_type_cg_snapshot(self, volume):
 if (volume.group_id and
-not utils.is_group_a_cg_snapshot_type(volume.group)):
+not volume_utils.is_group_a_cg_snapshot_type(volume.group)):
 msg = _('Create volume with a replication or hyperswap '
 'group_id is not supported. Please add volume to '
 'group after volume creation.')
@@ -3016,7 +3016,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 ctxt = context.get_admin_context()
 rep_type = self._get_volume_replicated_type(ctxt, volume)
-pool = utils.extract_host(volume['host'], 'pool')
+pool = volume_utils.extract_host(volume['host'], 'pool')
 model_update = None
 if opts['volume_topology'] == 'hyperswap':
@@ -3131,7 +3131,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 LOG.error(msg)
 raise exception.VolumeDriverException(message=msg)
-pool = utils.extract_host(source_vol['host'], 'pool')
+pool = volume_utils.extract_host(source_vol['host'], 'pool')
 opts = self._get_vdisk_params(source_vol['volume_type_id'])
 if opts['volume_topology'] == 'hyperswap':
@@ -3154,7 +3154,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 opts = self._get_vdisk_params(volume['volume_type_id'],
 volume_metadata=
 volume.get('volume_metadata'))
-pool = utils.extract_host(volume['host'], 'pool')
+pool = volume_utils.extract_host(volume['host'], 'pool')
 self._helpers.create_copy(snapshot['name'], volume['name'],
 snapshot['id'], self.configuration,
 opts, True, self._state, pool=pool)
@@ -3193,7 +3193,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 opts = self._get_vdisk_params(tgt_volume['volume_type_id'],
 volume_metadata=
 tgt_volume.get('volume_metadata'))
-pool = utils.extract_host(tgt_volume['host'], 'pool')
+pool = volume_utils.extract_host(tgt_volume['host'], 'pool')
 self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
 src_volume['id'], self.configuration,
 opts, True, self._state, pool=pool)
@@ -3837,7 +3837,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 for v in volumes:
 volume_type = self._get_volume_replicated_type(ctxt, v)
 grp = v.group
-if grp and utils.is_group_a_type(
+if grp and volume_utils.is_group_a_type(
 grp, "consistent_group_replication_enabled"):
 continue
 elif volume_type and v.status in ['available', 'in-use']:
@@ -4285,7 +4285,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 backend_helper = self._helpers
 node_state = self._state
 grp = volume.group
-if grp and utils.is_group_a_type(
+if grp and volume_utils.is_group_a_type(
 grp, "consistent_group_replication_enabled"):
 if (grp.replication_status ==
 fields.ReplicationStatus.FAILED_OVER):
@@ -4727,8 +4727,8 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 elif key in no_copy_keys:
 vdisk_changes.append(key)
-old_pool = utils.extract_host(volume['host'], 'pool')
-new_pool = utils.extract_host(host['host'], 'pool')
+old_pool = volume_utils.extract_host(volume['host'], 'pool')
+new_pool = volume_utils.extract_host(host['host'], 'pool')
 if old_pool != new_pool:
 need_copy = True
@@ -4953,7 +4953,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 'type_cps': rep_cps})
 raise exception.ManageExistingVolumeTypeMismatch(reason=msg)
-pool = utils.extract_host(volume['host'], 'pool')
+pool = volume_utils.extract_host(volume['host'], 'pool')
 if copies['primary']['mdisk_grp_name'] != pool:
 msg = (_("Failed to manage existing volume due to the "
 "pool of the volume to be managed does not "
@@ -5155,7 +5155,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 'hyperswap_group_enabled']
 supported_grp = False
 for grp_spec in support_grps:
-if utils.is_group_a_type(group, grp_spec):
+if volume_utils.is_group_a_type(group, grp_spec):
 supported_grp = True
 break
 if not supported_grp:
@@ -5164,8 +5164,8 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 model_update = {'status': fields.GroupStatus.ERROR}
 return model_update
-if (utils.is_group_a_cg_snapshot_type(group) or
-utils.is_group_a_type(group, "group_snapshot_enabled")):
+if (volume_utils.is_group_a_cg_snapshot_type(group) or
+volume_utils.is_group_a_type(group, "group_snapshot_enabled")):
 for vol_type_id in group.volume_type_ids:
 replication_type = self._get_volume_replicated_type(
 context, None, vol_type_id)
@@ -5187,11 +5187,11 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 # We'll rely on the generic group implementation if it is
 # a non-consistent snapshot group.
-if utils.is_group_a_type(group, "group_snapshot_enabled"):
+if volume_utils.is_group_a_type(group, "group_snapshot_enabled"):
 raise NotImplementedError()
-if utils.is_group_a_type(group,
-"consistent_group_replication_enabled"):
+if volume_utils.is_group_a_type(
+group, "consistent_group_replication_enabled"):
 rccg_type = None
 for vol_type_id in group.volume_type_ids:
 replication_type = self._get_volume_replicated_type(
@@ -5233,7 +5233,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 model_update = {'status': fields.GroupStatus.ERROR}
 return model_update
-if utils.is_group_a_type(group, "hyperswap_group_enabled"):
+if volume_utils.is_group_a_type(group, "hyperswap_group_enabled"):
 if not self._helpers.is_system_topology_hyperswap(self._state):
 LOG.error('Unable to create group: create group on '
 'a system that does not support hyperswap.')
@@ -5273,21 +5273,23 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 # we'll rely on the generic group implementation if it is
 # not a consistency group and not a consistency replication
 # request and not a hyperswap group request.
-if (not utils.is_group_a_cg_snapshot_type(group) and not
-utils.is_group_a_type(group,
-"consistent_group_replication_enabled")
-and not utils.is_group_a_type(group,
-"hyperswap_group_enabled")):
+if (not volume_utils.is_group_a_cg_snapshot_type(group) and not
+volume_utils.is_group_a_type(
+group,
+"consistent_group_replication_enabled")
+and not volume_utils.is_group_a_type(
+group,
+"hyperswap_group_enabled")):
 raise NotImplementedError()
 model_update = {'status': fields.GroupStatus.DELETED}
 volumes_model_update = []
-if utils.is_group_a_type(group,
-"consistent_group_replication_enabled"):
+if volume_utils.is_group_a_type(
+group, "consistent_group_replication_enabled"):
 model_update, volumes_model_update = self._delete_replication_grp(
 group, volumes)
-if utils.is_group_a_type(group, "hyperswap_group_enabled"):
+if volume_utils.is_group_a_type(group, "hyperswap_group_enabled"):
 model_update, volumes_model_update = self._delete_hyperswap_grp(
 group, volumes)
@@ -5325,23 +5327,25 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 # we'll rely on the generic group implementation if it is not a
 # consistency group request and not consistency replication request
 # and not a hyperswap group request.
-if (not utils.is_group_a_cg_snapshot_type(group) and not
-utils.is_group_a_type(group,
-"consistent_group_replication_enabled")
-and not utils.is_group_a_type(group,
-"hyperswap_group_enabled")):
+if (not volume_utils.is_group_a_cg_snapshot_type(group) and not
+volume_utils.is_group_a_type(
+group,
+"consistent_group_replication_enabled")
+and not volume_utils.is_group_a_type(
+group,
+"hyperswap_group_enabled")):
 raise NotImplementedError()
-if utils.is_group_a_type(group,
-"consistent_group_replication_enabled"):
+if volume_utils.is_group_a_type(
+group, "consistent_group_replication_enabled"):
 return self._update_replication_grp(context, group, add_volumes,
 remove_volumes)
-if utils.is_group_a_type(group, "hyperswap_group_enabled"):
+if volume_utils.is_group_a_type(group, "hyperswap_group_enabled"):
 return self._update_hyperswap_group(context, group,
 add_volumes, remove_volumes)
-if utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 return None, None, None
 def create_group_from_src(self, context, group, volumes,
@@ -5360,22 +5364,23 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 """
 LOG.debug('Enter: create_group_from_src.')
-if utils.is_group_a_type(group,
-"consistent_group_replication_enabled"):
+if volume_utils.is_group_a_type(
+group,
+"consistent_group_replication_enabled"):
 # An unsupported configuration
 msg = _('Unable to create replication group: create replication '
 'group from a replication group is not supported.')
 LOG.exception(msg)
 raise exception.VolumeBackendAPIException(data=msg)
-if utils.is_group_a_type(group, "hyperswap_group_enabled"):
+if volume_utils.is_group_a_type(group, "hyperswap_group_enabled"):
 # An unsupported configuration
 msg = _('Unable to create hyperswap group: create hyperswap '
 'group from a hyperswap group is not supported.')
 LOG.exception(msg)
 raise exception.VolumeBackendAPIException(data=msg)
-if not utils.is_group_a_cg_snapshot_type(group):
+if not volume_utils.is_group_a_cg_snapshot_type(group):
 # we'll rely on the generic volume groups implementation if it is
 # not a consistency group request.
 raise NotImplementedError()
@@ -5415,7 +5420,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 :param snapshots: a list of Snapshot objects in the group_snapshot.
 :returns: model_update, snapshots_model_update
 """
-if not utils.is_group_a_cg_snapshot_type(group_snapshot):
+if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 # we'll rely on the generic group implementation if it is not a
 # consistency group request.
 raise NotImplementedError()
@@ -5445,7 +5450,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
 :returns: model_update, snapshots_model_update
 """
-if not utils.is_group_a_cg_snapshot_type(group_snapshot):
+if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 # we'll rely on the generic group implementation if it is not a
 # consistency group request.
 raise NotImplementedError()


@@ -36,7 +36,7 @@ from cinder import version
 from cinder.volume import configuration
 from cinder.volume.drivers.san import san
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 from cinder.zonemanager import utils as fczm_utils
 try:
@@ -378,9 +378,9 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
 infinidat_host = self._get_or_create_host(port)
 if self.configuration.use_chap_auth:
 chap_username = (self.configuration.chap_username or
-vol_utils.generate_username())
+volume_utils.generate_username())
 chap_password = (self.configuration.chap_password or
-vol_utils.generate_password())
+volume_utils.generate_password())
 infinidat_host.update_fields(
 security_method='CHAP',
 security_chap_inbound_username=chap_username,
@@ -627,11 +627,11 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
 dst_ctx = self._device_connect_context(volume)
 with src_ctx as src_dev, dst_ctx as dst_dev:
 dd_block_size = self.configuration.volume_dd_blocksize
-vol_utils.copy_volume(src_dev['device']['path'],
-dst_dev['device']['path'],
-snapshot.volume.size * units.Ki,
-dd_block_size,
-sparse=True)
+volume_utils.copy_volume(src_dev['device']['path'],
+dst_dev['device']['path'],
+snapshot.volume.size * units.Ki,
+dd_block_size,
+sparse=True)
 except Exception:
 infinidat_volume.delete()
 raise
@@ -680,11 +680,11 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
 dst_ctx = self._device_connect_context(volume)
 with src_ctx as src_dev, dst_ctx as dst_dev:
 dd_block_size = self.configuration.volume_dd_blocksize
-vol_utils.copy_volume(src_dev['device']['path'],
-dst_dev['device']['path'],
-src_vref.size * units.Ki,
-dd_block_size,
-sparse=True)
+volume_utils.copy_volume(src_dev['device']['path'],
+dst_dev['device']['path'],
+src_vref.size * units.Ki,
+dd_block_size,
+sparse=True)
 except Exception:
 infinidat_volume.delete()
 raise
@@ -723,7 +723,7 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
 def create_group(self, context, group):
 """Creates a group."""
 # let generic volume group support handle non-cgsnapshots
-if not vol_utils.is_group_a_cg_snapshot_type(group):
+if not volume_utils.is_group_a_cg_snapshot_type(group):
 raise NotImplementedError()
 obj = self._system.cons_groups.create(name=self._make_cg_name(group),
 pool=self._get_infinidat_pool())
@@ -734,7 +734,7 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
 def delete_group(self, context, group, volumes):
 """Deletes a group."""
 # let generic volume group support handle non-cgsnapshots
-if not vol_utils.is_group_a_cg_snapshot_type(group):
+if not volume_utils.is_group_a_cg_snapshot_type(group):
 raise NotImplementedError()
 try:
 infinidat_cg = self._get_infinidat_cg(group)
@@ -751,7 +751,7 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
 add_volumes=None, remove_volumes=None):
 """Updates a group."""
 # let generic volume group support handle non-cgsnapshots
-if not vol_utils.is_group_a_cg_snapshot_type(group):
+if not volume_utils.is_group_a_cg_snapshot_type(group):
 raise NotImplementedError()
 add_volumes = add_volumes if add_volumes else []
 remove_volumes = remove_volumes if remove_volumes else []
@@ -775,7 +775,7 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
 # order as the target (volumes)
 # let generic volume group support handle non-cgsnapshots
-if not vol_utils.is_group_a_cg_snapshot_type(group):
+if not volume_utils.is_group_a_cg_snapshot_type(group):
 raise NotImplementedError()
 self.create_group(context, group)
 new_infinidat_group = self._get_infinidat_cg(group)
@@ -795,7 +795,7 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
 def create_group_snapshot(self, context, group_snapshot, snapshots):
 """Creates a group_snapshot."""
 # let generic volume group support handle non-cgsnapshots
-if not vol_utils.is_group_a_cg_snapshot_type(group_snapshot):
+if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 raise NotImplementedError()
 infinidat_cg = self._get_infinidat_cg(group_snapshot.group)
 group_snap_name = self._make_group_snapshot_name(group_snapshot)
@@ -814,7 +814,7 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
 def delete_group_snapshot(self, context, group_snapshot, snapshots):
 """Deletes a group_snapshot."""
 # let generic volume group support handle non-cgsnapshots
-if not vol_utils.is_group_a_cg_snapshot_type(group_snapshot):
+if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 raise NotImplementedError()
 cgsnap_name = self._make_group_snapshot_name(group_snapshot)
 infinidat_cgsnap = self._system.cons_groups.safe_get(name=cgsnap_name)


@@ -28,7 +28,7 @@ from cinder import interface
 from cinder import utils as cinder_utils
 from cinder.volume import driver
 from cinder.volume.drivers.inspur.instorage import instorage_common
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 from cinder.zonemanager import utils as fczm_utils
 LOG = logging.getLogger(__name__)
@@ -141,7 +141,7 @@ class InStorageMCSFCDriver(instorage_common.InStorageMCSCommonDriver,
 properties['target_wwn'] = conn_wwpns
-i_t_map = utils.make_initiator_target_all2all_map(
+i_t_map = volume_utils.make_initiator_target_all2all_map(
 connector['wwpns'], conn_wwpns)
 properties['initiator_target_map'] = i_t_map
@@ -223,9 +223,10 @@ class InStorageMCSFCDriver(instorage_common.InStorageMCSCommonDriver,
 # a VM deletion.
 for node in self._state['storage_nodes'].values():
 target_wwpns.extend(node['WWPN'])
-init_targ_map = (utils.make_initiator_target_all2all_map
-(connector['wwpns'],
-target_wwpns))
+init_targ_map = (
+volume_utils.make_initiator_target_all2all_map(
+connector['wwpns'],
+target_wwpns))
 info['data'] = {'initiator_target_map': init_targ_map}
 # Only remove the zone if it's the last volume removed
 fczm_utils.remove_fc_zone(info)


@@ -36,7 +36,7 @@ from cinder.objects import fields
 from cinder import utils
 from cinder.volume import configuration
 from cinder.volume.drivers.san import san
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 krest = importutils.try_import("krest")
@@ -365,7 +365,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
 """Failover to replication target."""
 volume_updates = []
 back_end_ip = None
-svc_host = vol_utils.extract_host(self.host, 'backend')
+svc_host = volume_utils.extract_host(self.host, 'backend')
 service = objects.Service.get_by_args(context, svc_host,
 'cinder-volume')
@@ -587,11 +587,11 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
 self.create_volume(volume)
 conn = self.initialize_connection(volume, properties)
 dest_attach_info = self._connect_device(conn)
-vol_utils.copy_volume(src_attach_info['device']['path'],
-dest_attach_info['device']['path'],
-snapshot.volume.size * units.Ki,
-self.configuration.volume_dd_blocksize,
-sparse=True)
+volume_utils.copy_volume(src_attach_info['device']['path'],
+dest_attach_info['device']['path'],
+snapshot.volume.size * units.Ki,
+self.configuration.volume_dd_blocksize,
+sparse=True)
 self._kaminario_disconnect_volume(src_attach_info,
 dest_attach_info)
 self.terminate_connection(volume, properties)
@@ -635,11 +635,11 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
 self.create_volume(volume)
 conn = self.initialize_connection(volume, properties)
 dest_attach_info = self._connect_device(conn)
-vol_utils.copy_volume(src_attach_info['device']['path'],
-dest_attach_info['device']['path'],
-src_vref.size * units.Ki,
-self.configuration.volume_dd_blocksize,
-sparse=True)
+volume_utils.copy_volume(src_attach_info['device']['path'],
+dest_attach_info['device']['path'],
+src_vref.size * units.Ki,
+self.configuration.volume_dd_blocksize,
+sparse=True)
 self._kaminario_disconnect_volume(src_attach_info,
 dest_attach_info)
 self.terminate_connection(volume, properties)
@@ -816,7 +816,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
 and (cap.total - cap.free) != 0):
 ratio = provisioned_vol / float(cap.total - cap.free)
 else:
-ratio = vol_utils.get_max_over_subscription_ratio(
+ratio = volume_utils.get_max_over_subscription_ratio(
 conf.max_over_subscription_ratio, supports_auto=True)
 self.stats = {'QoS_support': False,


@@ -35,7 +35,7 @@ from cinder import interface
 from cinder import utils
 from cinder.volume import configuration
 from cinder.volume import driver
-from cinder.volume import volume_utils as volutils
+from cinder.volume import volume_utils
 LOG = logging.getLogger(__name__)
@@ -167,7 +167,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
 # be sure to convert before passing in
 vol_sz_in_meg = size_in_g * units.Ki
-volutils.clear_volume(
+volume_utils.clear_volume(
 vol_sz_in_meg, dev_path,
 volume_clear=self.configuration.volume_clear,
 volume_clear_size=self.configuration.volume_clear_size)
@@ -289,7 +289,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
 try:
 lvm_type = self.configuration.lvm_type
 if lvm_type == 'auto':
-if volutils.supports_thin_provisioning():
+if volume_utils.supports_thin_provisioning():
 lvm_type = 'thin'
 else:
 lvm_type = 'default'
@@ -307,7 +307,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
 self.configuration.volume_group)
 raise exception.VolumeBackendAPIException(data=message)
-vg_list = volutils.get_all_volume_groups(
+vg_list = volume_utils.get_all_volume_groups(
 self.configuration.volume_group)
 vg_dict = next(vg for vg in vg_list if vg['name'] == self.vg.vg_name)
 if vg_dict is None:
@@ -325,7 +325,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
 self.configuration.lvm_type = 'default'
-if volutils.supports_thin_provisioning():
+if volume_utils.supports_thin_provisioning():
 if self.vg.get_volume(pool_name) is not None:
 LOG.info('Enabling LVM thin provisioning by default '
 'because a thin pool exists.')
@@ -337,7 +337,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
 if self.configuration.lvm_type == 'thin':
 # Specific checks for using Thin provisioned LV's
-if not volutils.supports_thin_provisioning():
+if not volume_utils.supports_thin_provisioning():
 message = _("Thin provisioning not supported "
 "on this version of LVM.")
 raise exception.VolumeBackendAPIException(data=message)
@@ -427,12 +427,12 @@ class LVMVolumeDriver(driver.VolumeDriver):
 # copy_volume expects sizes in MiB, we store integer GiB
 # be sure to convert before passing in
-volutils.copy_volume(self.local_path(snapshot),
-self.local_path(volume),
-snapshot['volume_size'] * units.Ki,
-self.configuration.volume_dd_blocksize,
-execute=self._execute,
-sparse=self._sparse_copy_volume)
+volume_utils.copy_volume(self.local_path(snapshot),
+self.local_path(volume),
+snapshot['volume_size'] * units.Ki,
+self.configuration.volume_dd_blocksize,
+execute=self._execute,
+sparse=self._sparse_copy_volume)
 def delete_volume(self, volume):
 """Deletes a logical volume."""
@@ -549,7 +549,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
 mirror_count)
 self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
-volutils.copy_volume(
+volume_utils.copy_volume(
 self.local_path(temp_snapshot),
 self.local_path(volume),
 src_vref['size'] * units.Ki,
@@ -594,8 +594,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
 lv_name = existing_ref['source-name']
 self.vg.get_volume(lv_name)
-vol_id = volutils.extract_id_from_volume_name(lv_name)
-if volutils.check_already_managed_volume(vol_id):
+vol_id = volume_utils.extract_id_from_volume_name(lv_name)
+if volume_utils.check_already_managed_volume(vol_id):
 raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name)
 # Attempt to rename the LV to match the OpenStack internal name.
@@ -677,10 +677,12 @@ class LVMVolumeDriver(driver.VolumeDriver):
 continue
 if resource_type == 'volume':
-potential_id = volutils.extract_id_from_volume_name(lv['name'])
+potential_id = volume_utils.extract_id_from_volume_name(
+lv['name'])
 else:
 unescape = self._unescape_snapshot(lv['name'])
-potential_id = volutils.extract_id_from_snapshot_name(unescape)
+potential_id = volume_utils.extract_id_from_snapshot_name(
+unescape)
 lv_info = {'reference': {'source-name': lv['name']},
 'size': int(math.ceil(float(lv['size']))),
 'cinder_id': None,
@@ -703,8 +705,8 @@ class LVMVolumeDriver(driver.VolumeDriver):
 entries.append(lv_info)
-return volutils.paginate_entries_list(entries, marker, limit, offset,
-sort_keys, sort_dirs)
+return volume_utils.paginate_entries_list(entries, marker, limit,
+offset, sort_keys, sort_dirs)
 def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
 sort_keys, sort_dirs):
@@ -757,7 +759,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
 LOG.error(message)
 raise exception.VolumeBackendAPIException(data=message)
-vg_list = volutils.get_all_volume_groups()
+vg_list = volume_utils.get_all_volume_groups()
 try:
 next(vg for vg in vg_list if vg['name'] == dest_vg)
 except StopIteration:
@@ -785,12 +787,12 @@ class LVMVolumeDriver(driver.VolumeDriver):
 # be sure to convert before passing in
 size_in_mb = int(volume['size']) * units.Ki
 try:
-volutils.copy_volume(self.local_path(volume),
-self.local_path(volume, vg=dest_vg),
-size_in_mb,
-self.configuration.volume_dd_blocksize,
-execute=self._execute,
-sparse=self._sparse_copy_volume)
+volume_utils.copy_volume(self.local_path(volume),
+self.local_path(volume, vg=dest_vg),
+size_in_mb,
+self.configuration.volume_dd_blocksize,
+execute=self._execute,
+sparse=self._sparse_copy_volume)
 except Exception as e:
 with excutils.save_and_reraise_exception():
 LOG.error("Volume migration failed due to "


@@ -30,7 +30,7 @@ from cinder import objects
 from cinder.volume import driver
 from cinder.volume.drivers.nexenta.ns5 import jsonrpc
 from cinder.volume.drivers.nexenta import options
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 LOG = logging.getLogger(__name__)
@@ -997,8 +997,8 @@ class NexentaISCSIDriver(driver.ISCSIDriver):
 'path': volume_path,
 'size': volume_size
 }
-vid = utils.extract_id_from_volume_name(volume_name)
-if utils.check_already_managed_volume(vid):
+vid = volume_utils.extract_id_from_volume_name(volume_name)
+if volume_utils.check_already_managed_volume(vid):
 message = (_('Volume %(name)s already managed')
 % {'name': volume_name})
 raise jsonrpc.NefException(code='EBUSY', message=message)
@@ -1065,7 +1065,7 @@ class NexentaISCSIDriver(driver.ISCSIDriver):
 'volume_name': volume_name,
 'volume_size': volume_size
 }
-sid = utils.extract_id_from_snapshot_name(name)
+sid = volume_utils.extract_id_from_snapshot_name(name)
 if self._check_already_managed_snapshot(sid):
 message = (_('Snapshot %(name)s already managed')
 % {'name': name})
@@ -1229,9 +1229,9 @@ class NexentaISCSIDriver(driver.ISCSIDriver):
 'cinder_id': cinder_id,
 'extra_info': extra_info
 })
-return utils.paginate_entries_list(manageable_volumes,
-marker, limit, offset,
-sort_keys, sort_dirs)
+return volume_utils.paginate_entries_list(manageable_volumes,
+marker, limit, offset,
+sort_keys, sort_dirs)
 def unmanage(self, volume):
 """Removes the specified volume from Cinder management.
@@ -1414,9 +1414,9 @@ class NexentaISCSIDriver(driver.ISCSIDriver):
 'extra_info': extra_info,
 'source_reference': source_reference
 })
-return utils.paginate_entries_list(manageable_snapshots,
-marker, limit, offset,
-sort_keys, sort_dirs)
+return volume_utils.paginate_entries_list(manageable_snapshots,
+marker, limit, offset,
+sort_keys, sort_dirs)
 def unmanage_snapshot(self, snapshot):
 """Removes the specified snapshot from Cinder management.


@@ -32,7 +32,7 @@ from cinder.privsep import fs
 from cinder.volume.drivers.nexenta.ns5 import jsonrpc
 from cinder.volume.drivers.nexenta import options
 from cinder.volume.drivers import nfs
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 LOG = logging.getLogger(__name__)
@@ -898,8 +898,8 @@ class NexentaNfsDriver(nfs.NfsDriver):
 'name': volume_name,
 'path': volume_path
 }
-vid = utils.extract_id_from_volume_name(volume_name)
-if utils.check_already_managed_volume(vid):
+vid = volume_utils.extract_id_from_volume_name(volume_name)
+if volume_utils.check_already_managed_volume(vid):
 message = (_('Volume %(name)s already managed')
 % {'name': volume_name})
 raise jsonrpc.NefException(code='EBUSY', message=message)
@@ -966,7 +966,7 @@ class NexentaNfsDriver(nfs.NfsDriver):
 'volume_name': volume_name,
 'volume_size': volume_size
 }
-sid = utils.extract_id_from_snapshot_name(name)
+sid = volume_utils.extract_id_from_snapshot_name(name)
 if self._check_already_managed_snapshot(sid):
 message = (_('Snapshot %(name)s already managed')
 % {'name': name})
@@ -1123,9 +1123,9 @@ class NexentaNfsDriver(nfs.NfsDriver):
 'cinder_id': cinder_id,
 'extra_info': extra_info
 })
-return utils.paginate_entries_list(manageable_volumes,
-marker, limit, offset,
-sort_keys, sort_dirs)
+return volume_utils.paginate_entries_list(manageable_volumes,
+marker, limit, offset,
+sort_keys, sort_dirs)
 def unmanage(self, volume):
 """Removes the specified volume from Cinder management.
@@ -1308,9 +1308,9 @@ class NexentaNfsDriver(nfs.NfsDriver):
 'extra_info': extra_info,
 'source_reference': source_reference
 })
-return utils.paginate_entries_list(manageable_snapshots,
-marker, limit, offset,
-sort_keys, sort_dirs)
+return volume_utils.paginate_entries_list(manageable_snapshots,
+marker, limit, offset,
+sort_keys, sort_dirs)
 def unmanage_snapshot(self, snapshot):
 """Removes the specified snapshot from Cinder management.


@@ -42,7 +42,7 @@ from cinder.volume.drivers.san import san
 from cinder.volume import qos_specs
 from cinder.volume.targets import iscsi as iscsi_driver
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 LOG = logging.getLogger(__name__)
@@ -656,7 +656,7 @@ class SolidFireDriver(san.SanISCSIDriver):
 def _generate_random_string(self, length):
 """Generates random_string to use for CHAP password."""
-return vol_utils.generate_password(
+return volume_utils.generate_password(
 length=length,
 symbolgroups=(string.ascii_uppercase + string.digits))
@@ -1604,7 +1604,7 @@ class SolidFireDriver(san.SanISCSIDriver):
 # SolidFire does not have the concept of volume groups. We're going to
 # play along with the group song and dance. There will be a lot of
 # no-ops because of this.
-if vol_utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 return {'status': fields.GroupStatus.AVAILABLE}
 # Blatantly ripping off this pattern from other drivers.
@@ -1614,7 +1614,7 @@ class SolidFireDriver(san.SanISCSIDriver):
 snapshots=None, source_group=None,
 source_vols=None):
 # At this point this is just a pass-through.
-if vol_utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 return self._create_consistencygroup_from_src(
 ctxt,
 group,
@@ -1629,7 +1629,7 @@ class SolidFireDriver(san.SanISCSIDriver):
 def create_group_snapshot(self, ctxt, group_snapshot, snapshots):
 # This is a pass-through to the old consistency group stuff.
-if vol_utils.is_group_a_cg_snapshot_type(group_snapshot):
+if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 return self._create_cgsnapshot(ctxt, group_snapshot, snapshots)
 # Default implementation handles other scenarios.
@@ -1639,7 +1639,7 @@ class SolidFireDriver(san.SanISCSIDriver):
 # Delete a volume group. SolidFire does not track volume groups,
 # however we do need to actually remove the member volumes of the
 # group. Right now only consistent volume groups are supported.
-if vol_utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 return self._delete_consistencygroup(ctxt, group, volumes)
 # Default implementation handles other scenarios.
@@ -1649,7 +1649,7 @@ class SolidFireDriver(san.SanISCSIDriver):
 # Regarding consistency groups SolidFire does not track volumes, so
 # this is a no-op. In the future with replicated volume groups this
 # might actually do something.
-if vol_utils.is_group_a_cg_snapshot_type(group):
+if volume_utils.is_group_a_cg_snapshot_type(group):
 return self._update_consistencygroup(ctxt,
 group,
 add_volumes,
@@ -1723,7 +1723,7 @@ class SolidFireDriver(san.SanISCSIDriver):
 return None, None
 def delete_group_snapshot(self, context, group_snapshot, snapshots):
-if vol_utils.is_group_a_cg_snapshot_type(group_snapshot):
+if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
 return self._delete_cgsnapshot(context, group_snapshot, snapshots)
 # Default implementation handles other scenarios.


@@ -43,7 +43,7 @@ from cinder.objects import snapshot
 from cinder.objects import volume
 from cinder import utils
 from cinder.volume import configuration
-from cinder.volume import volume_utils as volutils
+from cinder.volume import volume_utils
 cinder_opts = [
@@ -676,9 +676,9 @@ class SynoCommon(object):
 if self.config.safe_get('use_chap_auth') and self.config.use_chap_auth:
 auth_type = 1
 chap_username = (self.config.safe_get('chap_username') or
-volutils.generate_username(12))
+volume_utils.generate_username(12))
 chap_password = (self.config.safe_get('chap_password') or
-volutils.generate_password())
+volume_utils.generate_password())
 provider_auth = ' '.join(('CHAP', chap_username, chap_password))
 trg_prefix = self.config.safe_get('target_prefix')


@@ -17,7 +17,7 @@ from cinder import exception
 from cinder.i18n import _
 import cinder.privsep.targets.scst
 from cinder.volume.targets import iscsi
-from cinder.volume import volume_utils as vutils
+from cinder.volume import volume_utils
 LOG = logging.getLogger(__name__)
@@ -231,7 +231,7 @@ class SCSTAdm(iscsi.ISCSITarget):
 return tid
 def _iscsi_location(self, ip, target, iqn, lun=None):
-return "%s:%s,%s %s %s" % (vutils.sanitize_host(ip),
+return "%s:%s,%s %s %s" % (volume_utils.sanitize_host(ip),
 self.configuration.target_port,
 target, iqn, lun)
@@ -290,8 +290,8 @@ class SCSTAdm(iscsi.ISCSITarget):
 else:
 chap_auth = self._get_target_chap_auth(context, volume)
 if not chap_auth:
-chap_auth = (vutils.generate_username(),
-vutils.generate_password())
+chap_auth = (volume_utils.generate_username(),
+volume_utils.generate_password())
 tid = self.create_iscsi_target(iscsi_name, volume['id'], iscsi_target,
 lun, volume_path, chap_auth)