Browse Source

Continue renaming volume_utils (core)

Now that volume_utils has been renamed, import it
and use it consistently everywhere.

Change-Id: I6a74f664ff890ff3f24f715a1e93df7e0384aa6b
changes/74/680474/2
Eric Harney 2 years ago
parent
commit
ca5c2ce4e8
  1. 4
      cinder/cmd/manage.py
  2. 6
      cinder/db/sqlalchemy/api.py
  3. 8
      cinder/group/api.py
  4. 21
      cinder/scheduler/filter_scheduler.py
  5. 13
      cinder/scheduler/host_manager.py
  6. 5
      cinder/tests/unit/scheduler/fakes.py
  7. 8
      cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py
  8. 41
      cinder/tests/unit/scheduler/test_capacity_weigher.py
  9. 12
      cinder/tests/unit/scheduler/test_filter_scheduler.py
  10. 24
      cinder/tests/unit/volume/test_init_host.py
  11. 4
      cinder/tests/unit/volume/test_volume_migration.py
  12. 16
      cinder/volume/flows/api/create_volume.py
  13. 75
      cinder/volume/manager.py

4
cinder/cmd/manage.py

@@ -85,7 +85,7 @@ from cinder import rpc
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import version
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_utils as vutils
from cinder.volume import volume_utils
CONF = cfg.CONF
@@ -444,7 +444,7 @@ class VolumeCommands(object):
"""Delete a volume, bypassing the check that it must be available."""
ctxt = context.get_admin_context()
volume = objects.Volume.get_by_id(ctxt, volume_id)
host = vutils.extract_host(volume.host) if volume.host else None
host = volume_utils.extract_host(volume.host) if volume.host else None
if not host:
print(_("Volume not yet assigned to host."))

6
cinder/db/sqlalchemy/api.py

@@ -66,7 +66,7 @@ from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import utils
from cinder.volume import volume_utils as vol_utils
from cinder.volume import volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -577,10 +577,10 @@ def is_backend_frozen(context, host, cluster_name):
"""Check if a storage backend is frozen based on host and cluster_name."""
if cluster_name:
model = models.Cluster
conditions = [model.name == vol_utils.extract_host(cluster_name)]
conditions = [model.name == volume_utils.extract_host(cluster_name)]
else:
model = models.Service
conditions = [model.host == vol_utils.extract_host(host)]
conditions = [model.host == volume_utils.extract_host(host)]
conditions.extend((~model.deleted, model.frozen))
query = get_session().query(sql.exists().where(and_(*conditions)))
frozen = query.scalar()

8
cinder/group/api.py

@@ -40,7 +40,7 @@ from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_types
from cinder.volume import volume_utils as vol_utils
from cinder.volume import volume_utils
CONF = cfg.CONF
@@ -785,8 +785,8 @@ class API(base.Base):
# group.host and add_vol_ref['host'] are in this format:
# 'host@backend#pool'. Extract host (host@backend) before
# doing comparison.
vol_host = vol_utils.extract_host(add_vol_ref['host'])
group_host = vol_utils.extract_host(group.host)
vol_host = volume_utils.extract_host(add_vol_ref['host'])
group_host = volume_utils.extract_host(group.host)
if group_host != vol_host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
@@ -956,7 +956,7 @@
raise exception.InvalidGroupType(reason=msg)
for vol_type in group.volume_types:
if not vol_utils.is_replicated_spec(vol_type.extra_specs):
if not volume_utils.is_replicated_spec(vol_type.extra_specs):
msg = _("Volume type %s does not have 'replication_enabled' "
"spec key set to '<is> True'.") % vol_type.id
LOG.error(msg)

21
cinder/scheduler/filter_scheduler.py

@@ -28,7 +28,7 @@ from cinder import exception
from cinder.i18n import _
from cinder.scheduler import driver
from cinder.scheduler import scheduler_options
from cinder.volume import volume_utils as utils
from cinder.volume import volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -117,11 +117,11 @@ class FilterScheduler(driver.Scheduler):
weighed_backends = self._get_weighted_candidates(context, request_spec,
filter_properties)
# If backend has no pool defined we will ignore it in the comparison
ignore_pool = not bool(utils.extract_host(backend, 'pool'))
ignore_pool = not bool(volume_utils.extract_host(backend, 'pool'))
for weighed_backend in weighed_backends:
backend_id = weighed_backend.obj.backend_id
if ignore_pool:
backend_id = utils.extract_host(backend_id)
backend_id = volume_utils.extract_host(backend_id)
if backend_id == backend:
return weighed_backend.obj
@@ -160,7 +160,7 @@
if backend_state.backend_id == backend:
return backend_state
if utils.extract_host(backend, 'pool') is None:
if volume_utils.extract_host(backend, 'pool') is None:
# legacy volumes created before pool is introduced has no pool
# info in host. But host_state.host always include pool level
# info. In this case if above exact match didn't work out, we
@@ -171,8 +171,9 @@
# to happen even migration policy is 'never'.
for weighed_backend in weighed_backends:
backend_state = weighed_backend.obj
new_backend = utils.extract_host(backend_state.backend_id,
'backend')
new_backend = volume_utils.extract_host(
backend_state.backend_id,
'backend')
if new_backend == backend:
return backend_state
@@ -447,8 +448,8 @@
for backend2 in backend_list2:
# Should schedule creation of group on backend level,
# not pool level.
if (utils.extract_host(backend1.obj.backend_id) ==
utils.extract_host(backend2.obj.backend_id)):
if (volume_utils.extract_host(backend1.obj.backend_id) ==
volume_utils.extract_host(backend2.obj.backend_id)):
new_backends.append(backend1)
if not new_backends:
return []
@@ -526,14 +527,14 @@
# snapshot or volume).
resource_backend = request_spec.get('resource_backend')
if weighed_backends and resource_backend:
resource_backend_has_pool = bool(utils.extract_host(
resource_backend_has_pool = bool(volume_utils.extract_host(
resource_backend, 'pool'))
# Get host name including host@backend#pool info from
# weighed_backends.
for backend in weighed_backends[::-1]:
backend_id = (
backend.obj.backend_id if resource_backend_has_pool
else utils.extract_host(backend.obj.backend_id)
else volume_utils.extract_host(backend.obj.backend_id)
)
if backend_id != resource_backend:
weighed_backends.remove(backend)

13
cinder/scheduler/host_manager.py

@@ -35,7 +35,7 @@ from cinder import objects
from cinder.scheduler import filters
from cinder import utils
from cinder.volume import volume_types
from cinder.volume import volume_utils as vol_utils
from cinder.volume import volume_utils
# FIXME: This file should be renamed to backend_manager, we should also rename
@@ -246,7 +246,7 @@ class BackendState(object):
pool_name = self.volume_backend_name
if pool_name is None:
# To get DEFAULT_POOL_NAME
pool_name = vol_utils.extract_host(self.host, 'pool', True)
pool_name = volume_utils.extract_host(self.host, 'pool', True)
if len(self.pools) == 0:
# No pool was there
@@ -349,8 +349,8 @@
class PoolState(BackendState):
def __init__(self, host, cluster_name, capabilities, pool_name):
new_host = vol_utils.append_host(host, pool_name)
new_cluster = vol_utils.append_host(cluster_name, pool_name)
new_host = volume_utils.append_host(host, pool_name)
new_cluster = volume_utils.append_host(cluster_name, pool_name)
super(PoolState, self).__init__(new_host, new_cluster, capabilities)
self.pool_name = pool_name
# No pools in pool
@@ -726,7 +726,8 @@ class HostManager(object):
filtered = False
pool = state.pools[key]
# use backend_key.pool_name to make sure key is unique
pool_key = vol_utils.append_host(backend_key, pool.pool_name)
pool_key = volume_utils.append_host(backend_key,
pool.pool_name)
new_pool = dict(name=pool_key)
new_pool.update(dict(capabilities=pool.capabilities))
@@ -871,7 +872,7 @@ class HostManager(object):
def _notify_capacity_usage(self, context, usage):
if usage:
for u in usage:
vol_utils.notify_about_capacity_usage(
volume_utils.notify_about_capacity_usage(
context, u, u['type'], None, None)
LOG.debug("Publish storage capacity: %s.", usage)

5
cinder/tests/unit/scheduler/fakes.py

@@ -22,7 +22,7 @@ from oslo_utils import uuidutils
from cinder.scheduler import filter_scheduler
from cinder.scheduler import host_manager
from cinder.volume import volume_utils as utils
from cinder.volume import volume_utils
UTC_NOW = timeutils.utcnow()
@@ -301,7 +301,8 @@ def mock_host_manager_db_calls(mock_obj, backends_with_pools=False,
{
'id': sid,
'host': svc,
'availability_zone': az_map[utils.extract_host(svc, 'host')],
'availability_zone': az_map[volume_utils.extract_host(svc,
'host')],
'topic': 'volume',
'disabled': False,
'updated_at': timeutils.utcnow(),

8
cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py

@@ -24,7 +24,7 @@ from cinder import context
from cinder.scheduler import weights
from cinder import test
from cinder.tests.unit.scheduler import fakes
from cinder.volume import volume_utils as utils
from cinder.volume import volume_utils
class AllocatedCapacityWeigherTestCase(test.TestCase):
@@ -66,7 +66,7 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(0.0, weighed_host.weight)
self.assertEqual(
'host1', utils.extract_host(weighed_host.obj.host))
'host1', volume_utils.extract_host(weighed_host.obj.host))
def test_capacity_weight_multiplier1(self):
self.flags(allocated_capacity_weight_multiplier=1.0)
@@ -82,7 +82,7 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(1.0, weighed_host.weight)
self.assertEqual(
'host4', utils.extract_host(weighed_host.obj.host))
'host4', volume_utils.extract_host(weighed_host.obj.host))
def test_capacity_weight_multiplier2(self):
self.flags(allocated_capacity_weight_multiplier=-2.0)
@@ -98,4 +98,4 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(0.0, weighed_host.weight)
self.assertEqual(
'host1', utils.extract_host(weighed_host.obj.host))
'host1', volume_utils.extract_host(weighed_host.obj.host))

41
cinder/tests/unit/scheduler/test_capacity_weigher.py

@@ -25,7 +25,7 @@ from cinder import context
from cinder.scheduler import weights
from cinder import test
from cinder.tests.unit.scheduler import fakes
from cinder.volume import volume_utils as utils
from cinder.volume import volume_utils
@ddt.ddt
@@ -109,7 +109,8 @@ class CapacityWeigherTestCase(test.TestCase):
backend_info_list,
weight_properties=weight_properties)[0]
self.assertEqual(1.0, weighed_host.weight)
self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
self.assertEqual(winner,
volume_utils.extract_host(weighed_host.obj.host))
@ddt.data(
{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
@@ -158,7 +159,8 @@ class CapacityWeigherTestCase(test.TestCase):
weight_properties=weight_properties)
weighed_host = weighed_host[0]
self.assertEqual(0.0, weighed_host.weight)
self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
self.assertEqual(winner,
volume_utils.extract_host(weighed_host.obj.host))
@ddt.data(
{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
@@ -206,7 +208,8 @@ class CapacityWeigherTestCase(test.TestCase):
backend_info_list,
weight_properties=weight_properties)[0]
self.assertEqual(1.0 * 2, weighed_host.weight)
self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
self.assertEqual(winner,
volume_utils.extract_host(weighed_host.obj.host))
def test_capacity_weight_no_unknown_or_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
@@ -233,11 +236,13 @@ class CapacityWeigherTestCase(test.TestCase):
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host2 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host2', utils.extract_host(worst_host.obj.host))
self.assertEqual('host2',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_free_unknown(self):
self.flags(capacity_weight_multiplier=-1.0)
@@ -275,11 +280,13 @@ class CapacityWeigherTestCase(test.TestCase):
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_cap_unknown(self):
self.flags(capacity_weight_multiplier=-1.0)
@@ -317,11 +324,13 @@ class CapacityWeigherTestCase(test.TestCase):
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_free_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
@@ -359,11 +368,13 @@ class CapacityWeigherTestCase(test.TestCase):
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))
def test_capacity_weight_cap_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
@@ -401,8 +412,10 @@ class CapacityWeigherTestCase(test.TestCase):
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
self.assertEqual('host4',
volume_utils.extract_host(best_host.obj.host))
# and host5 is the worst:
worst_host = weighed_hosts[-1]
self.assertEqual(-1.0, worst_host.weight)
self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
self.assertEqual('host5',
volume_utils.extract_host(worst_host.obj.host))

12
cinder/tests/unit/scheduler/test_filter_scheduler.py

@@ -27,7 +27,7 @@ from cinder.scheduler import host_manager
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.scheduler import fakes
from cinder.tests.unit.scheduler import test_scheduler
from cinder.volume import volume_utils as utils
from cinder.volume import volume_utils
@ddt.ddt
@@ -379,7 +379,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
request_spec = objects.RequestSpec.from_primitives(request_spec)
ret_host = sched.backend_passes_filters(ctx, 'host1#lvm1',
request_spec, {})
self.assertEqual('host1', utils.extract_host(ret_host.host))
self.assertEqual('host1', volume_utils.extract_host(ret_host.host))
self.assertTrue(_mock_service_get_topic.called)
@mock.patch('cinder.db.service_get_all')
@@ -395,7 +395,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
request_spec = objects.RequestSpec.from_primitives(request_spec)
ret_host = sched.backend_passes_filters(ctx, 'host5#_pool0',
request_spec, {})
self.assertEqual('host5', utils.extract_host(ret_host.host))
self.assertEqual('host5', volume_utils.extract_host(ret_host.host))
self.assertTrue(_mock_service_get_topic.called)
@mock.patch('cinder.db.service_get_all')
@@ -408,7 +408,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 1}}
request_spec = objects.RequestSpec.from_primitives(request_spec)
ret_host = sched.backend_passes_filters(ctx, 'host1', request_spec, {})
self.assertEqual('host1', utils.extract_host(ret_host.host))
self.assertEqual('host1', volume_utils.extract_host(ret_host.host))
self.assertTrue(mock_service_get_all.called)
@mock.patch('cinder.db.service_get_all')
@@ -481,7 +481,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state = sched.find_retype_backend(ctx, request_spec,
filter_properties={},
migration_policy='never')
self.assertEqual('host4', utils.extract_host(host_state.host))
self.assertEqual('host4', volume_utils.extract_host(host_state.host))
@mock.patch('cinder.db.service_get_all')
def test_retype_with_pool_policy_never_migrate_pass(
@@ -540,7 +540,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state = sched.find_retype_backend(ctx, request_spec,
filter_properties={},
migration_policy='on-demand')
self.assertEqual('host1', utils.extract_host(host_state.host))
self.assertEqual('host1', volume_utils.extract_host(host_state.host))
@mock.patch('cinder.db.service_get_all')
def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic):

24
cinder/tests/unit/volume/test_init_host.py

@@ -26,7 +26,7 @@ from cinder.tests.unit import utils as tests_utils
from cinder.tests.unit import volume as base
from cinder.volume import driver
from cinder.volume import volume_migration as volume_migration
from cinder.volume import volume_utils as volutils
from cinder.volume import volume_utils
CONF = cfg.CONF
@@ -44,16 +44,16 @@ class VolumeInitHostTestCase(base.BaseVolumeTestCase):
self.context, size=100, host=CONF.host)
vol1 = tests_utils.create_volume(
self.context, size=128,
host=volutils.append_host(CONF.host, 'pool0'))
host=volume_utils.append_host(CONF.host, 'pool0'))
vol2 = tests_utils.create_volume(
self.context, size=256,
host=volutils.append_host(CONF.host, 'pool0'))
host=volume_utils.append_host(CONF.host, 'pool0'))
vol3 = tests_utils.create_volume(
self.context, size=512,
host=volutils.append_host(CONF.host, 'pool1'))
host=volume_utils.append_host(CONF.host, 'pool1'))
vol4 = tests_utils.create_volume(
self.context, size=1024,
host=volutils.append_host(CONF.host, 'pool2'))
host=volume_utils.append_host(CONF.host, 'pool2'))
self.volume.init_host(service_id=self.service_id)
init_host_mock.assert_called_once_with(
service_id=self.service_id, added_to_cluster=None)
@@ -74,7 +74,7 @@ class VolumeInitHostTestCase(base.BaseVolumeTestCase):
# to be volume_backend_name or None
vol0.refresh()
expected_host = volutils.append_host(CONF.host, 'fake')
expected_host = volume_utils.append_host(CONF.host, 'fake')
self.assertEqual(expected_host, vol0.host)
self.volume.delete_volume(self.context, vol0)
self.volume.delete_volume(self.context, vol1)
@@ -100,24 +100,24 @@ class VolumeInitHostTestCase(base.BaseVolumeTestCase):
cluster_name=cluster_name)
tests_utils.create_volume(
self.context, size=128, cluster_name=cluster_name,
host=volutils.append_host(CONF.host, 'pool0'))
host=volume_utils.append_host(CONF.host, 'pool0'))
tests_utils.create_volume(
self.context, size=256, cluster_name=cluster_name,
host=volutils.append_host(CONF.host + '2', 'pool0'))
host=volume_utils.append_host(CONF.host + '2', 'pool0'))
tests_utils.create_volume(
self.context, size=512, cluster_name=cluster_name,
host=volutils.append_host(CONF.host + '2', 'pool1'))
host=volume_utils.append_host(CONF.host + '2', 'pool1'))
tests_utils.create_volume(
self.context, size=1024, cluster_name=cluster_name,
host=volutils.append_host(CONF.host + '3', 'pool2'))
host=volume_utils.append_host(CONF.host + '3', 'pool2'))
# These don't belong to the cluster so they will be ignored
tests_utils.create_volume(
self.context, size=1024,
host=volutils.append_host(CONF.host, 'pool2'))
host=volume_utils.append_host(CONF.host, 'pool2'))
tests_utils.create_volume(
self.context, size=1024, cluster_name=cluster_name + '1',
host=volutils.append_host(CONF.host + '3', 'pool2'))
host=volume_utils.append_host(CONF.host + '3', 'pool2'))
self.volume.init_host(service_id=self.service_id)
init_host_mock.assert_called_once_with(

4
cinder/tests/unit/volume/test_volume_migration.py

@@ -41,7 +41,7 @@ from cinder.volume import api as volume_api
from cinder.volume.flows.manager import create_volume as create_volume_manager
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_types
from cinder.volume import volume_utils as volutils
from cinder.volume import volume_utils
QUOTAS = quota.QUOTAS
@@ -556,7 +556,7 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):
mock.patch.object(os_brick.initiator.connector,
'get_connector_properties') \
as mock_get_connector_properties, \
mock.patch.object(volutils, 'copy_volume') as mock_copy, \
mock.patch.object(volume_utils, 'copy_volume') as mock_copy, \
mock.patch.object(volume_rpcapi.VolumeAPI,
'get_capabilities') \
as mock_get_capabilities:

16
cinder/volume/flows/api/create_volume.py

@@ -30,7 +30,7 @@ from cinder import quota_utils
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import volume_types
from cinder.volume import volume_utils as vol_utils
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
@@ -223,7 +223,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
# exist, this is expected as it signals that the image_id is missing.
image_meta = self.image_service.show(context, image_id)
vol_utils.check_image_metadata(image_meta, size)
volume_utils.check_image_metadata(image_meta, size)
return image_meta
@@ -237,7 +237,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
the validated availability zone.
"""
refresh_az = False
type_azs = vol_utils.extract_availability_zones_from_volume_type(
type_azs = volume_utils.extract_availability_zones_from_volume_type(
volume_type)
type_az_configured = type_azs is not None
if type_az_configured:
@@ -339,12 +339,12 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
# Clone the existing key and associate a separate -- but
# identical -- key with each volume.
if encryption_key_id is not None:
encryption_key_id = vol_utils.clone_encryption_key(
encryption_key_id = volume_utils.clone_encryption_key(
context,
key_manager,
encryption_key_id)
else:
encryption_key_id = vol_utils.create_encryption_key(
encryption_key_id = volume_utils.create_encryption_key(
context,
key_manager,
volume_type_id)
@@ -456,7 +456,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
specs = None
extra_specs = None
if vol_utils.is_replicated_spec(extra_specs):
if volume_utils.is_replicated_spec(extra_specs):
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
@@ -740,13 +740,13 @@ class VolumeCastTask(flow_utils.CinderTask):
# If cgroup_id existed, we should cast volume to the scheduler
# to choose a proper pool whose backend is same as CG's backend.
cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
request_spec['resource_backend'] = vol_utils.extract_host(
request_spec['resource_backend'] = volume_utils.extract_host(
cgroup.resource_backend)
elif group_id:
# If group_id exists, we should cast volume to the scheduler
# to choose a proper pool whose backend is same as group's backend.
group = objects.Group.get_by_id(context, group_id)
request_spec['resource_backend'] = vol_utils.extract_host(
request_spec['resource_backend'] = volume_utils.extract_host(
group.resource_backend)
elif snapshot_id and CONF.snapshot_same_host:
# NOTE(Rongze Zhu): A simple solution for bug 1008866.

75
cinder/volume/manager.py

@@ -84,7 +84,7 @@ from cinder.volume import group_types
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_migration
from cinder.volume import volume_types
from cinder.volume import volume_utils as vol_utils
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
@@ -205,7 +205,7 @@ class VolumeManager(manager.CleanableManager,
def _get_service(self, host=None, binary=constants.VOLUME_BINARY):
host = host or self.host
ctxt = context.get_admin_context()
svc_host = vol_utils.extract_host(host, 'backend')
svc_host = volume_utils.extract_host(host, 'backend')
return objects.Service.get_by_args(ctxt, svc_host, binary)
def __init__(self, volume_driver=None, service_name=None,
@@ -315,7 +315,7 @@ class VolumeManager(manager.CleanableManager,
self.image_volume_cache = None
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
pool = volume_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
@@ -329,8 +329,8 @@
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
new_host = volume_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
@@ -339,7 +339,7 @@
# volume_backend_name is None, use default pool name.
# This is only for counting purpose, doesn't update DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
'volume_backend_name') or volume_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
@@ -563,7 +563,7 @@
self.driver.set_initialized()
# Keep the image tmp file clean when init host.
backend_name = vol_utils.extract_host(self.service_topic_queue)
backend_name = volume_utils.extract_host(self.service_topic_queue)
image_utils.cleanup_temporary_file(backend_name)
# Migrate any ConfKeyManager keys based on fixed_key to the currently
@@ -669,9 +669,10 @@
def _set_resource_host(self, resource):
"""Set the host field on the DB to our own when we are clustered."""
if (resource.is_clustered and
not vol_utils.hosts_are_equivalent(resource.host, self.host)):
pool = vol_utils.extract_host(resource.host, 'pool')
resource.host = vol_utils.append_host(self.host, pool)
not volume_utils.hosts_are_equivalent(resource.host,
self.host)):
pool = volume_utils.extract_host(resource.host, 'pool')
resource.host = volume_utils.append_host(self.host, pool)
resource.save()
@objects.Volume.set_workers
@@ -785,8 +786,9 @@
def _check_is_our_resource(self, resource):
if resource.host:
res_backend = vol_utils.extract_host(resource.service_topic_queue)
backend = vol_utils.extract_host(self.service_topic_queue)
res_backend = volume_utils.extract_host(
resource.service_topic_queue)
backend = volume_utils.extract_host(self.service_topic_queue)
if res_backend != backend:
msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
'local to %(backend)s.') %
@@ -1305,7 +1307,7 @@
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
host_name_sanitized = vol_utils.sanitize_hostname(
host_name_sanitized = volume_utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = (
@@ -2162,11 +2164,11 @@
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
volume_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Failed to copy volume %(src)s to %(dest)s.",
@@ -2197,7 +2199,7 @@
new_vol_values['volume_type_id'] = new_type_id
if volume_types.volume_types_encryption_changed(
ctxt, volume.volume_type_id, new_type_id):
encryption_key_id = vol_utils.create_encryption_key(
encryption_key_id = volume_utils.create_encryption_key(
ctxt, self.key_manager, new_type_id)
new_vol_values['encryption_key_id'] = encryption_key_id
@@ -2665,7 +2667,7 @@
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
@@ -2674,7 +2676,7 @@
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
@@ -2684,7 +2686,7 @@
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_group_usage(
volume_utils.notify_about_group_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
@@ -2693,7 +2695,7 @@
context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
@@ -2703,7 +2705,7 @@
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_group_snapshot_usage(
volume_utils.notify_about_group_snapshot_usage(
context, group_snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
@@ -2712,7 +2714,7 @@
context, group_snapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
@@ -2770,11 +2772,11 @@
for attachment in attachments]
nova_api.extend_volume(context, instance_uuids, volume.id)
pool = vol_utils.extract_host(volume.host, 'pool')
pool = volume_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
'volume_backend_name') or volume_utils.extract_host(
volume.host, 'pool', True)
try:
@@ -2791,10 +2793,10 @@
def _is_our_backend(self, host, cluster_name):
return ((not cluster_name and
vol_utils.hosts_are_equivalent(self.driver.host, host)) or
volume_utils.hosts_are_equivalent(self.driver.host, host)) or
(cluster_name and
vol_utils.hosts_are_equivalent(self.driver.cluster_name,
cluster_name)))
volume_utils.hosts_are_equivalent(self.driver.cluster_name,
cluster_name)))
def retype(self, context, volume, new_type_id, host,
migration_policy='never', reservations=None,
@@ -2954,7 +2956,7 @@
replication_diff = diff_specs.get('replication_enabled')
if replication_diff:
is_replicated = vol_utils.is_boolean_str(replication_diff[1])
is_replicated = volume_utils.is_boolean_str(replication_diff[1])
if is_replicated:
replication_status = fields.ReplicationStatus.ENABLED
else:
@@ -2973,11 +2975,11 @@
def _update_stats_for_managed(self, volume_reference):
# Update volume stats
pool = vol_utils.extract_host(volume_reference.host, 'pool')
pool = volume_utils.extract_host(volume_reference.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
'volume_backend_name') or volume_utils.extract_host(
volume_reference.host, 'pool', True)
try:
@@ -3440,12 +3442,13 @@
def _update_allocated_capacity(self, vol, decrement=False, host=None):
# Update allocated capacity in volume stats
host = host or vol['host']
pool = vol_utils.extract_host(host, 'pool')
pool = volume_utils.extract_host(host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(host, 'pool',
True)
'volume_backend_name') or volume_utils.extract_host(host,
'pool',
True)
vol_size = -vol['size'] if decrement else vol['size']
try:

Loading…
Cancel
Save