Add per-backend availability zones

When volume backends were defined in the [DEFAULT] section of
cinder.conf, it did not make sense to have more than one availability
zone (AZ) config setting. Now that we have multibackend support and
have deprecated the ability to configure backends in [DEFAULT], we can
set an AZ per backend section.

Supporting more than one AZ previously required setting up a separate
service with a different cinder.conf. But since the vast majority of
backends are separate physical devices, there is no reason to force
the control plane services onto separate nodes just to support
separate AZ configurations.
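
As an illustration only (not part of this change), a multibackend
cinder.conf could now set a zone per backend section. The backend
names, driver, and zone names below are purely hypothetical:

    [DEFAULT]
    enabled_backends = lvm-az1,lvm-az2
    # Fallback zone for any backend that does not set its own
    storage_availability_zone = nova

    [lvm-az1]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_backend_name = lvm-az1
    backend_availability_zone = az1

    [lvm-az2]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_backend_name = lvm-az2
    backend_availability_zone = az2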

Change-Id: I0d7669718a1d1e03e3dd42d1a19b70ff03342bd7
Implements: bp multibackend-az-support
Sean McGinnis 2017-02-13 22:13:30 -06:00
parent 5916ad0673
commit 7c1e92278c
7 changed files with 60 additions and 10 deletions

@@ -117,7 +117,9 @@ global_opts = [
     # NOTE(vish): default to nova for compatibility with nova installs
     cfg.StrOpt('storage_availability_zone',
                default='nova',
-               help='Availability zone of this node'),
+               help='Availability zone of this node. Can be overridden per '
+                    'volume backend with the option '
+                    '"backend_availability_zone".'),
     cfg.StrOpt('default_availability_zone',
                help='Default availability zone for new volumes. If not set, '
                     'the storage_availability_zone option value is used as '

@@ -91,6 +91,7 @@ class Manager(base.Base, PeriodicTasks):
         self.host = host
         self.cluster = cluster
         self.additional_endpoints = []
+        self.availability_zone = CONF.storage_availability_zone
         super(Manager, self).__init__(db_driver)

     @property

@@ -145,6 +145,11 @@ class Service(service.Service):
             manager_class = profiler.trace_cls("rpc")(manager_class)

         self.service = None
+        self.manager = manager_class(host=self.host,
+                                     cluster=self.cluster,
+                                     service_name=service_name,
+                                     *args, **kwargs)
+        self.availability_zone = self.manager.availability_zone

         # NOTE(geguileo): We need to create the Service DB entry before we
         # create the manager, otherwise capped versions for serializer and rpc
@@ -201,10 +206,6 @@
             # start while we are still doing the rolling upgrade.
             self.added_to_cluster = not self.is_upgrading_to_n

-        self.manager = manager_class(host=self.host,
-                                     cluster=self.cluster,
-                                     service_name=service_name,
-                                     *args, **kwargs)
         self.report_interval = report_interval
         self.periodic_interval = periodic_interval
         self.periodic_fuzzy_delay = periodic_fuzzy_delay
@@ -356,13 +357,12 @@
             pass

     def _create_service_ref(self, context, rpc_version=None):
-        zone = CONF.storage_availability_zone
         kwargs = {
             'host': self.host,
             'binary': self.binary,
             'topic': self.topic,
             'report_count': 0,
-            'availability_zone': zone,
+            'availability_zone': self.availability_zone,
             'rpc_current_version': rpc_version or self.manager.RPC_API_VERSION,
             'object_current_version': objects_base.OBJ_VERSIONS.get_current(),
         }
@@ -486,7 +486,6 @@
             return

         ctxt = context.get_admin_context()
-        zone = CONF.storage_availability_zone
         try:
             try:
                 service_ref = objects.Service.get_by_id(ctxt,
@@ -499,8 +498,8 @@
                                                         Service.service_id)
                 service_ref.report_count += 1
-                if zone != service_ref.availability_zone:
-                    service_ref.availability_zone = zone
+                if self.availability_zone != service_ref.availability_zone:
+                    service_ref.availability_zone = self.availability_zone
                 service_ref.save()

@@ -116,6 +116,36 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                          scheduler_rpcapi.client.serializer._base.version_cap)
         self.assertIsNone(scheduler_rpcapi.client.serializer._base.manifest)

+    @mock.patch('oslo_utils.importutils.import_object')
+    def test_backend_availability_zone(self, mock_import_object):
+        # NOTE(smcginnis): This isn't really the best place for this test,
+        # but we don't currently have a pure VolumeManager test class. So
+        # until we create a good suite for that class, putting here with
+        # other tests that use VolumeManager.
+        opts = {
+            'backend_availability_zone': 'caerbannog'
+        }
+
+        def conf_get(option):
+            if option in opts:
+                return opts[option]
+            return None
+
+        mock_driver = mock.Mock()
+        mock_driver.configuration.safe_get.side_effect = conf_get
+        mock_driver.configuration.extra_capabilities = 'null'
+
+        def import_obj(*args, **kwargs):
+            return mock_driver
+
+        mock_import_object.side_effect = import_obj
+
+        manager = vol_manager.VolumeManager(volume_driver=mock_driver)
+        self.assertIsNotNone(manager)
+        self.assertEqual(opts['backend_availability_zone'],
+                         manager.availability_zone)
+
     @mock.patch.object(vol_manager.VolumeManager,
                        'update_service_capabilities')
     def test_report_filter_goodness_function(self, mock_update):

@@ -266,6 +266,11 @@ volume_opts = [
                     "working CI system and testing are marked as unsupported "
                     "until CI is working again. This also marks a driver as "
                     "deprecated and may be removed in the next release."),
+    cfg.StrOpt('backend_availability_zone',
+               default=None,
+               help='Availability zone for this volume backend. If not set, '
+                    'the storage_availability_zone option value is used as '
+                    'the default for all backends.'),
 ]

 # for backward compatibility

@@ -268,6 +268,12 @@ class VolumeManager(manager.CleanableManager,
                 LOG.error(_LE("Invalid JSON: %s"),
                           self.driver.configuration.extra_capabilities)

+        # Check if a per-backend AZ has been specified
+        backend_zone = self.driver.configuration.safe_get(
+            'backend_availability_zone')
+        if backend_zone:
+            self.availability_zone = backend_zone
+
         if self.driver.configuration.safe_get(
                 'image_volume_cache_enabled'):

@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    Availability zones may now be configured per backend in a multi-backend
+    configuration. Individual backend sections can now set the configuration
+    option ``backend_availability_zone``. If set, this value will override
+    the [DEFAULT] ``storage_availability_zone`` setting.
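
As a usage sketch (assuming the hypothetical az1/az2 zones from the
configuration example above, and a reasonably current
python-openstackclient), the new zones should be reported by the
volume services and selectable when creating volumes:

    # List the availability zones reported by the volume services
    openstack availability zone list --volume

    # Request a volume in a specific backend's zone
    openstack volume create --size 1 --availability-zone az1 my-volume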