config options: centralize cinder options

This change moves the cinder config options to a central
location "nova/conf/cinder.py".

Change-Id: I144abf6c92f499fa605307631121cf96e24fbd62
Partially-Implements: blueprint centralize-config-options-newton
Jiajun Liu 2016-03-15 09:49:07 -04:00
parent db851200d3
commit 0264687726
8 changed files with 95 additions and 67 deletions
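The practical effect of the centralization, visible in the nova/volume and test diffs below, is that consumers stop importing individual options from nova.volume.cinder and instead read them from the [cinder] group on nova.conf.CONF. A minimal sketch of the consumer pattern (the helper function is hypothetical, for illustration only):

# Before this change a consumer had to pull the option in from the module
# that defined it before reading it:
#
#     from oslo_config import cfg
#     CONF = cfg.CONF
#     CONF.import_opt('cross_az_attach', 'nova.volume.cinder', group='cinder')
#
# After this change nova/conf/__init__.py registers the [cinder] group at
# import time, so a consumer only needs:
import nova.conf

CONF = nova.conf.CONF


def same_az_required():
    # Hypothetical helper: True when a volume must share the instance's AZ.
    return not CONF.cinder.cross_az_attach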


@@ -27,7 +27,7 @@ from nova.conf import barbican
from nova.conf import base
from nova.conf import cells
from nova.conf import cert
# from nova.conf import cinder
from nova.conf import cinder
from nova.conf import cloudpipe
from nova.conf import compute
from nova.conf import conductor
@@ -95,7 +95,7 @@ barbican.register_opts(CONF)
base.register_opts(CONF)
cells.register_opts(CONF)
cert.register_opts(CONF)
# cinder.register_opts(CONF)
cinder.register_opts(CONF)
cloudpipe.register_opts(CONF)
compute.register_opts(CONF)
conductor.register_opts(CONF)

nova/conf/cinder.py (new file, 72 lines added)

@@ -0,0 +1,72 @@
# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg

cinder_group = cfg.OptGroup(
    'cinder',
    title='Cinder Options')

cinder_opts = [
    cfg.StrOpt('catalog_info',
               default='volumev2:cinderv2:publicURL',
               help='Info to match when looking for cinder in the service '
                    'catalog. Format is: separated values of the form: '
                    '<service_type>:<service_name>:<endpoint_type>'),
    cfg.StrOpt('endpoint_template',
               help='Override service catalog lookup with template for cinder '
                    'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
    cfg.StrOpt('os_region_name',
               help='Region name of this node'),
    cfg.IntOpt('http_retries',
               default=3,
               help='Number of cinderclient retries on failed http calls'),
    cfg.BoolOpt('cross_az_attach',
                default=True,
                help='Allow attach between instance and volume in different '
                     'availability zones. If False, volumes attached to an '
                     'instance must be in the same availability zone in '
                     'Cinder as the instance availability zone in Nova. '
                     'This also means care should be taken when booting an '
                     'instance from a volume where source is not "volume" '
                     'because Nova will attempt to create a volume using '
                     'the same availability zone as what is assigned to the '
                     'instance. If that AZ is not in Cinder (or '
                     'allow_availability_zone_fallback=False in cinder.conf), '
                     'the volume create request will fail and the instance '
                     'will fail the build request.'),
]

deprecated = {'timeout': [cfg.DeprecatedOpt('http_timeout',
                                            group=cinder_group.name)],
              'cafile': [cfg.DeprecatedOpt('ca_certificates_file',
                                           group=cinder_group.name)],
              'insecure': [cfg.DeprecatedOpt('api_insecure',
                                             group=cinder_group.name)]}


def register_opts(conf):
    conf.register_group(cinder_group)
    conf.register_opts(cinder_opts, group=cinder_group)
    ks_loading.register_session_conf_options(conf,
                                             cinder_group.name,
                                             deprecated_opts=deprecated)


def list_opts():
    return {
        cinder_group.name: cinder_opts
    }
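A quick way to exercise the new module (a hypothetical snippet, not part of this change) is to register the options on a fresh ConfigOpts object and read the defaults back from the [cinder] group; the deprecated map above also keeps older [cinder] keys such as http_timeout working as the keystoneauth1 session option timeout.

from oslo_config import cfg

from nova.conf import cinder as cinder_conf

conf = cfg.ConfigOpts()
cinder_conf.register_opts(conf)
conf([])  # parse an empty command line so option defaults become readable

assert conf.cinder.catalog_info == 'volumev2:cinderv2:publicURL'
assert conf.cinder.http_retries == 3
# list_opts() is the hook consumed by the config generator entry points.
assert 'cinder' in cinder_conf.list_opts()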


@@ -32,7 +32,6 @@ import nova.paths
import nova.servicegroup.api
import nova.spice
import nova.volume
import nova.volume.cinder
def list_opts():
@@ -49,7 +48,6 @@ def list_opts():
nova.paths.path_opts,
nova.volume._volume_opts,
)),
('cinder', nova.volume.cinder.cinder_opts),
('api_database', nova.db.sqlalchemy.api.api_db_opts),
('database', nova.db.sqlalchemy.api.oslo_db_options.database_opts),
('spice',


@@ -14,18 +14,16 @@
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import nova.conf
from nova import exception
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('cross_az_attach',
'nova.volume.cinder', group='cinder')
CONF = nova.conf.CONF
class fake_volume(object):


@@ -19,12 +19,15 @@ from cinderclient.v2 import client as cinder_client_v2
from requests_mock.contrib import fixture
from testtools import matchers
import nova.conf
from nova import context
from nova import exception
from nova import test
from nova.volume import cinder
CONF = nova.conf.CONF
_image_metadata = {
'kernel_id': 'fake',
'ramdisk_id': 'fake'
@@ -184,9 +187,9 @@ class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase):
def setUp(self):
super(CinderV2TestCase, self).setUp()
cinder.CONF.set_override('catalog_info',
'volumev2:cinder:publicURL', group='cinder')
self.addCleanup(cinder.CONF.reset)
CONF.set_override('catalog_info',
'volumev2:cinder:publicURL', group='cinder')
self.addCleanup(CONF.reset)
def create_client(self):
c = super(CinderV2TestCase, self).create_client()


@@ -17,6 +17,7 @@ from cinderclient import exceptions as cinder_exception
from keystoneclient import exceptions as keystone_exception
import mock
import nova.conf
from nova import context
from nova import exception
from nova import test
@@ -24,6 +25,8 @@ from nova.tests.unit.fake_instance import fake_instance_obj
from nova.tests import uuidsentinel as uuids
from nova.volume import cinder
CONF = nova.conf.CONF
class FakeCinderClient(object):
class Volumes(object):
@@ -147,7 +150,7 @@ class CinderApiTestCase(test.NoDBTestCase):
side_effect=lambda context,
instance: 'zone1') as mock_get_instance_az:
cinder.CONF.set_override('cross_az_attach', False, group='cinder')
CONF.set_override('cross_az_attach', False, group='cinder')
volume['availability_zone'] = 'zone1'
self.assertIsNone(self.api.check_attach(self.ctx,
volume, instance))
@@ -168,7 +171,7 @@
self.assertRaises(exception.InvalidVolume,
self.api.check_attach, self.ctx, volume, instance)
mock_get_instance_az.assert_called_once_with(self.ctx, instance)
cinder.CONF.reset()
CONF.reset()
def test_check_attach(self):
volume = {'status': 'available'}
@@ -176,14 +179,14 @@
volume['availability_zone'] = 'zone1'
volume['multiattach'] = False
instance = {'availability_zone': 'zone1', 'host': 'fakehost'}
cinder.CONF.set_override('cross_az_attach', False, group='cinder')
CONF.set_override('cross_az_attach', False, group='cinder')
with mock.patch.object(cinder.az, 'get_instance_availability_zone',
side_effect=lambda context, instance: 'zone1'):
self.assertIsNone(self.api.check_attach(
self.ctx, volume, instance))
cinder.CONF.reset()
CONF.reset()
def test_check_detach(self):
volume = {'id': 'fake', 'status': 'in-use',


@@ -16,21 +16,20 @@ import functools
import itertools
import operator
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
import six
from nova import block_device
import nova.conf
from nova import exception
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova.volume import encryptors
CONF = cfg.CONF
CONF.import_opt('cross_az_attach', 'nova.volume.cinder', group='cinder')
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)


@@ -28,65 +28,20 @@ from cinderclient import exceptions as cinder_exception
from cinderclient.v1 import client as v1_client
from keystoneauth1 import exceptions as keystone_exception
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
import six
from nova import availability_zones as az
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
cinder_opts = [
    cfg.StrOpt('catalog_info',
               default='volumev2:cinderv2:publicURL',
               help='Info to match when looking for cinder in the service '
                    'catalog. Format is: separated values of the form: '
                    '<service_type>:<service_name>:<endpoint_type>'),
    cfg.StrOpt('endpoint_template',
               help='Override service catalog lookup with template for cinder '
                    'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
    cfg.StrOpt('os_region_name',
               help='Region name of this node'),
    cfg.IntOpt('http_retries',
               default=3,
               help='Number of cinderclient retries on failed http calls'),
    cfg.BoolOpt('cross_az_attach',
                default=True,
                help='Allow attach between instance and volume in different '
                     'availability zones. If False, volumes attached to an '
                     'instance must be in the same availability zone in '
                     'Cinder as the instance availability zone in Nova. '
                     'This also means care should be taken when booting an '
                     'instance from a volume where source is not "volume" '
                     'because Nova will attempt to create a volume using '
                     'the same availability zone as what is assigned to the '
                     'instance. If that AZ is not in Cinder (or '
                     'allow_availability_zone_fallback=False in cinder.conf), '
                     'the volume create request will fail and the instance '
                     'will fail the build request.'),
]

CONF = cfg.CONF

CINDER_OPT_GROUP = 'cinder'

# cinder_opts options in the DEFAULT group were deprecated in Juno
CONF.register_opts(cinder_opts, group=CINDER_OPT_GROUP)

deprecated = {'timeout': [cfg.DeprecatedOpt('http_timeout',
                                            group=CINDER_OPT_GROUP)],
              'cafile': [cfg.DeprecatedOpt('ca_certificates_file',
                                           group=CINDER_OPT_GROUP)],
              'insecure': [cfg.DeprecatedOpt('api_insecure',
                                             group=CINDER_OPT_GROUP)]}

ks_loading.register_session_conf_options(CONF,
                                         CINDER_OPT_GROUP,
                                         deprecated_opts=deprecated)
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -106,8 +61,8 @@ def cinderclient(context):
global _V1_ERROR_RAISED
if not _SESSION:
_SESSION = ks_loading.load_session_from_conf_options(CONF,
CINDER_OPT_GROUP)
_SESSION = ks_loading.load_session_from_conf_options(
CONF, nova.conf.cinder.cinder_group.name)
url = None
endpoint_override = None