Move flags in libvirt's volume to the libvirt group
This change continues moving libvirt-specific flags into a libvirt-specific configuration group. Progresses blueprint libvirt-opts-group. DocImpact: the following flags have moved into the libvirt group: num_iscsi_scan_tries, num_iser_scan_tries, rbd_user, rbd_secret_uuid, nfs_mount_point_base, nfs_mount_options, num_aoe_discover_tries, glusterfs_mount_point_base, iscsi_use_multipath (formerly libvirt_iscsi_use_multipath), iser_use_multipath (formerly libvirt_iser_use_multipath), scality_sofs_config, scality_sofs_mount_point, and qemu_allowed_storage_drivers. Change-Id: I78941aff4bf49fc70d91a3239deff44dbaaab6c9
This commit is contained in:
parent
aa480d75c7
commit
2b24b16976
|
@ -1939,60 +1939,6 @@
|
|||
#force_raw_images=true
|
||||
|
||||
|
||||
#
|
||||
# Options defined in nova.virt.libvirt.volume
|
||||
#
|
||||
|
||||
# number of times to rescan iSCSI target to find volume
|
||||
# (integer value)
|
||||
#num_iscsi_scan_tries=3
|
||||
|
||||
# number of times to rescan iSER target to find volume
|
||||
# (integer value)
|
||||
#num_iser_scan_tries=3
|
||||
|
||||
# the RADOS client name for accessing rbd volumes (string
|
||||
# value)
|
||||
#rbd_user=<None>
|
||||
|
||||
# the libvirt uuid of the secret for the rbd_uservolumes
|
||||
# (string value)
|
||||
#rbd_secret_uuid=<None>
|
||||
|
||||
# Dir where the nfs volume is mounted on the compute node
|
||||
# (string value)
|
||||
#nfs_mount_point_base=$state_path/mnt
|
||||
|
||||
# Mount options passed to the nfs client. See section of the
|
||||
# nfs man page for details (string value)
|
||||
#nfs_mount_options=<None>
|
||||
|
||||
# number of times to rediscover AoE target to find volume
|
||||
# (integer value)
|
||||
#num_aoe_discover_tries=3
|
||||
|
||||
# Dir where the glusterfs volume is mounted on the compute
|
||||
# node (string value)
|
||||
#glusterfs_mount_point_base=$state_path/mnt
|
||||
|
||||
# use multipath connection of the iSCSI volume (boolean value)
|
||||
#libvirt_iscsi_use_multipath=false
|
||||
|
||||
# use multipath connection of the iSER volume (boolean value)
|
||||
#libvirt_iser_use_multipath=false
|
||||
|
||||
# Path or URL to Scality SOFS configuration file (string
|
||||
# value)
|
||||
#scality_sofs_config=<None>
|
||||
|
||||
# Base dir where Scality SOFS shall be mounted (string value)
|
||||
#scality_sofs_mount_point=$state_path/scality
|
||||
|
||||
# Protocols listed here will be accessed directly from QEMU.
|
||||
# Currently supported protocols: [gluster] (list value)
|
||||
#qemu_allowed_storage_drivers=
|
||||
|
||||
|
||||
#
|
||||
# Options defined in nova.vnc
|
||||
#
|
||||
|
@ -2648,6 +2594,60 @@
|
|||
#use_virtio_for_bridges=true
|
||||
|
||||
|
||||
#
|
||||
# Options defined in nova.virt.libvirt.volume
|
||||
#
|
||||
|
||||
# number of times to rescan iSCSI target to find volume
|
||||
# (integer value)
|
||||
#num_iscsi_scan_tries=3
|
||||
|
||||
# number of times to rescan iSER target to find volume
|
||||
# (integer value)
|
||||
#num_iser_scan_tries=3
|
||||
|
||||
# the RADOS client name for accessing rbd volumes (string
|
||||
# value)
|
||||
#rbd_user=<None>
|
||||
|
||||
# the libvirt uuid of the secret for the rbd_uservolumes
|
||||
# (string value)
|
||||
#rbd_secret_uuid=<None>
|
||||
|
||||
# Dir where the nfs volume is mounted on the compute node
|
||||
# (string value)
|
||||
#nfs_mount_point_base=$state_path/mnt
|
||||
|
||||
# Mount options passed to the nfs client. See section of the
|
||||
# nfs man page for details (string value)
|
||||
#nfs_mount_options=<None>
|
||||
|
||||
# number of times to rediscover AoE target to find volume
|
||||
# (integer value)
|
||||
#num_aoe_discover_tries=3
|
||||
|
||||
# Dir where the glusterfs volume is mounted on the compute
|
||||
# node (string value)
|
||||
#glusterfs_mount_point_base=$state_path/mnt
|
||||
|
||||
# use multipath connection of the iSCSI volume (boolean value)
|
||||
#iscsi_use_multipath=false
|
||||
|
||||
# use multipath connection of the iSER volume (boolean value)
|
||||
#iser_use_multipath=false
|
||||
|
||||
# Path or URL to Scality SOFS configuration file (string
|
||||
# value)
|
||||
#scality_sofs_config=<None>
|
||||
|
||||
# Base dir where Scality SOFS shall be mounted (string value)
|
||||
#scality_sofs_mount_point=$state_path/scality
|
||||
|
||||
# Protocols listed here will be accessed directly from QEMU.
|
||||
# Currently supported protocols: [gluster] (list value)
|
||||
#qemu_allowed_storage_drivers=
|
||||
|
||||
|
||||
[baremetal]
|
||||
|
||||
#
|
||||
|
|
|
@ -519,9 +519,10 @@ class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
|
|||
def setUp(self):
|
||||
self.image_class = imagebackend.Rbd
|
||||
super(RbdTestCase, self).setUp()
|
||||
self.flags(images_rbd_pool=self.POOL, group='libvirt')
|
||||
self.flags(rbd_user=self.USER)
|
||||
self.flags(images_rbd_ceph_conf=self.CONF, group='libvirt')
|
||||
self.flags(images_rbd_pool=self.POOL,
|
||||
rbd_user=self.USER,
|
||||
images_rbd_ceph_conf=self.CONF,
|
||||
group='libvirt')
|
||||
self.libvirt_utils = imagebackend.libvirt_utils
|
||||
self.utils = imagebackend.utils
|
||||
self.rbd = self.mox.CreateMockAnything()
|
||||
|
|
|
@ -330,10 +330,10 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
'driver_volume_type': 'rbd',
|
||||
'data': {
|
||||
'name': '%s/%s' % ('rbd', volume['name']),
|
||||
'auth_enabled': CONF.rbd_secret_uuid is not None,
|
||||
'auth_username': CONF.rbd_user,
|
||||
'auth_enabled': CONF.libvirt.rbd_secret_uuid is not None,
|
||||
'auth_username': CONF.libvirt.rbd_user,
|
||||
'secret_type': 'ceph',
|
||||
'secret_uuid': CONF.rbd_secret_uuid,
|
||||
'secret_uuid': CONF.libvirt.rbd_secret_uuid,
|
||||
'qos_specs': {
|
||||
'total_bytes_sec': '1048576',
|
||||
'read_iops_sec': '500',
|
||||
|
@ -397,7 +397,8 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
|
||||
flags_user = 'bar'
|
||||
self.flags(rbd_user=flags_user,
|
||||
rbd_secret_uuid=flags_uuid)
|
||||
rbd_secret_uuid=flags_uuid,
|
||||
group='libvirt')
|
||||
|
||||
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
|
||||
tree = conf.format_dom()
|
||||
|
@ -436,7 +437,8 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
|
||||
flags_user = 'bar'
|
||||
self.flags(rbd_user=flags_user,
|
||||
rbd_secret_uuid=flags_uuid)
|
||||
rbd_secret_uuid=flags_uuid,
|
||||
group='libvirt')
|
||||
|
||||
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
|
||||
tree = conf.format_dom()
|
||||
|
@ -460,7 +462,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
libvirt_driver.disconnect_volume(connection_info, 'vde')
|
||||
|
||||
def test_libvirt_kvm_volume_with_multipath(self):
|
||||
self.flags(libvirt_iscsi_use_multipath=True)
|
||||
self.flags(iscsi_use_multipath=True, group='libvirt')
|
||||
self.stubs.Set(os.path, 'exists', lambda x: True)
|
||||
devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
|
||||
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
|
||||
|
@ -480,7 +482,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
self.assertIn(expected_multipath_cmd, self.executes)
|
||||
|
||||
def test_libvirt_kvm_volume_with_multipath_getmpdev(self):
|
||||
self.flags(libvirt_iscsi_use_multipath=True)
|
||||
self.flags(iscsi_use_multipath=True, group='libvirt')
|
||||
self.stubs.Set(os.path, 'exists', lambda x: True)
|
||||
libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
|
||||
name0 = 'volume-00000000'
|
||||
|
@ -501,7 +503,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
libvirt_driver.disconnect_volume(connection_info, 'vde')
|
||||
|
||||
def test_libvirt_kvm_iser_volume_with_multipath(self):
|
||||
self.flags(libvirt_iser_use_multipath=True)
|
||||
self.flags(iser_use_multipath=True, group='libvirt')
|
||||
self.stubs.Set(os.path, 'exists', lambda x: True)
|
||||
devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
|
||||
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
|
||||
|
@ -529,7 +531,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
self.assertIn(expected_multipath_cmd, self.executes)
|
||||
|
||||
def test_libvirt_kvm_iser_volume_with_multipath_getmpdev(self):
|
||||
self.flags(libvirt_iser_use_multipath=True)
|
||||
self.flags(iser_use_multipath=True, group='libvirt')
|
||||
self.stubs.Set(os.path, 'exists', lambda x: True)
|
||||
libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
|
||||
name0 = 'volume-00000000'
|
||||
|
@ -562,7 +564,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
def test_libvirt_nfs_driver(self):
|
||||
# NOTE(vish) exists is to make driver assume connecting worked
|
||||
mnt_base = '/mnt'
|
||||
self.flags(nfs_mount_point_base=mnt_base)
|
||||
self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
|
||||
|
||||
libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
|
||||
export_string = '192.168.1.1:/nfs/share1'
|
||||
|
@ -584,7 +586,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
|
||||
def test_libvirt_nfs_driver_with_opts(self):
|
||||
mnt_base = '/mnt'
|
||||
self.flags(nfs_mount_point_base=mnt_base)
|
||||
self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
|
||||
|
||||
libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
|
||||
export_string = '192.168.1.1:/nfs/share1'
|
||||
|
@ -633,7 +635,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
|
||||
def test_libvirt_glusterfs_driver(self):
|
||||
mnt_base = '/mnt'
|
||||
self.flags(glusterfs_mount_point_base=mnt_base)
|
||||
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
|
||||
|
||||
libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
|
||||
export_string = '192.168.1.1:/volume-00001'
|
||||
|
@ -675,7 +677,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
|
||||
def test_libvirt_glusterfs_driver_with_opts(self):
|
||||
mnt_base = '/mnt'
|
||||
self.flags(glusterfs_mount_point_base=mnt_base)
|
||||
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
|
||||
|
||||
libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
|
||||
export_string = '192.168.1.1:/volume-00001'
|
||||
|
@ -700,7 +702,7 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
self.assertEqual(self.executes, expected_commands)
|
||||
|
||||
def test_libvirt_glusterfs_libgfapi(self):
|
||||
self.flags(qemu_allowed_storage_drivers=['gluster'])
|
||||
self.flags(qemu_allowed_storage_drivers=['gluster'], group='libvirt')
|
||||
libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
|
||||
export_string = '192.168.1.1:/volume-00001'
|
||||
name = 'volume-00001'
|
||||
|
@ -824,7 +826,8 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
|
|||
|
||||
self.stubs.Set(os, 'access', _access_wrapper)
|
||||
self.flags(scality_sofs_config=TEST_CONFIG,
|
||||
scality_sofs_mount_point=TEST_MOUNT)
|
||||
scality_sofs_mount_point=TEST_MOUNT,
|
||||
group='libvirt')
|
||||
driver = volume.LibvirtScalityVolumeDriver(self.fake_conn)
|
||||
conf = driver.connect_volume(TEST_CONN_INFO, self.disk_info)
|
||||
|
||||
|
|
|
@ -475,7 +475,7 @@ class Rbd(Image):
|
|||
' flag to use rbd images.'))
|
||||
self.pool = CONF.libvirt.images_rbd_pool
|
||||
self.ceph_conf = ascii_str(CONF.libvirt.images_rbd_ceph_conf)
|
||||
self.rbd_user = ascii_str(CONF.rbd_user)
|
||||
self.rbd_user = ascii_str(CONF.libvirt.rbd_user)
|
||||
self.rbd = kwargs.get('rbd', rbd)
|
||||
self.rados = kwargs.get('rados', rados)
|
||||
|
||||
|
@ -546,15 +546,15 @@ class Rbd(Image):
|
|||
info.source_name = '%s/%s' % (self.pool, self.rbd_name)
|
||||
info.source_hosts = hosts
|
||||
info.source_ports = ports
|
||||
auth_enabled = (CONF.rbd_user is not None)
|
||||
if CONF.rbd_secret_uuid:
|
||||
info.auth_secret_uuid = CONF.rbd_secret_uuid
|
||||
auth_enabled = (CONF.libvirt.rbd_user is not None)
|
||||
if CONF.libvirt.rbd_secret_uuid:
|
||||
info.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
|
||||
auth_enabled = True # Force authentication locally
|
||||
if CONF.rbd_user:
|
||||
info.auth_username = CONF.rbd_user
|
||||
if CONF.libvirt.rbd_user:
|
||||
info.auth_username = CONF.libvirt.rbd_user
|
||||
if auth_enabled:
|
||||
info.auth_secret_type = 'ceph'
|
||||
info.auth_secret_uuid = CONF.rbd_secret_uuid
|
||||
info.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
|
||||
return info
|
||||
|
||||
def _can_fallocate(self):
|
||||
|
|
|
@ -44,47 +44,62 @@ LOG = logging.getLogger(__name__)
|
|||
volume_opts = [
|
||||
cfg.IntOpt('num_iscsi_scan_tries',
|
||||
default=3,
|
||||
help='number of times to rescan iSCSI target to find volume'),
|
||||
help='number of times to rescan iSCSI target to find volume',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.IntOpt('num_iser_scan_tries',
|
||||
default=3,
|
||||
help='number of times to rescan iSER target to find volume'),
|
||||
help='number of times to rescan iSER target to find volume',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('rbd_user',
|
||||
help='the RADOS client name for accessing rbd volumes'),
|
||||
help='the RADOS client name for accessing rbd volumes',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('rbd_secret_uuid',
|
||||
help='the libvirt uuid of the secret for the rbd_user'
|
||||
'volumes'),
|
||||
'volumes',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('nfs_mount_point_base',
|
||||
default=paths.state_path_def('mnt'),
|
||||
help='Dir where the nfs volume is mounted on the compute node'),
|
||||
help='Dir where the nfs volume is mounted on the compute node',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('nfs_mount_options',
|
||||
help='Mount options passed to the nfs client. See section '
|
||||
'of the nfs man page for details'),
|
||||
'of the nfs man page for details',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.IntOpt('num_aoe_discover_tries',
|
||||
default=3,
|
||||
help='number of times to rediscover AoE target to find volume'),
|
||||
help='number of times to rediscover AoE target to find volume',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('glusterfs_mount_point_base',
|
||||
default=paths.state_path_def('mnt'),
|
||||
help='Dir where the glusterfs volume is mounted on the '
|
||||
'compute node'),
|
||||
cfg.BoolOpt('libvirt_iscsi_use_multipath',
|
||||
default=paths.state_path_def('mnt'),
|
||||
help='Dir where the glusterfs volume is mounted on the '
|
||||
'compute node',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.BoolOpt('iscsi_use_multipath',
|
||||
default=False,
|
||||
help='use multipath connection of the iSCSI volume'),
|
||||
cfg.BoolOpt('libvirt_iser_use_multipath',
|
||||
help='use multipath connection of the iSCSI volume',
|
||||
deprecated_group='DEFAULT',
|
||||
deprecated_name='libvirt_iscsi_use_multipath'),
|
||||
cfg.BoolOpt('iser_use_multipath',
|
||||
default=False,
|
||||
help='use multipath connection of the iSER volume'),
|
||||
help='use multipath connection of the iSER volume',
|
||||
deprecated_group='DEFAULT',
|
||||
deprecated_name='libvirt_iser_use_multipath'),
|
||||
cfg.StrOpt('scality_sofs_config',
|
||||
help='Path or URL to Scality SOFS configuration file'),
|
||||
help='Path or URL to Scality SOFS configuration file',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.StrOpt('scality_sofs_mount_point',
|
||||
default='$state_path/scality',
|
||||
help='Base dir where Scality SOFS shall be mounted'),
|
||||
help='Base dir where Scality SOFS shall be mounted',
|
||||
deprecated_group='DEFAULT'),
|
||||
cfg.ListOpt('qemu_allowed_storage_drivers',
|
||||
default=[],
|
||||
help='Protocols listed here will be accessed directly '
|
||||
'from QEMU. Currently supported protocols: [gluster]')
|
||||
default=[],
|
||||
help='Protocols listed here will be accessed directly '
|
||||
'from QEMU. Currently supported protocols: [gluster]',
|
||||
deprecated_group='DEFAULT')
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
CONF.register_opts(volume_opts)
|
||||
CONF.register_opts(volume_opts, 'libvirt')
|
||||
|
||||
|
||||
class LibvirtBaseVolumeDriver(object):
|
||||
|
@ -202,11 +217,11 @@ class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
|
|||
conf.source_ports = netdisk_properties.get('ports', [])
|
||||
auth_enabled = netdisk_properties.get('auth_enabled')
|
||||
if (conf.source_protocol == 'rbd' and
|
||||
CONF.rbd_secret_uuid):
|
||||
conf.auth_secret_uuid = CONF.rbd_secret_uuid
|
||||
CONF.libvirt.rbd_secret_uuid):
|
||||
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
|
||||
auth_enabled = True # Force authentication locally
|
||||
if CONF.rbd_user:
|
||||
conf.auth_username = CONF.rbd_user
|
||||
if CONF.libvirt.rbd_user:
|
||||
conf.auth_username = CONF.libvirt.rbd_user
|
||||
if auth_enabled:
|
||||
conf.auth_username = (conf.auth_username or
|
||||
netdisk_properties['auth_username'])
|
||||
|
@ -221,8 +236,8 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
|
|||
def __init__(self, connection):
|
||||
super(LibvirtISCSIVolumeDriver, self).__init__(connection,
|
||||
is_block_dev=False)
|
||||
self.num_scan_tries = CONF.num_iscsi_scan_tries
|
||||
self.use_multipath = CONF.libvirt_iscsi_use_multipath
|
||||
self.num_scan_tries = CONF.libvirt.num_iscsi_scan_tries
|
||||
self.use_multipath = CONF.libvirt.iscsi_use_multipath
|
||||
|
||||
def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
|
||||
check_exit_code = kwargs.pop('check_exit_code', 0)
|
||||
|
@ -545,8 +560,8 @@ class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
|
|||
"""Driver to attach Network volumes to libvirt."""
|
||||
def __init__(self, connection):
|
||||
super(LibvirtISERVolumeDriver, self).__init__(connection)
|
||||
self.num_scan_tries = CONF.num_iser_scan_tries
|
||||
self.use_multipath = CONF.libvirt_iser_use_multipath
|
||||
self.num_scan_tries = CONF.libvirt.num_iser_scan_tries
|
||||
self.use_multipath = CONF.libvirt.iser_use_multipath
|
||||
|
||||
def _get_multipath_iqn(self, multipath_device):
|
||||
entries = self._get_iscsi_devices()
|
||||
|
@ -600,7 +615,7 @@ class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
|
|||
@type nfs_export: string
|
||||
@type options: string
|
||||
"""
|
||||
mount_path = os.path.join(CONF.nfs_mount_point_base,
|
||||
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
|
||||
self.get_hash_str(nfs_export))
|
||||
self._mount_nfs(mount_path, nfs_export, options, ensure=True)
|
||||
return mount_path
|
||||
|
@ -611,8 +626,8 @@ class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
|
|||
|
||||
# Construct the NFS mount command.
|
||||
nfs_cmd = ['mount', '-t', 'nfs']
|
||||
if CONF.nfs_mount_options is not None:
|
||||
nfs_cmd.extend(['-o', CONF.nfs_mount_options])
|
||||
if CONF.libvirt.nfs_mount_options is not None:
|
||||
nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
|
||||
if options is not None:
|
||||
nfs_cmd.extend(options.split(' '))
|
||||
nfs_cmd.extend([nfs_share, mount_path])
|
||||
|
@ -668,7 +683,7 @@ class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver):
|
|||
if os.path.exists(aoedevpath):
|
||||
raise loopingcall.LoopingCallDone()
|
||||
|
||||
if self.tries >= CONF.num_aoe_discover_tries:
|
||||
if self.tries >= CONF.libvirt.num_aoe_discover_tries:
|
||||
raise exception.NovaException(_("AoE device not found at %s") %
|
||||
(aoedevpath))
|
||||
LOG.warn(_("AoE volume not yet found at: %(aoedevpath)s. "
|
||||
|
@ -713,7 +728,7 @@ class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
|
|||
|
||||
data = connection_info['data']
|
||||
|
||||
if 'gluster' in CONF.qemu_allowed_storage_drivers:
|
||||
if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
|
||||
vol_name = data['export'].split('/')[1]
|
||||
source_host = data['export'].split('/')[0][:-1]
|
||||
|
||||
|
@ -737,7 +752,7 @@ class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
|
|||
@type glusterfs_export: string
|
||||
@type options: string
|
||||
"""
|
||||
mount_path = os.path.join(CONF.glusterfs_mount_point_base,
|
||||
mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
|
||||
self.get_hash_str(glusterfs_export))
|
||||
self._mount_glusterfs(mount_path, glusterfs_export,
|
||||
options, ensure=True)
|
||||
|
@ -846,7 +861,7 @@ class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
|
|||
self.device_name = os.path.realpath(device)
|
||||
raise loopingcall.LoopingCallDone()
|
||||
|
||||
if self.tries >= CONF.num_iscsi_scan_tries:
|
||||
if self.tries >= CONF.libvirt.num_iscsi_scan_tries:
|
||||
msg = _("Fibre Channel device not found.")
|
||||
raise exception.NovaException(msg)
|
||||
|
||||
|
@ -934,7 +949,7 @@ class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver):
|
|||
self._mount_sofs()
|
||||
conf = super(LibvirtScalityVolumeDriver,
|
||||
self).connect_volume(connection_info, disk_info)
|
||||
path = os.path.join(CONF.scality_sofs_mount_point,
|
||||
path = os.path.join(CONF.libvirt.scality_sofs_mount_point,
|
||||
connection_info['data']['sofs_path'])
|
||||
conf.source_type = 'file'
|
||||
conf.source_path = path
|
||||
|
@ -951,7 +966,7 @@ class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver):
|
|||
"""Sanity checks before attempting to mount SOFS."""
|
||||
|
||||
# config is mandatory
|
||||
config = CONF.scality_sofs_config
|
||||
config = CONF.libvirt.scality_sofs_config
|
||||
if not config:
|
||||
msg = _("Value required for 'scality_sofs_config'")
|
||||
LOG.warn(msg)
|
||||
|
@ -975,8 +990,8 @@ class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver):
|
|||
raise exception.NovaException(msg)
|
||||
|
||||
def _mount_sofs(self):
|
||||
config = CONF.scality_sofs_config
|
||||
mount_path = CONF.scality_sofs_mount_point
|
||||
config = CONF.libvirt.scality_sofs_config
|
||||
mount_path = CONF.libvirt.scality_sofs_mount_point
|
||||
sysdir = os.path.join(mount_path, 'sys')
|
||||
|
||||
if not os.path.isdir(mount_path):
|
||||
|
|
Loading…
Reference in New Issue