Move PowerVM driver options into a group
Following the compute driver standards, the driver options should be placed in their own configuration group. This change set introduces the 'powervm' group and moves the PowerVM-specific options into it. It also removes the reference to configuration credentials from the README.rst file.

Change-Id: I8e28f118d4c6f9fe4922e6fd5ba095c5f9e62b4a
parent c3057e9b91
commit 26f158ac90
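The pattern applied throughout the diff below is the standard oslo.config group registration: options are registered under the 'powervm' group and then read through the CONF.powervm namespace instead of the default top-level namespace. A minimal sketch of that pattern follows; the option name and default are taken from this commit, but the help text and import path are assumptions, not the driver's actual code.

from oslo_config import cfg  # older releases may use 'from oslo.config import cfg'

# Illustrative option only; the real option lists are the pvm_opts,
# localdisk_opts, ssp_opts and npiv_opts changed in the hunks below.
example_opts = [
    cfg.StrOpt('volume_group_name', default='rootvg',
               help='Volume group hosting instance disks (illustrative).'),
]

CONF = cfg.CONF
# Register under the 'powervm' group rather than the default group.
CONF.register_opts(example_opts, group='powervm')

# Consumers then read the option through the group namespace.
vg_name = CONF.powervm.volume_group_name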
@@ -99,9 +99,6 @@ As such, no REST API impacts are anticipated.
 Security Impact
 ---------------
 
-The user may need to configure the credentials to communicate with the PowerVM
-REST API via a CONF file. These should be encoded.
-
 New root wrap policies may need to be updated to support various commands for
 the PowerVM REST API.
 
@@ -140,9 +137,11 @@ Other Deployer Impact
 The cloud administrator will need to refer to documentation on how to
 configure OpenStack for use with a PowerVM hypervisor.
 
-The existing configuration file attributes will be reused as much as possible.
-This reduces the number of PowerVM specific items that will be needed.
-However, the driver will require some PowerVM specific options.
+A 'powervm' configuration group will be used to contain all the PowerVM
+specific configuration settings. Existing configuration file attributes will be
+reused as much as possible. This reduces the number of PowerVM specific items
+that will be needed. However, the driver will require some PowerVM specific
+options.
 
 In this case, we plan to keep the PowerVM specifics contained within the
 configuration file (and driver code). These will be documented on the
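The deployer-visible effect of the README change above is that PowerVM settings now live under a [powervm] section of nova.conf rather than [DEFAULT]. A hypothetical fragment, using option names that appear elsewhere in this commit (the values shown are illustrative only):

[powervm]
disk_driver = localdisk
volume_group_name = rootvg
fc_attach_strategy = npiv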
@@ -325,7 +325,7 @@ class TestLocalDiskFindVG(test.TestCase):
         self.apt.read.side_effect = [self.vio_to_vg, self.vg_to_vio]
         mock_vio_wrap.return_value = self.mock_vios_feed
         mock_vg_wrap.return_value = self.mock_vg_feed
-        self.flags(volume_group_name='rootvg')
+        self.flags(volume_group_name='rootvg', group='powervm')
 
         storage = ld.LocalStorage({'adapter': self.apt,
                                    'host_uuid': 'host_uuid',
@@ -348,7 +348,7 @@ class TestLocalDiskFindVG(test.TestCase):
 
         # Override that we need a specific VIOS...that won't be found.
         self.flags(volume_group_name='rootvg',
-                   volume_group_vios_name='invalid_vios')
+                   volume_group_vios_name='invalid_vios', group='powervm')
 
         self.assertRaises(npvmex.VGNotFound, ld.LocalStorage,
                           {'adapter': self.apt, 'host_uuid': 'host_uuid',
@@ -109,7 +109,7 @@ class TestSSPDiskAdapter(test.TestCase):
         self.mock_ssp_refresh.return_value = pvm_stg.SSP.wrap(self.ssp_resp)
 
         # By default, assume the config supplied a Cluster name
-        self.flags(cluster_name='clust1')
+        self.flags(cluster_name='clust1', group='powervm')
 
     def _get_ssp_stor(self):
         ssp_stor = ssp.SSPDiskAdapter(
@@ -157,7 +157,7 @@ class TestSSPDiskAdapter(test.TestCase):
 
     def test_init_green_no_config(self):
         """No cluster name specified in config; one cluster on host - ok."""
-        self.flags(cluster_name='')
+        self.flags(cluster_name='', group='powervm')
         self.apt.read.return_value = self._bld_resp(
             entry_or_list=[self.clust_resp.entry])
         self._get_ssp_stor()
@@ -188,13 +188,13 @@ class TestSSPDiskAdapter(test.TestCase):
 
     def test_init_NoConfigNoClusterFound(self):
         """No cluster name specified in config, no clusters on host."""
-        self.flags(cluster_name='')
+        self.flags(cluster_name='', group='powervm')
         self.apt.read.return_value = self._bld_resp(status=204)
         self.assertRaises(npvmex.NoConfigNoClusterFound, self._get_ssp_stor)
 
     def test_init_NoConfigTooManyClusters(self):
         """No SSP name specified in config, more than one SSP on host."""
-        self.flags(cluster_name='')
+        self.flags(cluster_name='', group='powervm')
         clust1 = pvm_clust.Cluster.bld(None, 'newclust1',
                                        pvm_stg.PV.bld(None, 'hdisk1'),
                                        pvm_clust.Node.bld(None, 'vios1'))
@@ -65,7 +65,7 @@ class TestPowerVMDriver(test.TestCase):
 
         self.wrapper = pvm_ms.System.wrap(entries[0])
 
-        self.flags(disk_driver='localdisk')
+        self.flags(disk_driver='localdisk', group='powervm')
         self.drv_fix = self.useFixture(fx.PowerVMComputeDriver())
         self.drv = self.drv_fix.drv
         self.apt = self.drv_fix.pypvm.apt
@@ -53,7 +53,7 @@ pvm_opts = [
 
 
 CONF = cfg.CONF
-CONF.register_opts(pvm_opts)
+CONF.register_opts(pvm_opts, group='powervm')
 
 # Options imported from other regions
 CONF.import_opt('host', 'nova.netconf')
@@ -62,9 +62,8 @@ CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc', group='vnc')
 CONF.import_opt('vncserver_listen', 'nova.vnc', group='vnc')
 
 
-# NPIV Options will go in separate section. Only applicable if the
-# 'fc_attach_strategy' is set to 'npiv'. Otherwise this section can be
-# ignored.
+# NPIV Options. Only applicable if the 'fc_attach_strategy' is set to 'npiv'.
+# Otherwise this section can be ignored.
 npiv_opts = [
     cfg.IntOpt('ports_per_fabric', default=1,
                help='The number of physical ports that should be connected '
@@ -81,7 +80,7 @@ npiv_opts = [
                     'The fabric identifiers are used for the '
                     '\'fabric_<identifier>_port_wwpns\' key.')
 ]
-CONF.register_opts(npiv_opts, group='npiv')
+CONF.register_opts(npiv_opts, group='powervm')
 
 # Dictionary where the key is the NPIV Fabric Name, and the value is a list of
 # Physical WWPNs that match the key.
@@ -89,23 +88,23 @@ NPIV_FABRIC_WWPNS = {}
 
 # At this point, the fabrics should be specified. Iterate over those to
 # determine the port_wwpns per fabric.
-if CONF.npiv.fabrics is not None:
+if CONF.powervm.fabrics is not None:
     port_wwpn_keys = []
     help_text = ('A comma delimited list of all the physical FC port WWPNs '
                  'that support the specified fabric. Is tied to the NPIV '
                  'fabrics key.')
 
-    fabrics = CONF.npiv.fabrics.split(',')
+    fabrics = CONF.powervm.fabrics.split(',')
     for fabric in fabrics:
         opt = cfg.StrOpt('fabric_%s_port_wwpns' % fabric,
                          default='', help=help_text)
         port_wwpn_keys.append(opt)
 
-    CONF.register_opts(port_wwpn_keys, group='npiv')
+    CONF.register_opts(port_wwpn_keys, group='powervm')
 
     # Now that we've registered the fabrics, saturate the NPIV dictionary
     for fabric in fabrics:
         key = 'fabric_%s_port_wwpns' % fabric
-        wwpns = CONF.npiv[key].split(',')
+        wwpns = CONF.powervm[key].split(',')
         wwpns = [x.upper().strip(':') for x in wwpns]
         NPIV_FABRIC_WWPNS[fabric] = wwpns
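For reference, the fabric options above are registered dynamically: a 'fabrics' option names the NPIV fabrics, and one 'fabric_<identifier>_port_wwpns' option is registered per fabric, all now under the 'powervm' group. A hypothetical configuration consuming them (the fabric names and WWPN values are made up for illustration; colons and case are normalized by the code above):

[powervm]
fc_attach_strategy = npiv
fabrics = A,B
fabric_A_port_wwpns = 21:00:00:24:ff:40:aa:b0,21:00:00:24:ff:40:aa:b1
fabric_B_port_wwpns = 21:00:00:24:ff:40:bb:c0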
@@ -50,7 +50,7 @@ localdisk_opts = [
 
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
-CONF.register_opts(localdisk_opts)
+CONF.register_opts(localdisk_opts, group='powervm')
 
 
 class LocalStorage(disk_dvr.DiskAdapter):
@@ -58,7 +58,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
         super(LocalStorage, self).__init__(connection)
 
         # Query to get the Volume Group UUID
-        self.vg_name = CONF.volume_group_name
+        self.vg_name = CONF.powervm.volume_group_name
         self._vios_uuid, self.vg_uuid = self._get_vg_uuid(self.vg_name)
         LOG.info(_LI("Local Storage driver initialized: volume group: '%s'"),
                  self.vg_name)
@@ -267,10 +267,10 @@ class LocalStorage(disk_dvr.DiskAdapter):
         :return vios_uuid: The Virtual I/O Server pypowervm UUID.
         :return vg_uuid: The Volume Group pypowervm UUID.
         """
-        if CONF.volume_group_vios_name:
+        if CONF.powervm.volume_group_vios_name:
             # Search for the VIOS if the admin specified it.
-            vios_wraps = pvm_vios.VIOS.search(self.adapter,
-                                              name=CONF.volume_group_vios_name)
+            vios_wraps = pvm_vios.VIOS.search(
+                self.adapter, name=CONF.powervm.volume_group_vios_name)
         else:
             vios_resp = self.adapter.read(pvm_ms.System.schema_type,
                                           root_id=self.host_uuid,
@@ -42,7 +42,7 @@ ssp_opts = [
 
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
-CONF.register_opts(ssp_opts)
+CONF.register_opts(ssp_opts, group='powervm')
 
 
 class SSPDiskAdapter(disk_drv.DiskAdapter):
@@ -63,7 +63,7 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
         """
         super(SSPDiskAdapter, self).__init__(connection)
 
-        self._cluster = self._fetch_cluster(CONF.cluster_name)
+        self._cluster = self._fetch_cluster(CONF.powervm.cluster_name)
         self.clust_name = self._cluster.name
 
         # _ssp @property method will fetch and cache the SSP.
@@ -59,7 +59,8 @@ CONF = cfg.CONF
 # only supports Fibre Channel, which has multiple options for connections.
 # The connection strategy is defined above.
 VOLUME_DRIVER_MAPPINGS = {
-    'fibre_channel': vol_attach.FC_STRATEGY_MAPPING[CONF.fc_attach_strategy]
+    'fibre_channel': vol_attach.FC_STRATEGY_MAPPING[
+        CONF.powervm.fc_attach_strategy]
 }
 
 DISK_ADPT_NS = 'nova_powervm.virt.powervm.disk'
@@ -109,7 +110,8 @@ class PowerVMDriver(driver.ComputeDriver):
                      'mp_uuid': self.mp_uuid}
 
         self.disk_dvr = importutils.import_object_ns(
-            DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.disk_driver], conn_info)
+            DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver],
+            conn_info)
 
     def _get_host_uuid(self):
         """Get the System wrapper and its UUID for the (single) host."""
@@ -87,12 +87,13 @@ class ConfigDrivePowerVM(object):
             network_info=network_info)
 
         # Make sure the path exists.
-        if not os.path.exists(CONF.image_meta_local_path):
-            os.mkdir(CONF.image_meta_local_path)
+        im_path = CONF.powervm.image_meta_local_path
+        if not os.path.exists(im_path):
+            os.mkdir(im_path)
 
         file_name = pvm_util.sanitize_file_name_for_api(
             instance.name, prefix='config_', suffix='.iso')
-        iso_path = os.path.join(CONF.image_meta_local_path, file_name)
+        iso_path = os.path.join(im_path, file_name)
         with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
             LOG.info(_LI("Config drive ISO being built for instance %(inst)s "
                          "building to path %(iso_path)s."),
@@ -213,7 +214,7 @@ class ConfigDrivePowerVM(object):
                                         child_type=pvm_stg.VG.schema_type)
             vg_wraps = pvm_stg.VG.wrap(vg_resp)
             for vg_wrap in vg_wraps:
-                if vg_wrap.name == CONF.vopt_media_volume_group:
+                if vg_wrap.name == CONF.powervm.vopt_media_volume_group:
                     found_vg = vg_wrap
                     found_vios = vio_wrap
                     break
@@ -228,12 +229,12 @@ class ConfigDrivePowerVM(object):
         # exception path.
         if found_vg is None:
             raise npvmex.NoMediaRepoVolumeGroupFound(
-                vol_grp=CONF.vopt_media_volume_group)
+                vol_grp=CONF.powervm.vopt_media_volume_group)
 
         # Ensure that there is a virtual optical media repository within it.
         if len(found_vg.vmedia_repos) == 0:
-            vopt_repo = pvm_stg.VMediaRepos.bld(self.adapter, 'vopt',
-                                                str(CONF.vopt_media_rep_size))
+            vopt_repo = pvm_stg.VMediaRepos.bld(
+                self.adapter, 'vopt', str(CONF.powervm.vopt_media_rep_size))
             found_vg.vmedia_repos = [vopt_repo]
             found_vg = found_vg.update()
 
@@ -346,7 +346,7 @@ def _crt_lpar_builder(adapter, host_wrapper, instance, flavor):
 
     attrs = _build_attrs(instance, flavor)
    stdz = lpar_bldr.DefaultStandardize(
-        host_wrapper, proc_units_factor=CONF.proc_units_factor)
+        host_wrapper, proc_units_factor=CONF.powervm.proc_units_factor)
     return lpar_bldr.LPARBuilder(adapter, attrs, stdz)
 
 