Remove deprecated instance_backing from sysinv code

instance_backing is no longer used during configuration, so the
dead code is being removed.

This has a cascade effect: other methods and validators are no
longer required.

The update methods have been retained, even though there is nothing
left for them to update.
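
For illustration, the net effect on the LVG semantic checks is roughly
the following sketch (a simplified, plain-Python approximation of the
controller behaviour shown in the diff below, raising ValueError in
place of the wsme client errors; it is not the literal sysinv code):

    def check_lvg_modify(lvm_vg_name, capabilities):
        # nova-local now behaves like cgts-vg: it carries no modifiable
        # parameters, so any modify request is rejected outright.
        if lvm_vg_name in ('cgts-vg', 'nova-local'):
            raise ValueError("%s volume group does not have any "
                             "parameters to modify" % lvm_vg_name)
        # cinder-volumes still requires an lvm_type of thin or thick.
        if lvm_vg_name == 'cinder-volumes' and 'lvm_type' not in capabilities:
            raise ValueError("lvm_type parameter missing for volume group")

With instance_backing gone, nova-local joins cgts-vg as a volume group
with no user-modifiable capabilities.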

Change-Id: I76b0181a2a3dd7d7d6c8d8b686dd724ab269844d
Story: 2004764
Task: 30119
Signed-off-by: Al Bailey <Al.Bailey@windriver.com>
Al Bailey 2019-06-24 07:53:55 -05:00
parent 59085bed17
commit f22c942ce0
14 changed files with 21 additions and 278 deletions

View File

@@ -1545,7 +1545,7 @@ badMediaType (415)
:widths: 20, 20, 20, 60
"volumegroup_id", "URI", "csapi:UUID", "The unique identifier of an existing LVM volume group."
"capabilities (Optional)", "plain", "xsd:string", "A dictionary of key-value pairs prepresenting volume group parameters and values. Valid nova-local parameters are: ``instance_backing``. Valid cinder-volumes parameters are: ``lvm_type``"
"capabilities (Optional)", "plain", "xsd:string", "A dictionary of key-value pairs prepresenting volume group parameters and values. Valid cinder-volumes parameters are: ``lvm_type``"
**Response parameters**

View File

@@ -39,38 +39,16 @@ class platform::worker::storage (
$final_pvs,
$lvm_global_filter = '[]',
$lvm_update_filter = '[]',
$instance_backing = 'image',
$images_rbd_pool = 'ephemeral',
$images_rbd_ceph_conf = '/etc/ceph/ceph.conf'
) {
$adding_pvs_str = join($adding_pvs,' ')
$removing_pvs_str = join($removing_pvs,' ')
$round_to_extent = false
# Ensure partitions update prior to local storage configuration
Class['::platform::partitions'] -> Class[$name]
case $instance_backing {
'image': {
$images_type = 'default'
$images_volume_group = absent
$round_to_extent = false
$local_monitor_state = 'disabled'
$images_rbd_pool_real = absent
$images_rbd_ceph_conf_real = absent
}
'remote': {
$images_type = 'rbd'
$images_volume_group = absent
$round_to_extent = false
$local_monitor_state = 'disabled'
$images_rbd_pool_real = $images_rbd_pool
$images_rbd_ceph_conf_real = $images_rbd_ceph_conf
}
default: {
fail("Unsupported instance backing: ${instance_backing}")
}
}
::platform::worker::storage::wipe_new_pv { $adding_pvs: }
::platform::worker::storage::wipe_pv_and_format { $removing_pvs: }

View File

@@ -175,11 +175,6 @@ def do_host_lvg_delete(cc, args):
@utils.arg('lvgnameoruuid',
metavar='<lvg name or uuid>',
help="Name or UUID of lvg [REQUIRED]")
@utils.arg('-b', '--instance_backing',
metavar='<instance backing>',
choices=['image', 'remote'],
help=("Type of instance backing. "
"Allowed values: image, remote. [nova-local]"))
@utils.arg('-l', '--lvm_type',
metavar='<lvm_type>',
choices=['thick', 'thin'],
@@ -189,12 +184,11 @@ def do_host_lvg_modify(cc, args):
"""Modify the attributes of a Local Volume Group."""
# Get all the fields from the command arguments
field_list = ['hostnameorid', 'lvgnameoruuid',
'instance_backing', 'lvm_type']
field_list = ['hostnameorid', 'lvgnameoruuid', 'lvm_type']
fields = dict((k, v) for (k, v) in vars(args).items()
if k in field_list and not (v is None))
all_caps_list = ['instance_backing', 'lvm_type']
all_caps_list = ['lvm_type']
requested_caps_dict = {}
for cap in all_caps_list:

View File

@@ -322,17 +322,10 @@
<xs:enumeration value="nova-local" />
</xs:restriction>
</xs:simpleType>
<xs:simpleType name="Instance_backing">
<xs:restriction base="xs:string">
<xs:enumeration value="image" />
<xs:enumeration value="remote" />
</xs:restriction>
</xs:simpleType>
<xs:complexType name="Lvg">
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attribute type="Lvm_vg_name" name="lvm_vg_name" use="required" />
<xs:attribute type="Instance_backing" name="instance_backing" use="required" />
</xs:extension>
</xs:simpleContent>
</xs:complexType>

View File

@@ -157,10 +157,9 @@
size: minimum size (in GiB).
The lvg tags below define the local volume group
lvm_vg_name: local volume group name
instance_backing: image, or remote
-->
<disk path="/dev/sdb" size="223" />
<lvg lvm_vg_name="nova-local" instance_backing="image" />
<lvg lvm_vg_name="nova-local" />
</localstorageProfile>
<localstorageProfile name="localstorage-profile_remote">
@@ -169,10 +168,9 @@
size: minimum size (in MB).
The lvg tags below define the local volume group
lvm_vg_name: local volume group name
instance_backing: image, or remote
-->
<disk path="/dev/sdb" size="223" />
<lvg lvm_vg_name="nova-local" instance_backing="remote" />
<lvg lvm_vg_name="nova-local" />
</localstorageProfile>

View File

@@ -4183,8 +4183,7 @@ class HostController(rest.RestController):
break
# Prevent unlock if nova-local volume group has: invalid state
# (e.g., removing), invalid instance_backing, no physical
# volumes allocated.
# (e.g., removing), no physical volumes allocated.
if nova_local_storage_lvg:
if nova_local_storage_lvg.vg_state == constants.LVG_DEL:
raise wsme.exc.ClientSideError(
@@ -4212,17 +4211,6 @@
"physical volumes in the adding or provisioned "
"state."))
lvg_capabilities = nova_local_storage_lvg['capabilities']
instance_backing = lvg_capabilities.get(
constants.LVG_NOVA_PARAM_BACKING)
if instance_backing not in [
constants.LVG_NOVA_BACKING_IMAGE,
constants.LVG_NOVA_BACKING_REMOTE]:
raise wsme.exc.ClientSideError(
_("A host with worker functionality and a "
"nova-local volume group requires that a valid "
"instance backing is configured. "))
else:
# This method is only called with hosts that have a worker
# subfunction and is locked or if subfunction_config action is
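
Aside on the hunk above: with the instance_backing validation removed,
the remaining unlock-time guard for nova-local reduces to roughly the
following sketch (illustrative only; 'removing' mirrors the comment in
the diff, physical_volumes is a placeholder attribute, and this is not
the literal HostController code):

    def check_nova_local_on_unlock(nova_local_lvg):
        # Unlock is still refused while nova-local is being removed or
        # has no usable physical volumes; the instance_backing check is gone.
        if nova_local_lvg is None:
            return
        if nova_local_lvg.vg_state == 'removing':  # constants.LVG_DEL
            raise RuntimeError("nova-local volume group is being removed")
        if not nova_local_lvg.physical_volumes:    # placeholder attribute
            raise RuntimeError("nova-local requires physical volumes in "
                               "the adding or provisioned state")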

View File

@@ -408,45 +408,10 @@ def _cinder_volumes_patch_semantic_checks(caps_dict):
raise wsme.exc.ClientSideError(msg)
def _nova_local_patch_semantic_checks(caps_dict):
# make sure that only valid capabilities are provided
valid_caps = set([constants.LVG_NOVA_PARAM_BACKING])
invalid_caps = set(caps_dict.keys()) - valid_caps
# Do we have something unexpected?
if len(invalid_caps) > 0:
raise wsme.exc.ClientSideError(
_("Invalid parameter(s) for volume group %s: %s " %
(constants.LVG_NOVA_LOCAL,
", ".join(str(i) for i in invalid_caps))))
# make sure that we are modifying something
elif len(caps_dict) == 0:
msg = _('No parameter specified. No action taken')
raise wsme.exc.ClientSideError(msg)
def _lvg_pre_patch_checks(lvg_obj, patch_obj):
lvg_dict = lvg_obj.as_dict()
# nova-local VG checks:
if lvg_dict['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
for p in patch_obj:
if p['path'] == '/capabilities':
patch_caps_dict = p['value']
# Make sure we've been handed a valid patch
_nova_local_patch_semantic_checks(patch_caps_dict)
# Update the patch with the current capabilities that aren't
# being patched
current_caps_dict = lvg_dict['capabilities']
for k in (set(current_caps_dict.keys()) -
set(patch_caps_dict.keys())):
patch_caps_dict[k] = current_caps_dict[k]
p['value'] = patch_caps_dict
elif lvg_dict['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
if lvg_dict['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
for p in patch_obj:
if p['path'] == '/capabilities':
patch_caps_dict = p['value']
@@ -584,10 +549,11 @@ def _check(op, lvg):
elif op == "modify":
# Sanity check: parameters
if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
if lvg['lvm_vg_name'] in [constants.LVG_CGTS_VG,
constants.LVG_NOVA_LOCAL]:
raise wsme.exc.ClientSideError(_("%s volume group does not have "
"any parameters to modify") %
constants.LVG_CGTS_VG)
lvg['lvm_vg_name'])
elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES:
if constants.LVG_CINDER_PARAM_LVM_TYPE not in lvg_caps:
raise wsme.exc.ClientSideError(
@@ -605,44 +571,6 @@ def _check(op, lvg):
constants.LVG_CINDER_LVM_TYPE_THICK))
raise wsme.exc.ClientSideError(msg)
elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
# instance_backing: This is a required parameter
if constants.LVG_NOVA_PARAM_BACKING not in lvg_caps:
raise wsme.exc.ClientSideError(
_('Internal Error: %s parameter missing for volume '
'group.') % constants.LVG_NOVA_PARAM_BACKING)
else:
# Instances backed by remote ephemeral storage can only be
# used on systems that have a Ceph (internal or external)
# backend.
if ((lvg_caps.get(constants.LVG_NOVA_PARAM_BACKING) ==
constants.LVG_NOVA_BACKING_REMOTE) and
not StorageBackendConfig.has_backend_configured(
pecan.request.dbapi,
constants.SB_TYPE_CEPH,
service=constants.SB_SVC_NOVA,
check_only_defaults=False,
rpcapi=pecan.request.rpcapi) and
not StorageBackendConfig.has_backend_configured(
pecan.request.dbapi,
constants.SB_TYPE_CEPH_EXTERNAL,
service=constants.SB_SVC_NOVA,
check_only_defaults=False,
rpcapi=pecan.request.rpcapi)):
raise wsme.exc.ClientSideError(
_('Invalid value for instance_backing. Instances '
'backed by remote ephemeral storage can only be '
'used on systems that have a Ceph (internal or '
'external) backend.'))
if (lvg['lvm_cur_lv'] > 1):
raise wsme.exc.ClientSideError(
_("Can't modify the volume group: %s. There are currently "
"%d instance volumes present in the volume group. "
"Terminate or migrate all instances from the worker to "
"allow volume group madifications." %
(lvg['lvm_vg_name'], lvg['lvm_cur_lv'] - 1)))
elif op == "delete":
if lvg['lvm_vg_name'] == constants.LVG_CGTS_VG:
raise wsme.exc.ClientSideError(_("%s volume group cannot be deleted") %
@@ -656,13 +584,8 @@ def _check(op, lvg):
_("cinder-volumes LVG cannot be removed once it is "
"provisioned and LVM backend is added."))
elif lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL:
if (lvg['lvm_cur_lv'] and lvg['lvm_cur_lv'] > 1):
raise wsme.exc.ClientSideError(
_("Can't delete volume group: %s. There are currently %d "
"instance volumes present in the volume group. Terminate"
" or migrate all instances from the worker to allow "
"volume group deletion." % (lvg['lvm_vg_name'],
lvg['lvm_cur_lv'] - 1)))
# We never have more than 1 LV in nova-local VG
pass
else:
raise wsme.exc.ClientSideError(
_("Internal Error: Invalid Volume Group operation: %s" % op))
@@ -725,21 +648,7 @@ def _create(lvg, iprofile=None, applyprofile=None):
if not lvg_in_db:
# Add the default volume group parameters
if lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL and not iprofile:
lvg_caps = lvg['capabilities']
if applyprofile:
# defined from create or inherit the capabilities
LOG.info("LVG create %s applyprofile=%s" %
(lvg_caps, applyprofile))
else:
lvg_caps_dict = {
constants.LVG_NOVA_PARAM_BACKING:
constants.LVG_NOVA_BACKING_IMAGE
}
lvg_caps.update(lvg_caps_dict)
LOG.info("Updated lvg capabilities=%s" % lvg_caps)
elif lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES and not iprofile:
if lvg['lvm_vg_name'] == constants.LVG_CINDER_VOLUMES and not iprofile:
lvg_caps = lvg['capabilities']
if (constants.LVG_CINDER_PARAM_LVM_TYPE in lvg_caps) or applyprofile:

View File

@@ -1815,18 +1815,7 @@ def _create_localstorage_profile(profile_name, profile_node):
profile_id = ihost.id
try:
ilvg = ilvgs_local[0]
instance_backing = ilvg.get(constants.LVG_NOVA_PARAM_BACKING)
if instance_backing == constants.LVG_NOVA_BACKING_IMAGE:
capabilities_dict = {constants.LVG_NOVA_PARAM_BACKING:
constants.LVG_NOVA_BACKING_IMAGE}
elif instance_backing == constants.LVG_NOVA_BACKING_REMOTE:
capabilities_dict = {constants.LVG_NOVA_PARAM_BACKING:
constants.LVG_NOVA_BACKING_REMOTE}
else:
return ("Error", _('error: Local Storage profile %s is invalid')
% profile_name,
_('Unrecognized instance_backing %s.') % instance_backing)
capabilities_dict = {}
# create profile ilvg
lvgdict = {'capabilities': capabilities_dict,

View File

@@ -704,24 +704,6 @@ def _apply_backend_changes(op, sb_obj):
services)
def _apply_nova_specific_changes(sb_obj, old_sb_obj=None):
"""If the backend's services have been modified and nova has been either
added or (re)moved, set the hosts with worker functionality and a
certain nova-local instance backing to Config out-of-date.
"""
services = api_helper.getListFromServices(sb_obj.as_dict())
if old_sb_obj:
old_services = api_helper.getListFromServices(old_sb_obj.as_dict())
else:
old_services = []
diff_services = set(services) ^ set(old_services)
if constants.SB_SVC_NOVA in diff_services:
pecan.request.rpcapi.config_update_nova_local_backed_hosts(
pecan.request.context,
constants.LVG_NOVA_BACKING_REMOTE)
#
# Create
#
@@ -843,9 +825,6 @@ def _create(storage_ceph):
# Enable the backend:
_apply_backend_changes(constants.SB_API_OP_CREATE, storage_backend_obj)
# Make any needed changes for nova local.
_apply_nova_specific_changes(storage_backend_obj)
return storage_ceph_obj
@@ -1391,8 +1370,6 @@ def _patch(storceph_uuid, patch):
_apply_backend_changes(constants.SB_API_OP_MODIFY,
rpc_storceph)
_apply_nova_specific_changes(rpc_storceph, ostorceph)
return StorageCeph.convert_with_links(rpc_storceph)
except exception.HTTPNotFound:

View File

@@ -583,13 +583,8 @@ PV_TYPE_PARTITION = 'partition'
PV_NAME_UNKNOWN = 'unknown'
# Storage: Volume Group Parameter Types
LVG_NOVA_PARAM_BACKING = 'instance_backing'
LVG_CINDER_PARAM_LVM_TYPE = 'lvm_type'
# Storage: Volume Group Parameter: Nova: Backing types
LVG_NOVA_BACKING_IMAGE = 'image'
LVG_NOVA_BACKING_REMOTE = 'remote'
# Storage: Volume Group Parameter: Cinder: LVM provisioing
LVG_CINDER_LVM_TYPE_THIN = 'thin'
LVG_CINDER_LVM_TYPE_THICK = 'thick'
@@ -816,7 +811,7 @@ SB_TIER_CEPH_POOLS = [
'data_pt': 20}]
# See http://ceph.com/pgcalc/. We set it to more than 100 because pool usage
# varies greatly in Titanium Cloud and we want to avoid running too low on PGs
# varies greatly in StarlingX and we want to avoid running too low on PGs
CEPH_TARGET_PGS_PER_OSD = 200
# Dual node and Storage
@@ -1377,12 +1372,12 @@ WARN_CINDER_ON_ROOT_WITH_LVM = 1
WARN_CINDER_ON_ROOT_WITH_CEPH = 2
WARNING_ROOT_PV_CINDER_LVM_MSG = (
"Warning: All deployed VMs must be booted from Cinder volumes and "
"not use ephemeral or swap disks. See Titanium Cloud System Engineering "
"not use ephemeral or swap disks. See StarlingX System Engineering "
"Guidelines for more details on supported worker configurations.")
WARNING_ROOT_PV_CINDER_CEPH_MSG = (
"Warning: This worker must have instance_backing set to 'remote' "
"or use a secondary disk for local storage. See Titanium Cloud System "
"Engineering Guidelines for more details on supported worker configurations.")
"Warning: This worker must use a secondary disk for local storage. "
"See StarlingX System Engineering Guidelines for more details on "
"supported worker configurations.")
PV_WARNINGS = {WARN_CINDER_ON_ROOT_WITH_LVM: WARNING_ROOT_PV_CINDER_LVM_MSG,
WARN_CINDER_ON_ROOT_WITH_CEPH: WARNING_ROOT_PV_CINDER_CEPH_MSG}

View File

@@ -5952,30 +5952,6 @@ class ConductorManager(service.PeriodicService):
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
def config_update_nova_local_backed_hosts(self, context, instance_backing):
hosts_uuid = self.hosts_with_nova_local(instance_backing)
if hosts_uuid:
personalities = [constants.CONTROLLER, constants.WORKER]
self._config_update_hosts(context,
personalities,
host_uuids=hosts_uuid,
reboot=True)
def hosts_with_nova_local(self, backing_type):
"""Returns a list of hosts with certain backing type of nova_local"""
hosts_uuid = []
hosts = self.dbapi.ihost_get_list()
for host in hosts:
if ((host.personality and host.personality == constants.WORKER) or
(host.subfunctions and constants.WORKER in host.subfunctions)):
ilvgs = self.dbapi.ilvg_get_by_ihost(host['uuid'])
for lvg in ilvgs:
if (lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL and
lvg['capabilities'].get(constants.LVG_NOVA_PARAM_BACKING) ==
backing_type):
hosts_uuid.append(host['uuid'])
return hosts_uuid
def update_ceph_external_config(self, context, sb_uuid, services):
"""Update the manifests for Cinder/Glance External Ceph backend"""
@@ -6066,10 +6042,6 @@
'task': None}
self.dbapi.storage_ceph_external_update(sb_uuid, values)
if constants.SB_SVC_NOVA in services:
self.config_update_nova_local_backed_hosts(
context, constants.LVG_NOVA_BACKING_REMOTE)
def _update_storage_backend_alarm(self, alarm_state, backend, reason_text=None):
""" Update storage backend configuration alarm"""
entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_STORAGE_BACKEND,

View File

@@ -930,18 +930,6 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
sb_uuid=sb_uuid,
services=services))
def config_update_nova_local_backed_hosts(self, context, instance_backing):
"""Synchronously, have the conductor set the hosts with worker
functionality and with a certain nova-local instance backing to
config out-of-date.
:param context: request context
:param instance_backing: the host's instance backing
"""
return self.call(context,
self.make_msg('config_update_nova_local_backed_hosts',
instance_backing=instance_backing))
def update_external_cinder_config(self, context):
"""Synchronously, have the conductor update Cinder Exernal(shared)
on a controller.

View File

@@ -134,19 +134,6 @@ class CephPuppet(openstack.OpenstackBasePuppet):
return config
def _is_ceph_mon_required(self, host, operator):
# Two conditions that we need to check for:
# 1) If cinder is a shared service and it has a ceph backend
# 2) If remote instance backing is configured on the host
if (constants.SERVICE_TYPE_VOLUME in self._get_shared_services() and
operator.region_has_ceph_backend()):
lvgs = self.dbapi.ilvg_get_by_ihost(host.uuid)
for lvg in lvgs:
if lvg.capabilities.get(constants.LVG_NOVA_PARAM_BACKING) \
== constants.LVG_NOVA_BACKING_REMOTE:
return True
return False
def _get_remote_ceph_mon_info(self, operator):
# retrieve the ceph monitor information from the primary
ceph_mon_info = operator.get_ceph_mon_info()
@@ -182,17 +169,6 @@ class CephPuppet(openstack.OpenstackBasePuppet):
if host.personality in [constants.CONTROLLER, constants.STORAGE]:
config.update(self._get_ceph_osd_config(host))
config.update(self._get_ceph_mon_config(host))
# if it is a worker node and on an secondary region,
# check if ceph mon configuration is required
if constants.WORKER in host.subfunctions and self._region_config():
from sysinv.conductor import openstack
op = openstack.OpenStackOperator(self.dbapi)
if self._is_ceph_mon_required(host, op):
ceph_mon_info = self._get_remote_ceph_mon_info(op)
if ceph_mon_info is not None:
config.update(ceph_mon_info)
return config
def get_public_url(self):

View File

@@ -440,15 +440,9 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
def _get_storage_config(self, host):
pvs = self.dbapi.ipv_get_by_ihost(host.id)
# TODO(abailey) instance_backing is deprecated.
# local vs remote storage is now determined by a
# kubernetes label: common.LABEL_REMOTE_STORAGE
instance_backing = constants.LVG_NOVA_BACKING_IMAGE
final_pvs = []
adding_pvs = []
removing_pvs = []
nova_lvg_uuid = None
for pv in pvs:
if (pv.lvm_vg_name == constants.LVG_NOVA_LOCAL and
pv.pv_state != constants.PV_ERR):
@@ -468,13 +462,6 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
removing_pvs.append(pv_path)
else:
final_pvs.append(pv_path)
nova_lvg_uuid = pv.ilvg_uuid
if nova_lvg_uuid:
lvg = self.dbapi.ilvg_get(nova_lvg_uuid)
instance_backing = lvg.capabilities.get(
constants.LVG_NOVA_PARAM_BACKING)
global_filter, update_filter = self._get_lvm_global_filter(host)
@@ -483,8 +470,7 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
'platform::worker::storage::adding_pvs': adding_pvs,
'platform::worker::storage::removing_pvs': removing_pvs,
'platform::worker::storage::lvm_global_filter': global_filter,
'platform::worker::storage::lvm_update_filter': update_filter,
'platform::worker::storage::instance_backing': instance_backing}
'platform::worker::storage::lvm_update_filter': update_filter}
# If NOVA is a service on a ceph-external backend, use the ephemeral_pool
# and ceph_conf file that are stored in that DB entry.
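
Aside on the TODO in the hunk above: local vs. remote storage is now
derived from the common.LABEL_REMOTE_STORAGE host label rather than the
nova-local instance_backing capability. A rough sketch of that kind of
lookup follows; the helper name, the dbapi call and the label value are
illustrative assumptions, not the actual sysinv API:

    def host_uses_remote_storage(dbapi, host_uuid):
        # Assumed shape: hosts labelled remote-storage=enabled are treated
        # as remote-backed, replacing instance_backing='remote'.
        for label in dbapi.label_get_by_host(host_uuid):
            if (label.label_key == 'remote-storage' and
                    label.label_value == 'enabled'):
                return True
        return False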