CEPH persistent storage backend for Kubernetes
- add support for rbd-provisioner: these changes set up the environment for
  launching the rbd-provisioner helm chart
- add a new pool for rbd-provisioner: kube-rbd for the primary CEPH tier;
  for secondary tiers it follows the <pool_name>-<tier_name> rule
  -> adjust quotas for the new pool(s)
- add a new service for internal CEPH backend(s): 'rbd-provisioner' and the
  needed operations:
  -> add the service to a CEPH backend
  -> remove the service from a CEPH backend
  -> modify capabilities related to the rbd-provisioner service:
     namespaces, storage class names
- when the rbd-provisioner is added as a service, we generate CEPH keys and
  k8s secrets so that k8s can access and use the new kube-rbd pool(s)
- restructure the way we decide to apply manifests when modifying services:
  nova and the rbd-provisioner are fast-changing services
- update py27 tests

Change-Id: I86295c6f5e1e3d00b44a99688f027cc8a48e361f
Story: 2002844
Task: 26876
Co-Authored-By: Ovidiu Poncea <Ovidiu.Poncea@windriver.com>
Signed-off-by: Irina Mihai <Irina.Mihai@windriver.com>
This commit is contained in: parent 1b90e60caf, commit 2a8e146e74
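For reference, a minimal sketch of the kube pool naming rule described in the commit message. It mirrors K8RbdProvisioner.get_pool() further down in this diff; the helper name and the 'ceph-store' default backend name are assumptions for illustration.

CEPH_POOL_KUBE_NAME = 'kube-rbd'
PRIMARY_BACKEND_NAME = 'ceph-store'  # assumed default backend name

def kube_pool_name(backend_name):
    # The primary tier uses the plain pool name; secondary tiers append
    # the backend/tier name.
    if backend_name == PRIMARY_BACKEND_NAME:
        return CEPH_POOL_KUBE_NAME
    return CEPH_POOL_KUBE_NAME + '-' + backend_name

assert kube_pool_name('ceph-store') == 'kube-rbd'
assert kube_pool_name('gold') == 'kube-rbd-gold'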
@@ -27,6 +27,12 @@ class StorageBackend(base.Resource):
         return "<storage_backends %s>" % self._info


+def _format_cap(obj):
+    obj.capabilities = [str("%s: %s" % (k, v)) for (k, v)
+                        in obj.capabilities.items() if k[0] != '.']
+    obj.capabilities = "\n".join(obj.capabilities)
+
+
 class StorageBackendManager(base.Manager):
     resource_class = StorageBackend
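The new _format_cap helper drops hidden (dot-prefixed) capability keys and renders the rest one per line. A small standalone sketch of its effect; the sample data is made up:

class Obj(object):
    pass

obj = Obj()
obj.capabilities = {'replication': '2',
                    'rbd_provisioner_namespaces': 'kube-system',
                    '.rbd_provisioner_namespaces_ready': 'kube-system'}

obj.capabilities = [str("%s: %s" % (k, v)) for (k, v)
                    in obj.capabilities.items() if k[0] != '.']
obj.capabilities = "\n".join(obj.capabilities)
# The hidden '.rbd_provisioner_namespaces_ready' key is filtered out; the
# rest is rendered as "key: value" lines, e.g.:
#   replication: 2
#   rbd_provisioner_namespaces: kube-system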
@@ -34,8 +40,13 @@ class StorageBackendManager(base.Manager):
     def _path(id=None):
         return '/v1/storage_backend/%s' % id if id else '/v1/storage_backend'

-    def list(self):
-        return self._list(self._path(), "storage_backends")
+    def list(self, asdict=False):
+        backends = self._list(self._path(), "storage_backends")
+        if not asdict:
+            for bk in backends:
+                _format_cap(bk)
+
+        return backends

     def get(self, storage_backend_id):
         try:
@@ -95,7 +106,7 @@ def _show_backend(backend_obj, extra_fields=None):
     utils.print_tuple_list(data)


-def backend_show(cc, backend_name_or_uuid):
+def backend_show(cc, backend_name_or_uuid, asdict=False):
     db_backends = cc.storage_backend.list()
     db_backend = next((b for b in db_backends
                        if ((b.name == backend_name_or_uuid) or
@@ -108,6 +119,8 @@ def backend_show(cc, backend_name_or_uuid):
     backend_type = db_backend.backend.replace('-', '_')
     backend_client = getattr(cc, 'storage_' + backend_type)
     backend_obj = backend_client.get(db_backend.uuid)
+    if not asdict:
+        _format_cap(backend_obj)
     extra_fields = getattr(eval('storage_' + backend_type),
                            'DISPLAY_ATTRIBUTES')
     _show_backend(backend_obj, extra_fields)
@@ -36,10 +36,15 @@ def do_storage_usage_list(cc, args):
     utils.print_list(usage, fields, field_labels, sortby=0)


+@utils.arg('--asdict',
+           action='store_true',
+           default=False,
+           help=('Format capabilities field as dictionary.'))
 def do_storage_backend_list(cc, args):
     """List storage backends."""

-    storage_backends = cc.storage_backend.list()
+    asdict = args.asdict if 'asdict' in args else None
+    storage_backends = cc.storage_backend.list(asdict)

     field_labels = ['uuid', 'name', 'backend', 'state', 'task', 'services',
                     'capabilities']
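With this change a caller can pick either rendering. A hedged usage sketch of the updated client API, assuming an instantiated cgtsclient handle cc:

# capabilities formatted as "key: value" lines (default; hides '.'-keys)
backends = cc.storage_backend.list()

# capabilities left as a raw dictionary, including hidden keys
backends = cc.storage_backend.list(asdict=True)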
@@ -51,11 +56,16 @@ def do_storage_backend_list(cc, args):
 @utils.arg('backend_name_or_uuid',
            metavar='<backend name or uuid>',
            help="Name or UUID of the backend [REQUIRED]")
+@utils.arg('--asdict',
+           action='store_true',
+           default=False,
+           help=('Format capabilities field as dictionary.'))
 def do_storage_backend_show(cc, args):
     """Show a storage backend."""

+    asdict = args.asdict if 'asdict' in args else None
     storage_backend_utils.backend_show(
-        cc, args.backend_name_or_uuid)
+        cc, args.backend_name_or_uuid, asdict)


 @utils.arg('backend',
@@ -102,6 +112,11 @@ def do_storage_backend_add(cc, args):
 @utils.arg('backend_name_or_uuid',
            metavar='<backend name or uuid>',
            help="Name or UUID of the backend [REQUIRED]")
+@utils.arg('attributes',
+           metavar='<parameter=value>',
+           nargs='*',
+           default=[],
+           help="Required backend/service parameters to apply.")
 @utils.arg('-s', '--services',
            metavar='<services>',
            help=('Optional string of comma separated services to add/update. '
@@ -110,11 +125,6 @@ def do_storage_backend_add(cc, args):
            metavar='<ceph_conf>',
            help=('Location of the Ceph configuration file used for provisioning'
                  ' an external backend.'))
-@utils.arg('attributes',
-           metavar='<parameter=value>',
-           nargs='*',
-           default=[],
-           help="Required backend/service parameters to apply.")
 def do_storage_backend_modify(cc, args):
     """Modify a storage backend."""

|
@ -13,14 +13,14 @@ from cgtsclient import exc
|
|||
CREATION_ATTRIBUTES = ['confirmed', 'name', 'services', 'capabilities',
|
||||
'tier_uuid', 'cinder_pool_gib', 'glance_pool_gib',
|
||||
'ephemeral_pool_gib', 'object_pool_gib',
|
||||
'object_gateway']
|
||||
'kube_pool_gib', 'object_gateway']
|
||||
DISPLAY_ATTRIBUTES = ['object_gateway', 'ceph_total_space_gib',
|
||||
'object_pool_gib', 'cinder_pool_gib',
|
||||
'glance_pool_gib', 'ephemeral_pool_gib',
|
||||
'kube_pool_gib', 'glance_pool_gib', 'ephemeral_pool_gib',
|
||||
'tier_name', 'tier_uuid']
|
||||
PATCH_ATTRIBUTES = ['object_gateway', 'object_pool_gib',
|
||||
'cinder_pool_gib', 'glance_pool_gib',
|
||||
'ephemeral_pool_gib']
|
||||
'ephemeral_pool_gib', 'kube_pool_gib']
|
||||
|
||||
|
||||
class StorageCeph(base.Resource):
|
||||
|
|
|
@@ -4466,8 +4466,8 @@ class HostController(rest.RestController):

     @staticmethod
     def _update_add_ceph_state():

         api = pecan.request.dbapi

         backend = StorageBackendConfig.get_configuring_backend(api)
         if backend and backend.backend == constants.CINDER_BACKEND_CEPH:
             ihosts = api.ihost_get_by_personality(
@@ -775,12 +775,13 @@ def _create(stor, iprofile=None, create_pv=True):
     # Get the tier the stor should be associated with
     tierId = stor.get('fortierid') or stor.get('tier_uuid')
     if not tierId:
-        # Get the available tiers. If only one exists (the default tier) then add
-        # it.
+        # Get the available tiers. If only one exists (the default tier)
+        # then add it.
         default_ceph_tier_name = constants.SB_TIER_DEFAULT_NAMES[
             constants.SB_TIER_TYPE_CEPH]
         tier_list = pecan.request.dbapi.storage_tier_get_list()
-        if len(tier_list) == 1 and tier_list[0].name == default_ceph_tier_name:
+        if (len(tier_list) == 1 and
+                tier_list[0].name == default_ceph_tier_name):
             tierId = tier_list[0].uuid
         else:
             raise wsme.exc.ClientSideError(
@@ -19,8 +19,9 @@
 # Copyright (c) 2013-2018 Wind River Systems, Inc.
 #

-import jsonpatch
+import copy
+import jsonpatch
 import re

 from oslo_utils import strutils
 from oslo_serialization import jsonutils
@@ -43,6 +44,7 @@ from sysinv.common import constants
 from sysinv.common import exception
 from sysinv.common import utils as cutils
 from sysinv.common.storage_backend_conf import StorageBackendConfig
+from sysinv.common.storage_backend_conf import K8RbdProvisioner
 from sysinv import objects
 from sysinv.openstack.common import log
 from sysinv.openstack.common import uuidutils
@@ -52,13 +54,25 @@ import controller_fs as controller_fs_api

 LOG = log.getLogger(__name__)

-HIERA_DATA = {
+CAPABILITIES = {
     'backend': [constants.CEPH_BACKEND_REPLICATION_CAP,
                 constants.CEPH_BACKEND_MIN_REPLICATION_CAP],
     constants.SB_SVC_CINDER: [],
     constants.SB_SVC_GLANCE: [],
     constants.SB_SVC_SWIFT: [],
     constants.SB_SVC_NOVA: [],
+    constants.SB_SVC_RBD_PROVISIONER: [constants.K8S_RBD_PROV_NAMESPACES,
+                                       constants.K8S_RBD_PROV_STORAGECLASS_NAME],
+}
+
+MANDATORY_CAP = {
+    'backend': [constants.CEPH_BACKEND_REPLICATION_CAP,
+                constants.CEPH_BACKEND_MIN_REPLICATION_CAP],
+    constants.SB_SVC_CINDER: [],
+    constants.SB_SVC_GLANCE: [],
+    constants.SB_SVC_SWIFT: [],
+    constants.SB_SVC_NOVA: [],
+    constants.SB_SVC_RBD_PROVISIONER: [],
 }
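The split above separates the capabilities a service understands (CAPABILITIES) from those it strictly requires (MANDATORY_CAP) — for rbd-provisioner the namespaces and storage class name are understood but optional. A minimal sketch of how a validator can use the two tables; this is a simplification of the checks in _check_backend_ceph below, not the exact implementation:

def check_service_caps(svc, caps_dict, capabilities, mandatory_cap):
    # Keys the service does not understand are rejected...
    unknown = [k for k in caps_dict if k not in capabilities[svc]]
    # ...and every mandatory key must be present with a non-empty value.
    missing = [k for k in mandatory_cap[svc] if not caps_dict.get(k)]
    return unknown, missing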
@@ -105,6 +119,9 @@ class StorageCeph(base.APIBase):
     "The object gateway pool GiB of storage ceph - ceph object gateway pool "
     "quota."

+    kube_pool_gib = int
+    "The k8s pool GiB of storage ceph - ceph pool quota for k8s."
+
     object_gateway = bool
     "If object gateway is configured."

@@ -204,6 +221,7 @@ class StorageCeph(base.APIBase):
                'glance_pool_gib',
                'ephemeral_pool_gib',
                'object_pool_gib',
+               'kube_pool_gib',
                'object_gateway',
                'ceph_total_space_gib',
                'tier_name',
@@ -361,9 +379,9 @@ def _get_options_string(storage_ceph):
     return opt_str


-def _discover_and_validate_backend_hiera_data(caps_dict, confirmed):
+def _discover_and_validate_backend_config_data(caps_dict, confirmed):
     # Validate parameters
-    for k in HIERA_DATA['backend']:
+    for k in CAPABILITIES['backend']:
         v = caps_dict.get(k, None)
         if not v:
             raise wsme.exc.ClientSideError("Missing required backend "
@@ -417,37 +435,74 @@ def _discover_and_validate_backend_config_data(caps_dict, confirmed):
                                            'ceph backend'))


-def _discover_and_validate_cinder_hiera_data(caps_dict):
-    # Currently there is no backend specific hiera_data for this backend
+def _discover_and_validate_cinder_capabilities(caps_dict, storage_ceph):
+    # Currently there is no backend specific data for this backend
     pass


-def _discover_and_validate_glance_hiera_data(caps_dict):
-    # Currently there is no backend specific hiera_data for this backend
+def _discover_and_validate_glance_capabilities(caps_dict, storage_ceph):
+    # Currently there is no backend specific data for this backend
     pass


-def _discover_and_validate_swift_hiera_data(caps_dict):
-    # Currently there is no backend specific hiera_data for this backend
+def _discover_and_validate_swift_capabilities(caps_dict, storage_ceph):
+    # Currently there is no backend specific data for this backend
     pass


-def _discover_and_validate_nova_hiera_data(caps_dict):
-    # Currently there is no backend specific hiera_data for this backend
+def _discover_and_validate_nova_capabilities(caps_dict, storage_ceph):
+    # Currently there is no backend specific data for this backend
     pass


+def _discover_and_validate_rbd_provisioner_capabilities(caps_dict, storage_ceph):
+    # Use the same regex that Kubernetes uses to validate its labels
+    r = re.compile(r'[a-z0-9]([-a-z0-9]*[a-z0-9])')
+    msg_help = ("Each name or label must consist of lower case "
+                "alphanumeric characters or '-', and must start "
+                "and end with an alphanumeric character.")
+
+    # Check for a valid list of namespaces
+    if constants.K8S_RBD_PROV_NAMESPACES in caps_dict:
+        namespaces = caps_dict[constants.K8S_RBD_PROV_NAMESPACES].split(',')
+        for namespace in namespaces:
+            if not r.match(namespace):
+                msg = _("Invalid list of namespaces provided: '%s', please "
+                        "provide a valid comma separated list of Kubernetes "
+                        "namespaces. %s" % (namespaces, msg_help))
+                raise wsme.exc.ClientSideError(msg)
+
+    if constants.K8S_RBD_PROV_STORAGECLASS_NAME in caps_dict:
+        # Check for a valid RBD StorageClass name
+        name = caps_dict[constants.K8S_RBD_PROV_STORAGECLASS_NAME]
+        if not r.match(name):
+            msg = _("Invalid RBD StorageClass name '%s'. %s" %
+                    (name, msg_help))
+            raise wsme.exc.ClientSideError(msg)
+
+    # Check the uniqueness of the RBD StorageClass name in the DB.
+    if constants.K8S_RBD_PROV_STORAGECLASS_NAME in caps_dict:
+        ceph_backends = [bk for bk in pecan.request.dbapi.storage_backend_get_list()
+                         if bk.backend == constants.SB_TYPE_CEPH and
+                         bk.id != storage_ceph['id']]
+        storclass_names = [bk.capabilities.get(constants.K8S_RBD_PROV_STORAGECLASS_NAME)
+                           for bk in ceph_backends]
+        if name in storclass_names:
+            msg = _("RBD StorageClass name '%s' is already used by "
+                    "another backend." % name)
+            raise wsme.exc.ClientSideError(msg)
+
+
 def _check_backend_ceph(req, storage_ceph, confirmed=False):
     # check for the backend parameters
     capabilities = storage_ceph.get('capabilities', {})

-    # Discover the latest hiera_data for the supported service
-    _discover_and_validate_backend_hiera_data(capabilities, confirmed)
+    # Discover the latest config data for the supported service
+    _discover_and_validate_backend_config_data(capabilities, confirmed)

-    for k in HIERA_DATA['backend']:
+    for k in CAPABILITIES['backend']:
         if not capabilities.get(k, None):
-            raise wsme.exc.ClientSideError("Missing required backend "
-                                           "parameter: %s" % k)
+            raise wsme.exc.ClientSideError(_("Missing required backend "
+                                             "parameter: %s" % k))

-    # Check restrictions based on the primary or secondary backend.:
+    # Check restrictions based on the primary or secondary backend:
     if api_helper.is_primary_ceph_backend(storage_ceph['name']):
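A quick illustration of the label check above. Note that re.match only anchors at the start of the string, so this validation, as written, accepts some values that a fully anchored pattern (e.g. re.fullmatch with a trailing '?') would reject; the behaviour shown is that of the code above:

import re

r = re.compile(r'[a-z0-9]([-a-z0-9]*[a-z0-9])')

print(bool(r.match('kube-system')))  # True  - valid namespace
print(bool(r.match('Default')))      # False - upper case rejected
print(bool(r.match('-foo')))         # False - must start alphanumeric
print(bool(r.match('ab')))           # True
print(bool(r.match('a')))            # False - the group requires at
                                     # least two characters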
@@ -469,20 +524,21 @@ def _check_backend_ceph(req, storage_ceph, confirmed=False):
     req_services = api_helper.getListFromServices(storage_ceph)
     for svc in req_services:
         if svc not in supported_svcs:
-            raise wsme.exc.ClientSideError("Service %s is not supported for the"
-                                           " %s backend %s" %
-                                           (svc, constants.SB_TYPE_CEPH,
-                                            storage_ceph['name']))
+            raise wsme.exc.ClientSideError(
+                _("Service %s is not supported for the %s backend %s" %
+                  (svc, constants.SB_TYPE_CEPH, storage_ceph['name'])))

-        # Service is valid. Discover the latest hiera_data for the supported service
-        discover_func = eval('_discover_and_validate_' + svc + '_hiera_data')
-        discover_func(capabilities)
+        # Service is valid. Discover the latest config data for the supported
+        # service.
+        discover_func = eval(
+            '_discover_and_validate_' + svc.replace('-', '_') + '_capabilities')
+        discover_func(capabilities, storage_ceph)

         # Service is valid. Check the params
-        for k in HIERA_DATA[svc]:
+        for k in MANDATORY_CAP[svc]:
             if not capabilities.get(k, None):
-                raise wsme.exc.ClientSideError("Missing required %s service "
-                                               "parameter: %s" % (svc, k))
+                raise wsme.exc.ClientSideError(
+                    _("Missing required %s service parameter: %s" % (svc, k)))

     # TODO (rchurch): Remove this in R6 with object_gateway refactoring. Should
     # be enabled only if the service is present in the service list. Special
@@ -505,8 +561,8 @@ def _check_backend_ceph(req, storage_ceph, confirmed=False):
                 {'name': constants.SB_TIER_DEFAULT_NAMES[
                     constants.SB_TIER_TYPE_CEPH]})
         except exception.StorageTierNotFoundByName:
-            raise wsme.exc.ClientSideError(_("Default tier not found for"
-                                             " this backend."))
+            raise wsme.exc.ClientSideError(
+                _("Default tier not found for this backend."))
     else:
         raise wsme.exc.ClientSideError(_("No tier specified for this "
                                          "backend."))
@@ -538,28 +594,95 @@ def _check_backend_ceph(req, storage_ceph, confirmed=False):


 def check_and_update_services(storage_ceph):
     """Update backends' services that allow a single service instance."""
     req_services = api_helper.getListFromServices(storage_ceph)

     # If glance/nova is already a service on an external ceph backend,
     # remove it from there.
     check_svcs = [constants.SB_SVC_GLANCE, constants.SB_SVC_NOVA]
     check_data = {constants.SB_SVC_GLANCE: ['glance_pool'],
                   constants.SB_SVC_NOVA: ['ephemeral_pool']}

     for s in check_svcs:
         if s in req_services:
-            sb_list = pecan.request.dbapi.storage_backend_get_list()
+            for sb in pecan.request.dbapi.storage_backend_get_list():
+                if (sb.backend == constants.SB_TYPE_CEPH_EXTERNAL and
+                        s in sb.get('services')):
+                    services = api_helper.getListFromServices(sb)
+                    services.remove(s)
+                    cap = sb.capabilities
+                    for k in check_data[s]:
+                        cap.pop(k, None)
+                    values = {'services': ','.join(services),
+                              'capabilities': cap}
+                    pecan.request.dbapi.storage_backend_update(
+                        sb.uuid, values)
-            if sb_list:
-                for sb in sb_list:
-                    if (sb.backend == constants.SB_TYPE_CEPH_EXTERNAL and
-                            s in sb.get('services')):
-                        services = api_helper.getListFromServices(sb)
-                        services.remove(s)
-                        cap = sb.capabilities
-                        for k in check_data[s]:
-                            cap.pop(k, None)
-                        values = {'services': ','.join(services),
-                                  'capabilities': cap, }
-                        pecan.request.dbapi.storage_backend_update(sb.uuid, values)
+
+
+def validate_k8s_namespaces(values):
+    """ Check if a list of namespaces is configured in Kubernetes """
+    configured_namespaces = \
+        pecan.request.rpcapi.get_k8s_namespaces(pecan.request.context)
+    invalid_namespaces = []
+    for namespace in values:
+        if namespace not in configured_namespaces:
+            invalid_namespaces.append(namespace)
+
+    if invalid_namespaces:
+        msg = _("Error configuring rbd-provisioner service. "
+                "The following Kubernetes namespaces are not "
+                "configured: %s." % ', '.join(invalid_namespaces))
+        raise wsme.exc.ClientSideError(msg)
+
+
+def _check_and_update_rbd_provisioner(new_storceph, remove=False):
+    """ Check and/or update RBD Provisioner configuration """
+    capab = new_storceph['capabilities']
+    if remove:
+        # Remove the RBD Provisioner
+        del capab[constants.K8S_RBD_PROV_NAMESPACES]
+        if constants.K8S_RBD_PROV_STORAGECLASS_NAME in capab:
+            del capab[constants.K8S_RBD_PROV_STORAGECLASS_NAME]
+    else:
+        bk_services = api_helper.getListFromServices(new_storceph)
+        if constants.SB_SVC_RBD_PROVISIONER not in bk_services:
+            # RBD Provisioner service not involved, return early
+            return new_storceph
+
+        # Use default namespace if not specified
+        if not capab.get(constants.K8S_RBD_PROV_NAMESPACES):
+            capab[constants.K8S_RBD_PROV_NAMESPACES] = \
+                constants.K8S_RBD_PROV_NAMESPACE_DEFAULT
+
+        namespaces_to_add, namespaces_to_rm = \
+            K8RbdProvisioner.getNamespacesDelta(new_storceph)
+        if not namespaces_to_add and not namespaces_to_rm:
+            # No changes to namespaces, return early
+            return new_storceph
+
+        validate_k8s_namespaces(
+            K8RbdProvisioner.getListFromNamespaces(new_storceph))
+
+    # Check if cluster is configured
+    storage_hosts = pecan.request.dbapi.ihost_get_by_personality(
+        constants.STORAGE
+    )
+    available_storage_hosts = [h for h in storage_hosts if
+                               h['availability'] == constants.AVAILABILITY_AVAILABLE]
+    if not available_storage_hosts:
+        LOG.info("No storage hosts installed, delaying "
+                 "rbd-provisioner configuration.")
+        # Configuration will be resumed when the first storage node comes up
+        # and after pools are configured.
+        return new_storceph
+
+    # Cluster is configured, run live.
+    try:
+        new_storceph = \
+            pecan.request.rpcapi.check_and_update_rbd_provisioner(
+                pecan.request.context, new_storceph)
+    except Exception as e:
+        msg = _("Error configuring rbd-provisioner service. Please "
+                "investigate and try again: %s." % str(e))
+        raise wsme.exc.ClientSideError(msg)
+
+    return new_storceph


 def _apply_backend_changes(op, sb_obj):
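The add/remove decision above reduces to a set difference between the namespaces requested by the user and those already marked ready. A self-contained sketch mirroring K8RbdProvisioner.getNamespacesDelta (defined later in this diff), with made-up values:

requested = set('default,kube-system,apps'.split(','))
configured = set('kube-system,old-ns'.split(','))

namespaces_to_add = requested - configured  # {'default', 'apps'}
namespaces_to_rm = configured - requested   # {'old-ns'}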
@@ -644,7 +767,7 @@ def _set_defaults(storage_ceph):

     def_capabilities = {
         constants.CEPH_BACKEND_REPLICATION_CAP: def_replication,
-        constants.CEPH_BACKEND_MIN_REPLICATION_CAP: def_min_replication
+        constants.CEPH_BACKEND_MIN_REPLICATION_CAP: def_min_replication,
     }

     defaults = {
@@ -658,16 +781,20 @@ def _set_defaults(storage_ceph):
         'glance_pool_gib': None,
         'ephemeral_pool_gib': None,
         'object_pool_gib': None,
+        'kube_pool_gib': None,
         'object_gateway': False,
     }
     sc = api_helper.set_backend_data(storage_ceph,
                                      defaults,
-                                     HIERA_DATA,
+                                     CAPABILITIES,
                                      constants.SB_CEPH_SVCS_SUPPORTED)
     return sc


 def _create(storage_ceph):
+    # Validate provided capabilities at creation
+    _capabilities_semantic_checks(storage_ceph.get('capabilities', {}))
+
     # Set the default for the storage backend
     storage_ceph = _set_defaults(storage_ceph)

@@ -682,6 +809,10 @@ def _create(storage_ceph):
                                        storage_ceph,
                                        storage_ceph.pop('confirmed', False))

+    # Set up new rbd-provisioner keys and services early on.
+    # Failures here are critical and no backend should be created.
+    storage_ceph = _check_and_update_rbd_provisioner(storage_ceph)
+
     check_and_update_services(storage_ceph)

     # Conditionally update the DB based on any previous create attempts. This
@@ -717,22 +848,33 @@ def _create(storage_ceph):
 # Update/Modify/Patch
 #

-def _hiera_data_semantic_checks(caps_dict):
-    """ Validate each individual data value to make sure it's of the correct
-        type and value.
-    """
-    # Filter out unsupported parameters which have been passed
-    valid_hiera_data = {}
+def _capabilities_semantic_checks(caps_dict):
+    """ Early check of capabilities """
+
+    # Get supported capabilities
+    valid_data = {}
     for key in caps_dict:
-        if key in HIERA_DATA['backend']:
-            valid_hiera_data[key] = caps_dict[key]
+        if key in CAPABILITIES['backend']:
+            valid_data[key] = caps_dict[key]
             continue
         for svc in constants.SB_CEPH_SVCS_SUPPORTED:
-            if key in HIERA_DATA[svc]:
-                valid_hiera_data[key] = caps_dict[key]
+            if key in CAPABILITIES[svc]:
+                valid_data[key] = caps_dict[key]

-    return valid_hiera_data
+    # Raise an exception if unsupported capabilities are passed
+    invalid_data = set(caps_dict.keys()) - set(valid_data.keys())
+    if valid_data.keys() != caps_dict.keys():
+        # Build a short customer message listing the supported capabilities
+        # so they can be looked up in the manual.
+        params = " backend: %s\n" % ", ".join(CAPABILITIES['backend'])
+        for svc in constants.SB_CEPH_SVCS_SUPPORTED:
+            if CAPABILITIES[svc]:
+                params += " %s service: %s\n" % (svc, ", ".join(CAPABILITIES[svc]))
+        msg = ("Invalid Ceph parameters: '%s', supported "
+               "parameters:\n%s" % (", ".join(invalid_data), params))
+        raise wsme.exc.ClientSideError(msg)
+
+    return valid_data


 def _pre_patch_checks(storage_ceph_obj, patch_obj):
@@ -743,7 +885,7 @@ def _pre_patch_checks(storage_ceph_obj, patch_obj):
             patch_caps_dict = p['value']

             # Validate the change to make sure it is valid
-            patch_caps_dict = _hiera_data_semantic_checks(patch_caps_dict)
+            patch_caps_dict = _capabilities_semantic_checks(patch_caps_dict)

             # If 'replication' parameter is provided with a valid value and optional
             # 'min_replication' parameter is not provided, default its value
@@ -782,7 +924,10 @@ def _pre_patch_checks(storage_ceph_obj, patch_obj):

             # Make sure we aren't removing a service on the primary tier - not currently supported.
             if len(current_svcs - updated_svcs):
-                if api_helper.is_primary_ceph_tier(storage_ceph_obj.tier_name):
+                new_svc = current_svcs - updated_svcs
+                if (api_helper.is_primary_ceph_tier(
+                        storage_ceph_obj.tier_name) and
+                        new_svc != set([constants.SB_SVC_RBD_PROVISIONER])):
                     raise wsme.exc.ClientSideError(
                         _("Removing %s is not supported.") % ','.join(
                             current_svcs - updated_svcs))
@@ -794,7 +939,8 @@ def _is_quotaconfig_changed(ostorceph, storceph):
         if (storceph.cinder_pool_gib != ostorceph.cinder_pool_gib or
                 storceph.glance_pool_gib != ostorceph.glance_pool_gib or
                 storceph.ephemeral_pool_gib != ostorceph.ephemeral_pool_gib or
-                storceph.object_pool_gib != ostorceph.object_pool_gib):
+                storceph.object_pool_gib != ostorceph.object_pool_gib or
+                storceph.kube_pool_gib != ostorceph.kube_pool_gib):
             return True
         return False

@@ -812,13 +958,15 @@ def _check_pool_quotas_data(ostorceph, storceph):
     pools_key = ['cinder_pool_gib',
                  'glance_pool_gib',
                  'ephemeral_pool_gib',
-                 'object_pool_gib']
+                 'object_pool_gib',
+                 'kube_pool_gib']
     for k in pools_key:
         if storceph[k]:
-            if (k != 'cinder_pool_gib' and not
+            if (k != 'cinder_pool_gib' and k != 'kube_pool_gib' and not
                     api_helper.is_primary_ceph_backend(storceph['name'])):
-                raise wsme.exc.ClientSideError(_("Secondary ceph backend only "
-                                                 "supports cinder pool."))
+                raise wsme.exc.ClientSideError(_(
+                    "Secondary ceph backend only supports cinder and kube "
+                    "pools."))

             if (not cutils.is_int_like(storceph[k]) or
                     int(storceph[k]) < 0):
@@ -850,6 +998,15 @@ def _check_pool_quotas_data(ostorceph, storceph):
                         "must be greater than the already occupied space (%s GiB)")
                         % (storceph['cinder_pool_gib'],
                            float(ceph_pool['stats']['bytes_used']) / (1024 ** 3)))
+            elif ceph_pool['name'] == constants.CEPH_POOL_KUBE_NAME:
+                if (int(storceph['kube_pool_gib']) > 0 and
+                        (int(ceph_pool['stats']['bytes_used']) >
+                         int(storceph['kube_pool_gib'] * 1024 ** 3))):
+                    raise wsme.exc.ClientSideError(
+                        _("The configured quota for the kube pool (%s GiB) "
+                          "must be greater than the already occupied space (%s GiB)")
+                        % (storceph['kube_pool_gib'],
+                           float(ceph_pool['stats']['bytes_used']) / (1024 ** 3)))
             elif ceph_pool['name'] == constants.CEPH_POOL_EPHEMERAL_NAME:
                 if (int(storceph['ephemeral_pool_gib']) > 0 and
                         (int(ceph_pool['stats']['bytes_used']) >
@@ -888,6 +1045,15 @@ def _check_pool_quotas_data(ostorceph, storceph):
                         "must be greater than the already occupied space (%s GiB)")
                         % (storceph['cinder_pool_gib'],
                            float(ceph_pool['stats']['bytes_used']) / (1024 ** 3)))
+            elif K8RbdProvisioner.get_pool(storceph) == ceph_pool['name']:
+                if (int(storceph['kube_pool_gib']) > 0 and
+                        (int(ceph_pool['stats']['bytes_used']) >
+                         int(storceph['kube_pool_gib'] * 1024 ** 3))):
+                    raise wsme.exc.ClientSideError(
+                        _("The configured quota for the kube pool (%s GiB) "
+                          "must be greater than the already occupied space (%s GiB)")
+                        % (storceph['kube_pool_gib'],
+                           float(ceph_pool['stats']['bytes_used']) / (1024 ** 3)))

     # sanity check the quota
     total_quota_gib = 0
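The quota comparison above mixes GiB (user-facing) and bytes (Ceph's bytes_used statistic). A worked example of the conversion, using the 20 GiB kube pool default introduced by this change; the occupied value is made up:

kube_pool_gib = 20
quota_bytes = kube_pool_gib * 1024 ** 3        # 21474836480 bytes

bytes_used = 5 * 1024 ** 3                     # pretend 5 GiB occupied
ok = not (bytes_used > quota_bytes)            # True: quota is large enough
occupied_gib = float(bytes_used) / (1024 ** 3) # 5.0, as shown in error messages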
@@ -929,11 +1095,17 @@ def _update_pool_quotas(storceph):
                  {'name': constants.CEPH_POOL_EPHEMERAL_NAME,
                   'quota_key': 'ephemeral_pool_gib'},
                  {'name': object_pool_name,
-                  'quota_key': 'object_pool_gib'}]
+                  'quota_key': 'object_pool_gib'},
+                 {'name': constants.CEPH_POOL_KUBE_NAME,
+                  'quota_key': 'kube_pool_gib'}]
     else:
         pools = [{'name': "{0}-{1}".format(constants.CEPH_POOL_VOLUMES_NAME,
                                            storceph['tier_name']),
-                  'quota_key': 'cinder_pool_gib'}]
+                  'quota_key': 'cinder_pool_gib'},
+                 {'name': "{0}-{1}".format(constants.CEPH_POOL_KUBE_NAME,
+                                           storceph['tier_name']),
+                  'quota_key': 'kube_pool_gib'}
+                 ]

     for p in pools:
         if storceph[p['quota_key']] is not None:
@@ -959,7 +1131,6 @@ def _patch(storceph_uuid, patch):
                                                   storceph_uuid)

     object_gateway_install = False
-    add_nova_only = False
     patch_obj = jsonpatch.JsonPatch(patch)
     for p in patch_obj:
         if p['path'] == '/capabilities':
@@ -991,14 +1162,52 @@ def _patch(storceph_uuid, patch):
                           'glance_pool_gib',
                           'ephemeral_pool_gib',
                           'object_pool_gib',
+                          'kube_pool_gib',
                           'object_gateway']
     quota_attributes = ['cinder_pool_gib', 'glance_pool_gib',
-                        'ephemeral_pool_gib', 'object_pool_gib']
+                        'ephemeral_pool_gib', 'object_pool_gib',
+                        'kube_pool_gib']

     if len(delta) == 0 and rpc_storceph['state'] != constants.SB_STATE_CONFIG_ERR:
         raise wsme.exc.ClientSideError(
             _("No changes to the existing backend settings were detected."))

+    # Get changes to services
+    services_added = (
+        set(api_helper.getListFromServices(storceph_config.as_dict())) -
+        set(api_helper.getListFromServices(ostorceph.as_dict()))
+    )
+
+    services_removed = (
+        set(api_helper.getListFromServices(ostorceph.as_dict())) -
+        set(api_helper.getListFromServices(storceph_config.as_dict()))
+    )
+
+    # Some services allow fast settings updates; check if we are in this case.
+    # Adding/removing these services, or only changing the configuration they
+    # depend on, will not trigger manifest application.
+    fast_config = False
+    if not (delta - set(['capabilities']) - set(['services'])):
+        fast_cfg_services = [constants.SB_SVC_NOVA,
+                             constants.SB_SVC_RBD_PROVISIONER]
+
+        # Changes to unrelated capabilities?
+        storceph_cap = storceph_config.as_dict()['capabilities'].items()
+        ostorceph_cap = ostorceph.as_dict()['capabilities'].items()
+        related_cap = []
+        for service in fast_cfg_services:
+            related_cap.extend(CAPABILITIES[service])
+        cap_modified = dict(set(storceph_cap) - set(ostorceph_cap))
+        unrelated_cap_modified = [k for k in cap_modified.keys()
+                                  if k not in related_cap]
+
+        # Changes to unrelated services?
+        unrelated_services_modified = ((set(services_added) |
+                                        set(services_removed)) -
+                                       set(fast_cfg_services))
+
+        if not unrelated_services_modified and not unrelated_cap_modified:
+            # We only have changes to fast configurable services and/or
+            # to their capabilities.
+            fast_config = True
+
     quota_only_update = True
     for d in delta:
         if d not in allowed_attributes:
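A condensed sketch of the fast-path decision above: a modification skips manifest application only when every changed service and every changed capability belongs to a fast-configurable service. The values below are illustrative:

fast_cfg_services = {'nova', 'rbd-provisioner'}
related_cap = {'rbd_provisioner_namespaces', 'rbd_storageclass_name'}

changed_services = {'rbd-provisioner'}                 # added/removed
changed_caps = {'rbd_provisioner_namespaces': 'apps'}  # modified values

fast_config = (not (changed_services - fast_cfg_services) and
               all(k in related_cap for k in changed_caps))
# True here, so only the rbd-provisioner is reconfigured and no
# manifests are reapplied.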
@@ -1032,11 +1241,7 @@ def _patch(storceph_uuid, patch):
                     storceph_config.object_gateway = True
                     storceph_config.task = constants.SB_TASK_ADD_OBJECT_GATEWAY
                     object_gateway_install = True
-            if ((set(api_helper.getListFromServices(storceph_config.as_dict())) -
-                    set(api_helper.getListFromServices(ostorceph.as_dict())) ==
-                    set([constants.SB_SVC_NOVA])) and
-                    (delta == set(['services']))):
-                add_nova_only = True
+
         elif d == 'capabilities':
             # Go through capabilities parameters and check
             # if any values changed
@@ -1057,9 +1262,9 @@ def _patch(storceph_uuid, patch):
                 if constants.CEPH_BACKEND_REPLICATION_CAP in new_cap and \
                         constants.CEPH_BACKEND_REPLICATION_CAP in orig_cap:

-                    # Currently, the only moment when we allow modification
-                    # of ceph storage backend parameters is after the manifests have
-                    # been applied and before first storage node has been configured.
+                    # Currently, the only moment when we allow modification of ceph
+                    # storage backend parameters is after the manifests have been
+                    # applied and before first storage node has been configured.
                     ceph_task = StorageBackendConfig.get_ceph_backend_task(pecan.request.dbapi)
                     ceph_state = StorageBackendConfig.get_ceph_backend_state(pecan.request.dbapi)
                     if ceph_task != constants.SB_TASK_PROVISION_STORAGE and \
@@ -1102,8 +1307,8 @@ def _patch(storceph_uuid, patch):
         _check_pool_quotas_data(ostorceph, storceph_config.as_dict())

     if not quota_only_update:
-        # Execute the common semantic checks for all backends, if backend is not
-        # present this will not return
+        # Execute the common semantic checks for all backends, if backend is
+        # not present this will not return.
         api_helper.common_checks(constants.SB_API_OP_MODIFY,
                                  rpc_storceph.as_dict())

@@ -1118,20 +1323,26 @@ def _patch(storceph_uuid, patch):
     if object_gateway_install:
         _check_object_gateway_install()

-    # Update current ceph storage object again for object_gateway delta adjustments
+    # Update current ceph storage object again for object_gateway delta
+    # adjustments.
     for field in objects.storage_ceph.fields:
         if (field in storceph_config.as_dict() and
                 rpc_storceph[field] != storceph_config.as_dict()[field]):
            rpc_storceph[field] = storceph_config.as_dict()[field]

+    # Perform changes to the RBD Provisioner service
+    remove_rbd_provisioner = constants.SB_SVC_RBD_PROVISIONER in services_removed
+    ret = _check_and_update_rbd_provisioner(rpc_storceph.as_dict(),
+                                            remove_rbd_provisioner)
+    rpc_storceph['capabilities'] = ret['capabilities']
+
     LOG.info("SYS_I new storage_ceph: %s " % rpc_storceph.as_dict())
     try:
         check_and_update_services(rpc_storceph.as_dict())

         rpc_storceph.save()

-        if ((not quota_only_update and not add_nova_only) or
-                (storceph_config.state == constants.SB_STATE_CONFIG_ERR)):
+        if ((not quota_only_update and not fast_config) or
+                (storceph_config.state == constants.SB_STATE_CONFIG_ERR)):
             # Enable the backend changes:
             _apply_backend_changes(constants.SB_API_OP_MODIFY,
                                    rpc_storceph)
@@ -1145,6 +1356,18 @@ def _patch(storceph_uuid, patch):
               " patch %s"
               % (storceph_config, patch))
         raise wsme.exc.ClientSideError(msg)
+    except Exception as e:
+        rpc_storceph = objects.storage_ceph.get_by_uuid(
+            pecan.request.context,
+            storceph_uuid)
+        for field in allowed_attributes:
+            if (field in ostorceph.as_dict() and
+                    rpc_storceph[field] != ostorceph.as_dict()[field]):
+                rpc_storceph[field] = ostorceph.as_dict()[field]
+        rpc_storceph.save()
+        msg = _("There was an error trying to update the backend. Please "
+                "investigate and try again: %s" % str(e))
+        raise wsme.exc.ClientSideError(msg)

 #
 # Delete
@@ -644,3 +644,10 @@ class SBApiHelper(object):
         if name_string == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
             return True
         return False
+
+    @staticmethod
+    def remove_service_from_backend(sb, svc_name):
+        services = SBApiHelper.getListFromServices(sb)
+        services.remove(svc_name)
+        pecan.request.dbapi.storage_backend_update(
+            sb.id, {'services': ','.join(services)})
@@ -699,3 +699,14 @@ class CephApiOperator(object):
         num_active_monitors = len(active_monitors)

         return num_active_monitors, required_monitors, active_monitors
+
+    def list_osd_pools(self):
+        """List all osd pools."""
+        resp, pools = self._ceph_api.osd_pool_ls(body='json')
+        if not resp.ok:
+            e = exception.CephPoolListFailure(
+                reason=resp.reason)
+            LOG.error(e)
+            raise e
+        else:
+            return pools['output']
@@ -362,10 +362,13 @@ SB_SVC_CINDER = 'cinder'
 SB_SVC_GLANCE = 'glance'
 SB_SVC_NOVA = 'nova'
 SB_SVC_SWIFT = 'swift'
+SB_SVC_RBD_PROVISIONER = 'rbd-provisioner'

 SB_FILE_SVCS_SUPPORTED = [SB_SVC_GLANCE]
 SB_LVM_SVCS_SUPPORTED = [SB_SVC_CINDER]
-SB_CEPH_SVCS_SUPPORTED = [SB_SVC_GLANCE, SB_SVC_CINDER, SB_SVC_SWIFT, SB_SVC_NOVA]  # supported primary tier svcs
+# Primary tier supported services.
+SB_CEPH_SVCS_SUPPORTED = [SB_SVC_GLANCE, SB_SVC_CINDER, SB_SVC_SWIFT,
+                          SB_SVC_NOVA, SB_SVC_RBD_PROVISIONER]
 SB_CEPH_EXTERNAL_SVCS_SUPPORTED = [SB_SVC_CINDER, SB_SVC_GLANCE, SB_SVC_NOVA]
 SB_EXTERNAL_SVCS_SUPPORTED = [SB_SVC_CINDER, SB_SVC_GLANCE]
@@ -384,7 +387,9 @@ SB_TIER_SUPPORTED = [SB_TIER_TYPE_CEPH]
 SB_TIER_DEFAULT_NAMES = {
     SB_TIER_TYPE_CEPH: 'storage'  # maps to crushmap 'storage-tier' root
 }
-SB_TIER_CEPH_SECONDARY_SVCS = [SB_SVC_CINDER]  # supported secondary tier svcs
+
+# Supported secondary tier services.
+SB_TIER_CEPH_SECONDARY_SVCS = [SB_SVC_CINDER, SB_SVC_RBD_PROVISIONER]

 SB_TIER_STATUS_DEFINED = 'defined'
 SB_TIER_STATUS_IN_USE = 'in-use'
@@ -705,6 +710,11 @@ CEPH_POOL_EPHEMERAL_PG_NUM = 512
 CEPH_POOL_EPHEMERAL_PGP_NUM = 512
 CEPH_POOL_EPHEMERAL_QUOTA_GIB = 0

+CEPH_POOL_KUBE_NAME = 'kube-rbd'
+CEPH_POOL_KUBE_PG_NUM = 128
+CEPH_POOL_KUBE_PGP_NUM = 128
+CEPH_POOL_KUBE_QUOTA_GIB = 20
+
 # Ceph RADOS Gateway default data pool
 # Hammer version pool name will be kept if upgrade from R3 and
 # Swift/Radosgw was configured/enabled in R3.
@@ -724,21 +734,26 @@ CEPH_POOLS = [{'pool_name': CEPH_POOL_VOLUMES_NAME,
                'pg_num': CEPH_POOL_VOLUMES_PG_NUM,
                'pgp_num': CEPH_POOL_VOLUMES_PGP_NUM,
                'quota_gib': None,
-               'data_pt': 40},
+               'data_pt': 35},
               {'pool_name': CEPH_POOL_IMAGES_NAME,
                'pg_num': CEPH_POOL_IMAGES_PG_NUM,
                'pgp_num': CEPH_POOL_IMAGES_PGP_NUM,
                'quota_gib': None,
-               'data_pt': 20},
+               'data_pt': 18},
              {'pool_name': CEPH_POOL_EPHEMERAL_NAME,
               'pg_num': CEPH_POOL_EPHEMERAL_PG_NUM,
               'pgp_num': CEPH_POOL_EPHEMERAL_PGP_NUM,
               'quota_gib': None,
-              'data_pt': 30},
+              'data_pt': 27},
              {'pool_name': CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL,
               'pg_num': CEPH_POOL_OBJECT_GATEWAY_PG_NUM,
               'pgp_num': CEPH_POOL_OBJECT_GATEWAY_PGP_NUM,
               'quota_gib': None,
-              'data_pt': 10}]
+              'data_pt': 10},
+             {'pool_name': CEPH_POOL_KUBE_NAME,
+              'pg_num': CEPH_POOL_KUBE_PG_NUM,
+              'pgp_num': CEPH_POOL_KUBE_PGP_NUM,
+              'quota_gib': None,
+              'data_pt': 10}]

 ALL_CEPH_POOLS = [CEPH_POOL_RBD_NAME,
@@ -746,7 +761,8 @@ ALL_CEPH_POOLS = [CEPH_POOL_RBD_NAME,
                   CEPH_POOL_IMAGES_NAME,
                   CEPH_POOL_EPHEMERAL_NAME,
                   CEPH_POOL_OBJECT_GATEWAY_NAME_JEWEL,
-                  CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER]
+                  CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER,
+                  CEPH_POOL_KUBE_NAME]

 # Supported pools for secondary ceph tiers
 SB_TIER_CEPH_POOLS = [
@@ -755,7 +771,13 @@ SB_TIER_CEPH_POOLS = [
      'pgp_num': CEPH_POOL_VOLUMES_PGP_NUM,
      'be_quota_attr': 'cinder_pool_gib',
      'quota_default': 0,
-     'data_pt': 100}]
+     'data_pt': 80},
+    {'pool_name': CEPH_POOL_KUBE_NAME,
+     'pg_num': CEPH_POOL_KUBE_PG_NUM,
+     'pgp_num': CEPH_POOL_KUBE_PGP_NUM,
+     'be_quota_attr': 'kube_pool_gib',
+     'quota_default': 20,
+     'data_pt': 20}]

 # See http://ceph.com/pgcalc/. We set it to more than 100 because pool usage
 # varies greatly in Titanium Cloud and we want to avoid running too low on PGs
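The data_pt weights drive per-pool PG sizing. After this change the primary-tier weights still total 100, with ten points carved out of the volumes/images/ephemeral pools for the new kube-rbd pool; the secondary tier splits 80/20. A quick check (the pool labels here are illustrative, only the weights come from the tables above):

primary_tier_pts = {'volumes': 35, 'images': 18,
                    'ephemeral': 27, 'object-gateway': 10, 'kube-rbd': 10}
assert sum(primary_tier_pts.values()) == 100

secondary_tier_pts = {'volumes': 80, 'kube-rbd': 20}
assert sum(secondary_tier_pts.values()) == 100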
@@ -1425,3 +1447,16 @@ SUPPORTED_HELM_APP_CHARTS = {
         HELM_CHART_MAGNUM
     ]
 }
+
+# RBD Provisioner Ceph backend capabilities fields
+K8S_RBD_PROV_STORAGECLASS_NAME = 'rbd_storageclass_name'  # Customer
+K8S_RBD_PROV_NAMESPACES = 'rbd_provisioner_namespaces'  # Customer
+K8S_RBD_PROV_NAMESPACES_READY = '.rbd_provisioner_namespaces_ready'  # Hidden
+K8S_RBD_PROV_ADMIN_SECRET_READY = '.k8s_admin_secret_ready'  # Hidden
+K8S_RBD_PROV_CEPH_POOL_KEY_READY = '.k8s_pool_secret_ready'  # Hidden
+
+# RBD Provisioner defaults and constants
+K8S_RBD_PROV_NAMESPACE_DEFAULT = "kube-system"
+K8S_RBD_PROV_USER_NAME = 'admin'
+K8S_RBD_PROV_ADMIN_SECRET_NAME = 'ceph-admin'
+K8S_RBD_PROV_STOR_CLASS_NAME = 'general'
@@ -17,6 +17,7 @@ import ast

 from sysinv.common import constants
 from sysinv.common import exception
+from sysinv.common import utils as cutils
 from sysinv.openstack.common.gettextutils import _
 from sysinv.openstack.common import log
@@ -435,3 +436,209 @@ class StorageBackendConfig(object):
             return True
         else:
             return False
+
+
+class K8RbdProvisioner(object):
+    """ Utility methods for getting the k8s overrides for internal ceph
+    from a corresponding storage backend.
+    """
+
+    @staticmethod
+    def getListFromNamespaces(bk, get_configured=False):
+        cap = bk['capabilities']
+        capab_type = constants.K8S_RBD_PROV_NAMESPACES if not get_configured else \
+            constants.K8S_RBD_PROV_NAMESPACES_READY
+
+        return [] if not cap.get(capab_type) else \
+            cap[capab_type].split(',')
+
+    @staticmethod
+    def setNamespacesFromList(bk, namespace_list, set_configured=False):
+        capab_type = constants.K8S_RBD_PROV_NAMESPACES if not set_configured else \
+            constants.K8S_RBD_PROV_NAMESPACES_READY
+        bk[capab_type] = ','.join(namespace_list)
+        return bk[capab_type]
+
+    @staticmethod
+    def getNamespacesDelta(bk):
+        """ Get changes in namespaces
+        :returns: namespaces_to_add, namespaces_to_rm
+        """
+        namespaces = K8RbdProvisioner.getListFromNamespaces(bk)
+        namespaces_configured = \
+            K8RbdProvisioner.getListFromNamespaces(bk, get_configured=True)
+        namespaces_to_add = set(namespaces) - set(namespaces_configured)
+        namespaces_to_rm = set(namespaces_configured) - set(namespaces)
+        return namespaces_to_add, namespaces_to_rm
+
+    @staticmethod
+    def get_storage_class_name(bk):
+        """ Get the name of the storage class for an rbd provisioner
+        :param bk: Ceph storage backend object
+        :returns: name of the rbd provisioner storage class
+        """
+        if bk['capabilities'].get(constants.K8S_RBD_PROV_STORAGECLASS_NAME):
+            name = bk['capabilities'][constants.K8S_RBD_PROV_STORAGECLASS_NAME]
+        elif bk.name == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
+            name = constants.K8S_RBD_PROV_STOR_CLASS_NAME
+        else:
+            name = bk.name + '-' + constants.K8S_RBD_PROV_STOR_CLASS_NAME
+
+        return str(name)
+
+    @staticmethod
+    def get_pool(bk):
+        """ Get the name of the ceph pool for an rbd provisioner
+        This naming convention is valid only for internal backends
+        :param bk: Ceph storage backend object
+        :returns: name of the ceph pool
+        """
+        if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
+            return constants.CEPH_POOL_KUBE_NAME
+        else:
+            return str(constants.CEPH_POOL_KUBE_NAME + '-' + bk['name'])
+
+    @staticmethod
+    def get_user_id(bk):
+        """ Get the non admin user name for an rbd provisioner secret
+        :param bk: Ceph storage backend object
+        :returns: name of the ceph user
+        """
+        if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
+            name = K8RbdProvisioner.get_pool(bk)
+        else:
+            name = K8RbdProvisioner.get_pool(bk)
+
+        prefix = 'ceph-pool'
+        return str(prefix + '-' + name)
+
+    @staticmethod
+    def get_user_secret_name(bk):
+        """ Get the name for the non admin secret key of a pool
+        :param bk: Ceph storage backend object
+        :returns: name of the k8s secret
+        """
+        if bk['name'] == constants.SB_DEFAULT_NAMES[constants.SB_TYPE_CEPH]:
+            name = K8RbdProvisioner.get_pool(bk)
+        else:
+            name = K8RbdProvisioner.get_pool(bk)
+
+        base_name = 'ceph-pool'
+        return str(base_name + '-' + name)
+
+    @staticmethod
+    def get_k8s_secret(secret_name, namespace=None):
+        try:
+            cmd = ['kubectl', '--kubeconfig=/etc/kubernetes/admin.conf',
+                   'get', 'secrets', secret_name]
+            if namespace:
+                cmd.append('--namespace=%s' % namespace)
+            stdout, _ = cutils.execute(*cmd, run_as_root=False)
+        except exception.ProcessExecutionError as e:
+            if "not found" in e.stderr.lower():
+                return None
+            raise exception.SysinvException(
+                "Error getting secret: %s in namespace: %s, "
+                "Details: %s" % (secret_name, namespace, str(e)))
+
+        return stdout
+
+    @staticmethod
+    def create_k8s_pool_secret(bk, key=None, namespace=None, force=False):
+        user_secret_name = K8RbdProvisioner.get_user_secret_name(bk)
+
+        if K8RbdProvisioner.get_k8s_secret(user_secret_name,
+                                           namespace=namespace):
+            if not force:
+                return
+            # Key already exists
+            LOG.warning("K8S Secret for backend: %s and namespace: %s exists "
+                        "and should not be present! Removing existing and "
+                        "creating a new one." % (bk['name'], namespace))
+            K8RbdProvisioner.remove_k8s_pool_secret(bk, namespace)
+
+        LOG.info("Creating Kubernetes RBD Provisioner Ceph pool secret "
+                 "for namespace: %s." % namespace)
+        try:
+            # Create the k8s secret for the given Ceph pool and namespace.
+            cmd = ['kubectl', '--kubeconfig=/etc/kubernetes/admin.conf',
+                   'create', 'secret', 'generic',
+                   user_secret_name,
+                   '--type=kubernetes.io/rbd']
+            if key:
+                cmd.append('--from-literal=key=%s' % key)
+            if namespace:
+                cmd.append('--namespace=%s' % namespace)
+            _, _ = cutils.execute(*cmd, run_as_root=False)
+        except exception.ProcessExecutionError as e:
+            raise exception.SysinvException(
+                "Could not create Kubernetes secret: %s for backend: %s, "
+                "namespace: %s, Details: %s." %
+                (user_secret_name, bk['name'], namespace, str(e)))
+
+    @staticmethod
+    def remove_k8s_pool_secret(bk, namespace):
+        user_secret_name = K8RbdProvisioner.get_user_secret_name(bk)
+        if not K8RbdProvisioner.get_k8s_secret(user_secret_name,
+                                               namespace=namespace):
+            LOG.warning("K8S secret for backend: %s and namespace: %s "
+                        "does not exist. Skipping removal." %
+                        (bk['name'], namespace))
+            return
+        try:
+            # Remove the k8s secret from the given namespace.
+            cmd = ['kubectl', '--kubeconfig=/etc/kubernetes/admin.conf',
+                   'delete', 'secret', user_secret_name,
+                   '--namespace=%s' % namespace]
+            _, _ = cutils.execute(*cmd, run_as_root=False)
+        except exception.ProcessExecutionError as e:
+            raise exception.SysinvException(
+                "Could not remove Kubernetes secret: %s for backend: %s, "
+                "namespace: %s, Details: %s." %
+                (user_secret_name, bk['name'], namespace, str(e)))
+
+    @staticmethod
+    def create_k8s_admin_secret():
+        admin_secret_name = constants.K8S_RBD_PROV_ADMIN_SECRET_NAME
+        namespace = constants.K8S_RBD_PROV_NAMESPACE_DEFAULT
+
+        if K8RbdProvisioner.get_k8s_secret(
+                admin_secret_name, namespace=namespace):
+            # Key already exists
+            return
+
+        LOG.info("Creating Kubernetes RBD Provisioner Ceph admin secret.")
+        try:
+            # TODO(oponcea): Get admin key on Ceph clusters with
+            # enabled authentication. For now feed an empty key
+            # to satisfy RBD Provisioner requirements.
+            cmd = ['kubectl', '--kubeconfig=/etc/kubernetes/admin.conf',
+                   'create', 'secret', 'generic',
+                   admin_secret_name,
+                   '--type=kubernetes.io/rbd',
+                   '--from-literal=key=']
+            cmd.append('--namespace=%s' % namespace)
+            _, _ = cutils.execute(*cmd, run_as_root=False)
+        except exception.ProcessExecutionError as e:
+            raise exception.SysinvException(
+                "Could not create Kubernetes secret: %s, namespace: %s, "
+                "Details: %s" % (admin_secret_name, namespace, str(e)))
+
+    @staticmethod
+    def remove_k8s_admin_secret():
+        admin_secret_name = constants.K8S_RBD_PROV_ADMIN_SECRET_NAME
+        namespace = constants.K8S_RBD_PROV_NAMESPACE_DEFAULT
+
+        if not K8RbdProvisioner.get_k8s_secret(
+                admin_secret_name, namespace=namespace):
+            # Secret does not exist.
+            return
+
+        LOG.info("Removing Kubernetes RBD Provisioner Ceph admin secret.")
+        try:
+            cmd = ['kubectl', '--kubeconfig=/etc/kubernetes/admin.conf',
+                   'delete', 'secret', admin_secret_name,
+                   '--namespace=%s' % namespace]
+            _, _ = cutils.execute(*cmd, run_as_root=False)
+        except exception.ProcessExecutionError as e:
+            raise exception.SysinvException(
+                "Could not delete Kubernetes secret: %s, namespace: %s, "
+                "Details: %s." % (admin_secret_name, namespace, str(e)))
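Putting the naming helpers above together: for a hypothetical primary backend named ceph-store and a secondary backend gold (the backend names are assumed; only 'kube-rbd', 'ceph-pool' and 'general' come from the code), the helpers yield:

# Primary backend (the default Ceph backend):
#   pool:          kube-rbd
#   user id:       ceph-pool-kube-rbd
#   user secret:   ceph-pool-kube-rbd
#   storage class: general
#
# Secondary backend 'gold':
#   pool:          kube-rbd-gold
#   user id:       ceph-pool-kube-rbd-gold
#   user secret:   ceph-pool-kube-rbd-gold
#   storage class: gold-general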
@@ -25,6 +25,9 @@ from sysinv.common import utils as cutils
 from sysinv.openstack.common import log as logging
 from sysinv.openstack.common import uuidutils
 from sysinv.common.storage_backend_conf import StorageBackendConfig
+from sysinv.common.storage_backend_conf import K8RbdProvisioner
+
+from sysinv.api.controllers.v1 import utils

 from sysinv.openstack.common.gettextutils import _
 from sysinv.openstack.common import rpc
@@ -626,7 +629,8 @@ class CephOperator(object):
         default_quota_map = {'cinder': constants.CEPH_POOL_VOLUMES_QUOTA_GIB,
                              'glance': constants.CEPH_POOL_IMAGES_QUOTA_GIB,
                              'ephemeral': constants.CEPH_POOL_EPHEMERAL_QUOTA_GIB,
-                             'object': constants.CEPH_POOL_OBJECT_GATEWAY_QUOTA_GIB}
+                             'object': constants.CEPH_POOL_OBJECT_GATEWAY_QUOTA_GIB,
+                             'kube': constants.CEPH_POOL_KUBE_QUOTA_GIB}

         storage_ceph = StorageBackendConfig.get_configured_backend_conf(
             self._db_api,
@@ -634,7 +638,7 @@ class CephOperator(object):
         )

         quotas = []
-        for p in ['cinder', 'glance', 'ephemeral', 'object']:
+        for p in ['cinder', 'glance', 'ephemeral', 'object', 'kube']:
             quota_attr = p + '_pool_gib'
             quota_val = getattr(storage_ceph, quota_attr)

@@ -651,10 +655,12 @@ class CephOperator(object):
     def set_quota_gib(self, pool_name):
         quota_gib_value = None
         cinder_pool_gib, glance_pool_gib, ephemeral_pool_gib, \
-            object_pool_gib = self.get_pools_values()
+            object_pool_gib, kube_pool_gib = self.get_pools_values()

         if pool_name.find(constants.CEPH_POOL_VOLUMES_NAME) != -1:
             quota_gib_value = cinder_pool_gib
+        elif pool_name.find(constants.CEPH_POOL_KUBE_NAME) != -1:
+            quota_gib_value = kube_pool_gib
         elif pool_name.find(constants.CEPH_POOL_IMAGES_NAME) != -1:
             quota_gib_value = glance_pool_gib
         elif pool_name.find(constants.CEPH_POOL_EPHEMERAL_NAME) != -1:
@@ -729,6 +735,25 @@ class CephOperator(object):
                      constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER)
             self.delete_osd_pool(constants.CEPH_POOL_OBJECT_GATEWAY_NAME_HAMMER)

+    def _configure_pool_key(self, pool_name):
+        """Get CEPH key for a certain pool."""
+        response, body = ("", "")
+        caps_dict = {'mon': 'allow r',
+                     'osd': 'allow rwx pool=%s' % pool_name}
+        entity = "client.%s" % pool_name
+        try:
+            response, body = ("", "")
+            response, body = self._ceph_api.auth_get_or_create(
+                entity, caps_dict, body='json', timeout=10)
+            auth_result = body['output']
+            rc = auth_result[0].get('key')
+        except Exception as e:
+            rc = None
+            LOG.info("CEPH auth exception: %s response: %s body: %s" %
+                     (str(e), str(response), str(body)))
+
+        return rc
+
     def _configure_primary_tier_pool(self, pool, size, min_size):
         """Configure the default Ceph tier pools."""

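_configure_pool_key provisions a per-pool Ceph identity whose capabilities only allow reading the monitors and read/write/execute access on that single pool. Roughly the equivalent hand-run command, shown for the default backend's pool (illustrative, not part of the diff):

# ceph auth get-or-create client.kube-rbd \
#     mon 'allow r' osd 'allow rwx pool=kube-rbd'
# The returned key is what later gets stored in the per-namespace
# Kubernetes secrets used by the rbd-provisioner.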
@ -847,6 +872,173 @@ class CephOperator(object):
|
|||
except exception.CephFailure as e:
|
||||
LOG.info("Cannot add pools: %s" % e)
|
||||
|
||||
def _update_k8s_ceph_pool_secrets(self, ceph_backend):
|
||||
"""Create CEPH pool secrets for k8s namespaces.
|
||||
:param ceph_backend input/output storage backend data
|
||||
"""
|
||||
|
||||
pool_name = K8RbdProvisioner.get_pool(ceph_backend)
|
||||
|
||||
namespaces_to_add, namespaces_to_rm = \
|
||||
K8RbdProvisioner.getNamespacesDelta(ceph_backend)
|
||||
|
||||
# Get or create Ceph pool key. One per pool.
|
||||
# This key will be used by the K8S secrets from the rbd-provisioner.
|
||||
if namespaces_to_add:
|
||||
key = self._configure_pool_key(pool_name)
|
||||
|
||||
# Get the capabilities of the backend directly from DB to avoid
|
||||
# committing changes unrelated to ceph pool keys.
|
||||
try:
|
||||
orig_ceph_backend = self._db_api.storage_backend_get(ceph_backend['id'])
|
||||
orig_capab = orig_ceph_backend['capabilities']
|
||||
except exception.InvalidParameterValue:
|
||||
# This is a new backend, not yet stored in DB.
|
||||
orig_ceph_backend = None
|
||||
|
||||
configured_namespaces = \
|
||||
K8RbdProvisioner.getListFromNamespaces(orig_ceph_backend,
|
||||
get_configured=True)
|
||||
|
||||
# Adding secrets to namespaces
|
||||
for namespace in namespaces_to_add:
|
||||
K8RbdProvisioner.create_k8s_pool_secret(
|
||||
ceph_backend, key=key,
|
||||
namespace=namespace, force=(True if not ceph_backend else False))
|
||||
|
||||
# Update the backend's capabilities to reflect that a secret
|
||||
# has been created for the k8s pool in the given namespace.
|
||||
# Update DB for each item to reflect reality in case of error.
|
||||
configured_namespaces.append(namespace)
|
||||
if orig_ceph_backend:
|
||||
orig_capab[constants.K8S_RBD_PROV_NAMESPACES_READY] = \
|
||||
','.join(configured_namespaces)
|
||||
self._db_api.storage_backend_update(ceph_backend['id'],
|
||||
{'capabilities': orig_capab})
|
||||
|
||||
# Removing secrets from namespaces
|
||||
for namespace in namespaces_to_rm:
|
||||
K8RbdProvisioner.remove_k8s_pool_secret(ceph_backend,
|
||||
namespace)
|
||||
configured_namespaces.remove(namespace)
|
||||
if orig_ceph_backend:
|
||||
if configured_namespaces:
|
||||
orig_capab[constants.K8S_RBD_PROV_NAMESPACES_READY] = \
|
||||
','.join(configured_namespaces)
|
||||
elif constants.K8S_RBD_PROV_NAMESPACES_READY in orig_capab:
|
||||
# No RBD Provisioner configured, cleanup
|
||||
del orig_capab[constants.K8S_RBD_PROV_NAMESPACES_READY]
|
||||
self._db_api.storage_backend_update(ceph_backend['id'],
|
||||
{'capabilities': orig_capab})
|
||||
|
||||
# Done, store the updated capabilities in the ceph_backend reference
|
||||
capab = ceph_backend['capabilities']
|
||||
if configured_namespaces:
|
||||
capab[constants.K8S_RBD_PROV_NAMESPACES_READY] = \
|
||||
','.join(configured_namespaces)
|
||||
elif constants.K8S_RBD_PROV_NAMESPACES_READY in capab:
|
||||
# No RBD Provisioner configured, cleanup
|
||||
del capab[constants.K8S_RBD_PROV_NAMESPACES_READY]
|
||||
|
||||
def _update_db_capabilities(self, bk, new_storceph):
|
||||
# Avoid updating DB for all capabilities in new_storceph as we
|
||||
# don't manage them. Leave the callers deal with it.
|
||||
if (not new_storceph or
|
||||
(new_storceph and bk['name'] != new_storceph['name'])):
|
||||
self._db_api.storage_backend_update(
|
||||
bk['id'],
|
||||
{'capabilities': bk['capabilities']}
|
||||
)
|
||||
|
||||
    def check_and_update_rbd_provisioner(self, new_storceph=None):
        """Check and/or update the RBD Provisioner configuration for all
        internal Ceph backends.

        This function should be called when:
        1. Making any changes to the rbd-provisioner service
           (adding a new, removing or updating an existing provisioner)
        2. Synchronizing changes with the DB.

        To speed up synchronization, DB entries are used to determine when
        changes are needed and only then proceed with more time-consuming
        operations.

        Note: This function assumes a functional Ceph cluster.

        :param new_storceph: a storage backend object as_dict() with updated
            data. This is required as database updates can happen later.
        :returns: an updated version of new_storceph or None
        """
        # Get an updated list of backends
        if new_storceph:
            ceph_backends = [b.as_dict() for b in
                             self._db_api.storage_backend_get_list()
                             if b['backend'] == constants.SB_TYPE_CEPH and
                             b['name'] != new_storceph['name']]
            ceph_backends.append(new_storceph)
        else:
            ceph_backends = [b.as_dict() for b in
                             self._db_api.storage_backend_get_list()
                             if b['backend'] == constants.SB_TYPE_CEPH]

        # Nothing to do if rbd-provisioner is not configured and was never
        # configured on any backend.
        for bk in ceph_backends:
            svcs = utils.SBApiHelper.getListFromServices(bk)
            if (constants.SB_SVC_RBD_PROVISIONER in svcs or
                    bk['capabilities'].get(constants.K8S_RBD_PROV_NAMESPACES_READY) or
                    bk['capabilities'].get(constants.K8S_RBD_PROV_ADMIN_SECRET_READY)):
                break
        else:
            return new_storceph

        # In order for an RBD provisioner to work we need:
        # - A couple of Ceph keys:
        #   1. A cluster-wide admin key (e.g. the one in
        #      /etc/ceph/ceph.client.admin.keyring)
        #   2. A key for accessing the pool (e.g. client.kube-rbd)
        # - The Ceph keys above passed into Kubernetes secrets:
        #   1. An admin secret in the RBD Provisioner POD namespace with the
        #      Ceph cluster-wide admin key.
        #   2. One or more K8S secrets, one per namespace in which we allow
        #      RBD PV and PVC creation, each carrying the Ceph pool key.
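        #
        # For orientation only, a rough manual equivalent of what the helper
        # calls below automate (pool and secret names are illustrative):
        #   ceph auth get-or-create client.kube-rbd \
        #       mon 'allow r' osd 'allow rwx pool=kube-rbd'
        #   kubectl create secret generic ceph-pool-kube-rbd \
        #       --type=kubernetes.io/rbd --from-literal=key=<pool key> \
        #       --namespace=<target namespace>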

        # Manage the Ceph cluster-wide admin key and associated secret - we
        # create it if needed or remove it if no longer needed.
        admin_secret_exists = False
        remove_admin_secret = True
        for bk in ceph_backends:
            svcs = utils.SBApiHelper.getListFromServices(bk)

            # Create secret
            # Check to see if we need the admin Ceph key. This key is created
            # once per cluster and references to it are kept in all Ceph tiers
            # of that cluster. So make sure they are up to date.
            if constants.SB_SVC_RBD_PROVISIONER in svcs:
                remove_admin_secret = False
                if bk['capabilities'].get(constants.K8S_RBD_PROV_ADMIN_SECRET_READY):
                    admin_secret_exists = True
                else:
                    if not admin_secret_exists:
                        K8RbdProvisioner.create_k8s_admin_secret()
                        admin_secret_exists = True
                    bk['capabilities'][constants.K8S_RBD_PROV_ADMIN_SECRET_READY] = True
                    self._update_db_capabilities(bk, new_storceph)
        # Remove the admin secret and any references to it if the RBD
        # Provisioner is unconfigured.
        if remove_admin_secret:
            K8RbdProvisioner.remove_k8s_admin_secret()
            for bk in ceph_backends:
                if bk['capabilities'].get(constants.K8S_RBD_PROV_ADMIN_SECRET_READY):
                    del bk['capabilities'][constants.K8S_RBD_PROV_ADMIN_SECRET_READY]
                    self._update_db_capabilities(bk, new_storceph)

        for bk in ceph_backends:
            self._update_k8s_ceph_pool_secrets(bk)

        # Return updated new_storceph reference
        return new_storceph

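    # Usage sketch (illustrative; `op` is a CephOperator instance and `sb`
    # a storage backend fetched through the DB API):
    #
    #     updated = op.check_and_update_rbd_provisioner(sb.as_dict())
    #
    # The returned dict carries any capability updates made for the
    # provisioner; callers that deferred their own DB write persist it.
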
    def get_osd_tree(self):
        """Get OSD tree info
        return: list of nodes and a list of stray osds e.g.:

@@ -1059,20 +1251,21 @@ class CephOperator(object):
        osds_raw actual osds

        Primary Tier:
-           Minimum: <= 2 storage applies minimum. (512, 512, 256, 256)
+           Minimum: <= 2 storage applies minimum. (512, 512, 256, 256, 128)
            Assume max 8 OSD for first pair to set baseline.

            cinder_volumes: 512 * 2
            ephemeral_vms:  512 * 2
            glance_images:  256 * 2
            .rgw.buckets:   256 * 2
            kube-rbd:       128 * 2
            rbd:            64 (this is created by Ceph)
            --------------------
-           Total:          3136
+           Total:          3392

        Note: for a single OSD the value has to be less than 2048, formula:
        [Total] / [total number of OSD] = [PGs/OSD]
-       3136 / 2 = 1568 < 2048
+       3392 / 2 = 1696 < 2048
        See constants.CEPH_POOLS for up to date values

        Secondary Tiers:

@@ -1081,13 +1274,14 @@ class CephOperator(object):
            first pair to set baseline.

            cinder_volumes: 512 * 2
            kube_rbd:       128 * 2
            rbd:            64 (this is created by Ceph)
            --------------------
-           Total:          1088
+           Total:          1344

        Note: for a single OSD the value has to be less than 2048, formula:
        [Total] / [total number of OSD] = [PGs/OSD]
-       1088 / 2 = 544 < 2048
+       1344 / 2 = 672 < 2048
        See constants.SB_TIER_CEPH_POOLS for up to date values

        Above 2 Storage hosts: Calculate OSDs based upon pg_calc:

@@ -1095,11 +1289,12 @@ class CephOperator(object):

        Select Target PGs per OSD = 200; to forecast it can double

-       Determine number of OSD (in multiples of storage replication factor) on the
-       first host-unlock of storage pair.
+       Determine number of OSD (in multiples of storage replication factor) on
+       the first host-unlock of storage pair.
        """
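        # Quick check of the docstring budget above (illustrative only):
        #   pgs = {'cinder_volumes': 512, 'ephemeral_vms': 512,
        #          'glance_images': 256, '.rgw.buckets': 256, 'kube-rbd': 128}
        #   total = sum(2 * v for v in pgs.values()) + 64   # == 3392
        #   assert total / 2 < 2048                         # PGs per OSD cap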
        # Get configured ceph replication
-       replication, min_replication = StorageBackendConfig.get_ceph_pool_replication(self._db_api)
+       replication, min_replication = \
+           StorageBackendConfig.get_ceph_pool_replication(self._db_api)

        if tiers_obj.uuid == self.primary_tier_uuid:
            is_primary_tier = True

@@ -1141,7 +1336,7 @@ class CephOperator(object):
                data_pt = int(pool['data_pt'])
                break

-           if pool['pool_name'] == pool_name:
+           if pool['pool_name'] in pool_name:
                data_pt = int(pool['data_pt'])
                break

@@ -1269,9 +1464,9 @@ class CephOperator(object):

        try:
            primary_tier_gib = int(self.get_ceph_primary_tier_size())
-           # In case have only two controllers up, the cluster is considered up,
-           # but the total cluster is reported as zero. For such a case we don't
-           # yet dynamically update the ceph quotas
+           # In case we have only two controllers up, the cluster is considered
+           # up, but the total cluster size is reported as zero. For such a
+           # case we don't yet dynamically update the ceph quotas
            if primary_tier_gib == 0:
                LOG.info("Ceph cluster is up, but no storage nodes detected.")
                return

@@ -1302,28 +1497,35 @@ class CephOperator(object):

            # Grab the current values
            cinder_pool_gib = storage_ceph.cinder_pool_gib
            kube_pool_gib = storage_ceph.kube_pool_gib
            glance_pool_gib = storage_ceph.glance_pool_gib
            ephemeral_pool_gib = storage_ceph.ephemeral_pool_gib
            object_pool_gib = storage_ceph.object_pool_gib

            # Initial cluster provisioning after cluster is up
            # glance_pool_gib = 20 GiB
            # kube_pool_gib = 20 GiB
            # cinder_pool_gib = total_cluster_size - glance_pool_gib
            #                   - kube_pool_gib
            # ephemeral_pool_gib = 0
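            # Worked example (illustrative): on a 100 GiB primary tier the
            # defaults give glance=20, kube=20, cinder=60, ephemeral=0; when
            # the tier is smaller than the glance + kube defaults (e.g. the
            # two 10 GiB OSD development setup below), it is instead split
            # evenly between the glance and k8s pools.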
            if (upgrade is None and
                    cinder_pool_gib == constants.CEPH_POOL_VOLUMES_QUOTA_GIB and
                    kube_pool_gib == constants.CEPH_POOL_KUBE_QUOTA_GIB and
                    glance_pool_gib == constants.CEPH_POOL_IMAGES_QUOTA_GIB and
                    ephemeral_pool_gib == constants.CEPH_POOL_EPHEMERAL_QUOTA_GIB and
                    object_pool_gib == constants.CEPH_POOL_OBJECT_GATEWAY_QUOTA_GIB):
                # The minimum development setup requires two storage
-               # nodes each with one 10GB OSD. This result in cluster
+               # nodes each with one 10GB OSD. This results in a cluster
                # size which is under the default glance pool size of 20GB.
                # Setting the glance pool to a value lower than 20GB
-               # is a developement safeguard only and should not really
+               # is a development safeguard only and should not really
                # happen in real-life scenarios.
-               if primary_tier_gib > constants.CEPH_POOL_IMAGES_QUOTA_GIB:
+               if (primary_tier_gib >
+                       constants.CEPH_POOL_IMAGES_QUOTA_GIB +
+                       constants.CEPH_POOL_KUBE_QUOTA_GIB):
                    cinder_pool_gib = (primary_tier_gib -
-                                      constants.CEPH_POOL_IMAGES_QUOTA_GIB)
+                                      constants.CEPH_POOL_IMAGES_QUOTA_GIB -
+                                      constants.CEPH_POOL_KUBE_QUOTA_GIB)

                    self._db_api.storage_ceph_update(storage_ceph.uuid,
                                                     {'cinder_pool_gib':
@@ -1331,13 +1533,23 @@ class CephOperator(object):
                    self.set_osd_pool_quota(constants.CEPH_POOL_VOLUMES_NAME,
                                            cinder_pool_gib * 1024 ** 3)
                else:
-                   glance_pool_gib = primary_tier_gib
+                   glance_pool_gib = primary_tier_gib / 2
+                   kube_pool_gib = primary_tier_gib - glance_pool_gib

                # Set the quota for the glance pool.
                self._db_api.storage_ceph_update(storage_ceph.uuid,
                                                 {'glance_pool_gib':
                                                  glance_pool_gib})
                self.set_osd_pool_quota(constants.CEPH_POOL_IMAGES_NAME,
                                        glance_pool_gib * 1024 ** 3)

                # Set the quota for the k8s pool.
                self._db_api.storage_ceph_update(storage_ceph.uuid,
                                                 {'kube_pool_gib':
                                                  kube_pool_gib})
                self.set_osd_pool_quota(constants.CEPH_POOL_KUBE_NAME,
                                        kube_pool_gib * 1024 ** 3)

                self.executed_default_quota_check_by_tier[tier_obj.name] = True
            elif (upgrade is not None and
                  self.check_storage_upgrade_finished(upgrade)):

@@ -1364,6 +1576,7 @@ class CephOperator(object):
                self.executed_default_quota_check_by_tier[tier_obj.name] = True
            elif (primary_tier_gib > 0 and
                  primary_tier_gib == (cinder_pool_gib +
                                       kube_pool_gib +
                                       glance_pool_gib +
                                       ephemeral_pool_gib +
                                       object_pool_gib)):

@@ -1372,31 +1585,41 @@ class CephOperator(object):
                self.executed_default_quota_check_by_tier[tier_obj.name] = True

        else:
-           # Secondary tiers: only cinder pool supported.
+           # Grab the current values
+           cinder_pool_gib = storage_ceph.cinder_pool_gib
+           kube_pool_gib = storage_ceph.kube_pool_gib
+
+           # Secondary tiers: only cinder and kube pool supported.
            tiers_size = self.get_ceph_tiers_size()
            tier_root = "{0}{1}".format(tier_obj.name,
                                        constants.CEPH_CRUSH_TIER_SUFFIX)
            tier_size_gib = tiers_size.get(tier_root, 0)

-           # Take action on individual pools not considering any
-           # relationships between pools
-           tier_pools_sum = 0
-           for pool in constants.SB_TIER_CEPH_POOLS:
+           if (cinder_pool_gib == constants.CEPH_POOL_VOLUMES_QUOTA_GIB and
+                   kube_pool_gib == constants.CEPH_POOL_KUBE_QUOTA_GIB):
+               if (tier_size_gib >
+                       constants.CEPH_POOL_VOLUMES_QUOTA_GIB +
+                       constants.CEPH_POOL_KUBE_QUOTA_GIB):
+                   cinder_pool_gib = tier_size_gib - \
+                       constants.CEPH_POOL_KUBE_QUOTA_GIB
+                   kube_pool_gib = constants.CEPH_POOL_KUBE_QUOTA_GIB
+               else:
+                   kube_pool_gib = tier_size_gib / 2
+                   cinder_pool_gib = tier_size_gib - kube_pool_gib

-               # Grab the current values
-               current_gib = storage_ceph.get(pool['be_quota_attr'])
-               default_gib = pool['quota_default']
+           tier_pools_sum = kube_pool_gib + cinder_pool_gib

-               if not current_gib:
-                   self._db_api.storage_ceph_update(storage_ceph.uuid,
-                                                    {pool['be_quota_attr']:
-                                                     default_gib})
-                   self._db_api.storage_ceph_update(storage_ceph.uuid,
-                                                    {pool['be_quota_attr']:
-                                                     default_gib * 1024 ** 3})
-                   current_gib = default_gib
-               tier_pools_sum += current_gib
+           # Set the quota for the cinder-volumes pool.
+           self._db_api.storage_ceph_update(
+               storage_ceph.uuid, {'cinder_pool_gib': cinder_pool_gib})
+           self.set_osd_pool_quota(
+               constants.CEPH_POOL_VOLUMES_NAME, cinder_pool_gib * 1024 ** 3)
+
+           # Set the quota for the k8s pool.
+           self._db_api.storage_ceph_update(
+               storage_ceph.uuid, {'kube_pool_gib': kube_pool_gib})
+           self.set_osd_pool_quota(
+               constants.CEPH_POOL_KUBE_NAME, kube_pool_gib * 1024 ** 3)

            # Adjust pool quotas based on pool relationships.
            if tier_size_gib == tier_pools_sum:

@@ -1510,6 +1510,9 @@ class ConductorManager(service.PeriodicService):
            # unlocked.
            self._ceph.configure_osd_pools()

            # Generate CEPH keys for k8s pools.
            self.check_and_update_rbd_provisioner(context)

            # Generate host configuration files
            self._puppet.update_host_config(host)
        else:

@@ -4973,6 +4976,41 @@ class ConductorManager(service.PeriodicService):
        elif bk.backend in self._stor_bck_op_timeouts:
            del self._stor_bck_op_timeouts[bk.backend]

    def get_k8s_namespaces(self, context):
        """Get Kubernetes namespaces.

        :returns: list of namespaces
        """
        try:
            cmd = ['kubectl', '--kubeconfig=/etc/kubernetes/admin.conf',
                   'get', 'namespaces', '-o',
                   'go-template=\'{{range .items}}{{.metadata.name}}\'{{end}}\'']
            stdout, _ = cutils.execute(*cmd, run_as_root=False)
            namespaces = [n for n in stdout.split("\'") if n]
            return namespaces
        except exception.ProcessExecutionError as e:
            raise exception.SysinvException(
                _("Error getting Kubernetes list of namespaces, "
                  "Details: %s" % str(e)))

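    # Example of the parsing above (illustrative): for template output
    # "'default'kube-system'kube-public'" the split on "'" yields
    # ['', 'default', 'kube-system', 'kube-public', ''] and the list
    # comprehension drops the empty strings.
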
    def check_and_update_rbd_provisioner(self, context, new_storceph=None):
        """Check and/or update the RBD Provisioner configuration for all
        internal Ceph backends.

        This function should be called in two cases:
        1. When making any changes to the rbd-provisioner service.
        2. When delaying changes due to Ceph not being up.

        To allow delayed executions we check DB entries for changes and only
        then proceed with time-consuming modifications.

        Note: This function assumes a functional Ceph cluster.

        :param new_storceph: a storage backend object as_dict() with updated
            data. This is required as database updates can happen later.
        :returns: an updated version of new_storceph or None
        """
        return self._ceph.check_and_update_rbd_provisioner(new_storceph)

    def configure_isystemname(self, context, systemname):
        """Configure the systemname with the supplied data.

@@ -5045,7 +5083,8 @@ class ConductorManager(service.PeriodicService):
                    self.dbapi, constants.SB_TYPE_LVM):
                pools = self._openstack.get_cinder_pools()
                for pool in pools:
-                   if getattr(pool, 'volume_backend_name', '') == constants.CINDER_BACKEND_LVM:
+                   if (getattr(pool, 'volume_backend_name', '') ==
+                           constants.CINDER_BACKEND_LVM):
                        return pool.to_dict()

        return None

@@ -874,6 +874,35 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
        return self.call(context,
                         self.make_msg('update_ceph_services', sb_uuid=sb_uuid))

    def get_k8s_namespaces(self, context):
        """Synchronously get Kubernetes namespaces.

        :returns: list of namespaces
        """
        return self.call(context,
                         self.make_msg('get_k8s_namespaces'))

    def check_and_update_rbd_provisioner(self, context, new_storceph=None):
        """Check, and if needed update, the RBD Provisioner configuration
        for all internal Ceph backends.

        This function should be called in two cases:
        1. When making any change to the rbd-provisioner service.
        2. When delaying changes due to Ceph not being up.

        To allow delayed executions we check DB entries for changes and only
        then proceed with time-consuming modifications.

        Note: This function assumes a fully functional Ceph cluster.

        :param new_storceph: a storage backend object as_dict() with updated
            data. This is needed as database updates can happen later.
        :returns: an updated version of new_storceph
        """
        return self.call(context,
                         self.make_msg('check_and_update_rbd_provisioner',
                                       new_storceph=new_storceph))

    def report_config_status(self, context, iconfig,
                             status, error=None):
        """Callback from Sysinv Agent on manifest apply success or failure.

@@ -0,0 +1,27 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#

from sqlalchemy import Integer
from sqlalchemy import Column, MetaData, Table

ENGINE = 'InnoDB'
CHARSET = 'utf8'


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    i_storconfig = Table('storage_ceph', meta, autoload=True)
    i_storconfig.create_column(Column('kube_pool_gib', Integer))


def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    i_storconfig = Table('storage_ceph', meta, autoload=True)
    i_storconfig.drop_column('kube_pool_gib')
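
# Usage sketch (illustrative): sqlalchemy-migrate drives these entry points
# with a bound engine, e.g.:
#
#     from sqlalchemy import create_engine
#     upgrade(create_engine('postgresql://user:pass@host/sysinv'))  # URL is hypothetical
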
@@ -886,6 +886,7 @@ class StorageCeph(StorageBackend):
    glance_pool_gib = Column(Integer)
    ephemeral_pool_gib = Column(Integer)
    object_pool_gib = Column(Integer)
    kube_pool_gib = Column(Integer)
    object_gateway = Column(Boolean, default=False)
    tier_id = Column(Integer,
                     ForeignKey('storage_tiers.id'))

@@ -23,6 +23,7 @@ class StorageCeph(storage_backend.StorageBackend):
    'glance_pool_gib': utils.int_or_none,
    'ephemeral_pool_gib': utils.int_or_none,
    'object_pool_gib': utils.int_or_none,
    'kube_pool_gib': utils.int_or_none,
    'object_gateway': utils.bool_or_none,
    'tier_id': utils.int_or_none,
    'tier_name': utils.str_or_none,

@@ -40,6 +40,25 @@ test_storage_lvm.HIERA_DATA = {
test_storage_ceph.HIERA_DATA = {
    'backend': ['test_bparam3'],
    constants.SB_SVC_CINDER: ['test_cparam3'],
    constants.SB_SVC_RBD_PROVISIONER: ['test_rparam3'],
    constants.SB_SVC_GLANCE: ['test_gparam3'],
    constants.SB_SVC_SWIFT: ['test_sparam1'],
    constants.SB_SVC_NOVA: ['test_nparam1'],
}

test_storage_ceph.CAPABILITIES = {
    'backend': ['test_bparam3'],
    constants.SB_SVC_CINDER: ['test_cparam3'],
    constants.SB_SVC_RBD_PROVISIONER: ['test_rparam3'],
    constants.SB_SVC_GLANCE: ['test_gparam3'],
    constants.SB_SVC_SWIFT: ['test_sparam1'],
    constants.SB_SVC_NOVA: ['test_nparam1'],
}

test_storage_ceph.MANDATORY_CAP = {
    'backend': ['test_bparam3'],
    constants.SB_SVC_CINDER: ['test_cparam3'],
    constants.SB_SVC_RBD_PROVISIONER: ['test_rparam3'],
    constants.SB_SVC_GLANCE: ['test_gparam3'],
    constants.SB_SVC_SWIFT: ['test_sparam1'],
    constants.SB_SVC_NOVA: ['test_nparam1'],

@@ -578,7 +597,7 @@ class StorageBackendTestCases(base.FunctionalTest):
                      response.json['error_message'])

    @mock.patch.object(StorageBackendConfig, 'get_ceph_mon_ip_addresses')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_hiera_data')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_capabilities')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._apply_backend_changes')
    def test_post_ceph_with_valid_svc_no_svc_param_and_confirm(self, mock_apply, mock_validate, mock_mon_ip):
        # Test skipped. Fix later.

@@ -597,7 +616,7 @@ class StorageBackendTestCases(base.FunctionalTest):
                      response.json['error_message'])

    @mock.patch.object(StorageBackendConfig, 'get_ceph_mon_ip_addresses')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_hiera_data')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_capabilities')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._apply_backend_changes')
    def test_post_ceph_with_valid_svc_some_svc_param_and_confirm(self, mock_apply, mock_validate, mock_mon_ip):
        # Test skipped. Fix later.

@@ -618,7 +637,7 @@ class StorageBackendTestCases(base.FunctionalTest):

    @mock.patch.object(StorageBackendConfig, 'get_ceph_mon_ip_addresses')
    @mock.patch.object(StorageBackendConfig, 'set_img_conversions_defaults')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_hiera_data')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_capabilities')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._apply_backend_changes')
    def test_post_ceph_with_valid_svc_all_svc_param_and_confirm(self, mock_apply, mock_validate, mock_img_conv, mock_mon_ip):
        vals = {

@@ -662,7 +681,7 @@ class StorageBackendTestCases(base.FunctionalTest):

    @mock.patch.object(StorageBackendConfig, 'get_ceph_mon_ip_addresses')
    @mock.patch.object(StorageBackendConfig, 'set_img_conversions_defaults')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_hiera_data')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_capabilities')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._apply_backend_changes')
    @mock.patch.object(SBApiHelper, 'set_backend_data',
                       side_effect=set_backend_state_configured)

@@ -691,7 +710,7 @@ class StorageBackendTestCases(base.FunctionalTest):

    @mock.patch.object(StorageBackendConfig, 'get_ceph_mon_ip_addresses')
    @mock.patch.object(StorageBackendConfig, 'set_img_conversions_defaults')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_hiera_data')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_capabilities')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._apply_backend_changes')
    @mock.patch.object(SBApiHelper, 'set_backend_data',
                       side_effect=set_backend_state_configured)

@@ -721,7 +740,7 @@ class StorageBackendTestCases(base.FunctionalTest):

    @mock.patch.object(StorageBackendConfig, 'get_ceph_mon_ip_addresses')
    @mock.patch.object(StorageBackendConfig, 'set_img_conversions_defaults')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_hiera_data')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._discover_and_validate_cinder_capabilities')
    @mock.patch('sysinv.api.controllers.v1.storage_ceph._apply_backend_changes')
    @mock.patch.object(SBApiHelper, 'set_backend_data',
                       side_effect=set_backend_state_configured)