Add support for external Ceph

This commit introduces a new storage backend "ceph-external" to support
external ceph.
- It provides the capability to provision an external Ceph cluster as
  backend for Cinder, Glance, Nova
- The connectivity to the 3rd party Ceph cluster is ensured through
  importing a Ceph configuration file
- Any combination of other backends and the external Ceph backend is
  supported with limitations
- Create /opt/extension/ceph directory in drbd.pp
- When instance-backing is "remote" on a compute node, if the ephemeral
  ceph pool is moved from one ceph backend to another, a config
  out-of-date alarm should be generated for that compute node to signal
  that the compute node needs to be locked and unlocked in order for
  nova-compute to be restarted on that compute node.
- For adding an external ceph backend, it is done by two POST requests:
  one to upload the ceph config file and one to add the backend. For
  modifying an existing external ceph backend, it is done by one POST
  request to upload the ceph config file and one PATCH request to modify
  the backend.

Story: 2002820
Task: 22737

Change-Id: Ie504ffae9f4895a67502ecfa3f0fbf267bb65e99
Signed-off-by: Jack Ding <jack.ding@windriver.com>
This commit is contained in:
Wei Zhou 2018-06-09 22:26:35 -04:00 committed by Jack Ding
parent ef5b96ac17
commit f52a35d6bc
35 changed files with 1583 additions and 135 deletions

View File

@ -224,6 +224,16 @@ start()
fatal_error "Unable to mount $PLATFORM_DIR (RC:$RC)"
fi
fi
# Copy over external_ceph config files
if [ -e $CONFIG_DIR/ceph-config ]
then
cp $CONFIG_DIR/ceph-config/*.conf /etc/ceph/
if [ $? -ne 0 ]
then
fatal_error "Unable to copy ceph-external config files"
fi
fi
fi
if [ "$nodetype" = "compute" ]

View File

@ -271,6 +271,16 @@ start()
fi
fi
# Copy over external_ceph config files
if [ -e $CONFIG_DIR/ceph-config ]
then
cp $CONFIG_DIR/ceph-config/*.conf /etc/ceph/
if [ $? -ne 0 ]
then
fatal_error "Unable to copy ceph-external config files"
fi
fi
# Keep the /opt/branding directory to preserve any new files and explicitly copy over any required files
if [ -e $CONFIG_DIR/branding/horizon-region-exclusions.csv ]
then

View File

@ -19,6 +19,7 @@ class openstack::cinder::params (
$cinder_vg_name = 'cinder-volumes',
$drbd_resource = 'drbd-cinder',
$iscsi_ip_address = undef,
$is_ceph_external = false,
# Flag files
$initial_cinder_config_flag = "${::platform::params::config_path}/.initial_cinder_config_complete",
$initial_cinder_lvm_config_flag = "${::platform::params::config_path}/.initial_cinder_lvm_config_complete",
@ -67,7 +68,7 @@ class openstack::cinder::params (
$is_node_cinder_lvm = false
}
if 'ceph' in $enabled_backends {
if 'ceph' in $enabled_backends or $is_ceph_external {
# Check if this is the first time we ever configure Ceph on this system
if str2bool($::is_controller_active) and str2bool($::is_initial_cinder_ceph_config) {
$is_initial_cinder_ceph = true
@ -260,7 +261,7 @@ class openstack::cinder::backends
include ::openstack::cinder::lvm
}
if 'ceph' in $enabled_backends {
if 'ceph' in $enabled_backends or $is_ceph_external {
include ::openstack::cinder::backends::ceph
}
@ -462,7 +463,8 @@ define openstack::cinder::backend::ceph(
$backend_enabled = false,
$backend_name,
$rbd_user = 'cinder',
$rbd_pool
$rbd_pool,
$rbd_ceph_conf = '/etc/ceph/ceph.conf'
) {
if $backend_enabled {
@ -470,6 +472,7 @@ define openstack::cinder::backend::ceph(
backend_host => '$host',
rbd_pool => $rbd_pool,
rbd_user => $rbd_user,
rbd_ceph_conf => $rbd_ceph_conf,
}
} else {
cinder_config {
@ -728,8 +731,9 @@ class openstack::cinder::post
class openstack::cinder::reload {
platform::sm::restart {'cinder-volume': }
platform::sm::restart {'cinder-scheduler': }
platform::sm::restart {'cinder-volume': }
platform::sm::restart {'cinder-backup': }
platform::sm::restart {'cinder-api': }
}

View File

@ -11,6 +11,8 @@ class openstack::glance::params (
$configured_registry_host = '0.0.0.0',
$glance_cached = false,
$glance_delete_interval = 6,
$rbd_store_pool = 'images',
$rbd_store_ceph_conf = '/etc/ceph/ceph.conf',
) { }
@ -105,10 +107,6 @@ class openstack::glance
if 'file' in $enabled_backends {
include ::glance::backend::file
}
if 'rbd' in $enabled_backends {
include ::glance::backend::rbd
}
}
}
@ -172,6 +170,13 @@ class openstack::glance::api
show_image_direct_url => $show_image_direct_url,
}
if 'rbd' in $enabled_backends {
class { '::glance::backend::rbd':
rbd_store_pool => $rbd_store_pool,
rbd_store_ceph_conf => $rbd_store_ceph_conf,
}
}
include ::openstack::glance::firewall
include ::openstack::glance::haproxy
}

View File

@ -392,6 +392,8 @@ class openstack::nova::storage (
$instance_backing = 'image',
$instances_lv_size = 0,
$concurrent_disk_operations = 2,
$images_rbd_pool = 'ephemeral',
$images_rbd_ceph_conf = '/etc/ceph/ceph.conf'
) {
$adding_pvs_str = join($adding_pvs," ")
$removing_pvs_str = join($removing_pvs," ")
@ -403,26 +405,29 @@ class openstack::nova::storage (
'image': {
$images_type = 'default'
$images_volume_group = absent
$images_rbd_pool = absent
$round_to_extent = false
$local_monitor_state = 'disabled'
$instances_lv_size_real = 'max'
$images_rbd_pool_real = absent
$images_rbd_ceph_conf_real = absent
}
'lvm': {
$images_type = 'lvm'
$images_volume_group = 'nova-local'
$images_rbd_pool = absent
$round_to_extent = true
$local_monitor_state = 'enabled'
$instances_lv_size_real = $instances_lv_size
$images_rbd_pool_real = absent
$images_rbd_ceph_conf_real = absent
}
'remote': {
$images_type = 'rbd'
$images_volume_group = absent
$images_rbd_pool = 'ephemeral'
$round_to_extent = false
$local_monitor_state = 'disabled'
$instances_lv_size_real = 'max'
$images_rbd_pool_real = $images_rbd_pool
$images_rbd_ceph_conf_real = $images_rbd_ceph_conf
}
default: {
fail("Unsupported instance backing: ${instance_backing}")
@ -444,7 +449,8 @@ class openstack::nova::storage (
nova_config {
"libvirt/images_type": value => $images_type;
"libvirt/images_volume_group": value => $images_volume_group;
"libvirt/images_rbd_pool": value => $images_rbd_pool;
"libvirt/images_rbd_pool": value => $images_rbd_pool_real;
"libvirt/images_rbd_ceph_conf": value => $images_rbd_ceph_conf_real;
} ->
exec { 'umount /etc/nova/instances':
command => 'umount /etc/nova/instances; true',

View File

@ -236,7 +236,6 @@ class platform::ceph::haproxy
}
}
class platform::ceph::rgw
inherits ::platform::ceph::params {

View File

@ -259,6 +259,13 @@ class platform::drbd::extension (
mountpoint => $mountpoint,
resync_after => $resync_after,
}
file { "${mountpoint}/ceph":
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0775',
}
}
class platform::drbd::patch_vault::params (

View File

@ -32,9 +32,11 @@ CONTROLLER_1_HOSTNAME = '%s-1' % CONTROLLER_HOSTNAME
SB_TYPE_FILE = 'file'
SB_TYPE_LVM = 'lvm'
SB_TYPE_CEPH = 'ceph'
SB_TYPE_CEPH_EXTERNAL = 'ceph-external'
SB_TYPE_EXTERNAL = 'external'
SB_SUPPORTED = [SB_TYPE_FILE, SB_TYPE_LVM, SB_TYPE_CEPH, SB_TYPE_EXTERNAL]
SB_SUPPORTED = [SB_TYPE_FILE, SB_TYPE_LVM, SB_TYPE_CEPH, SB_TYPE_CEPH_EXTERNAL,
SB_TYPE_EXTERNAL]
# Storage backend state
SB_STATE_CONFIGURED = 'configured'
SB_STATE_CONFIGURING = 'configuring'

View File

@ -68,6 +68,7 @@ from cgtsclient.v1 import sm_service_nodes
from cgtsclient.v1 import sm_servicegroup
from cgtsclient.v1 import storage_backend
from cgtsclient.v1 import storage_ceph
from cgtsclient.v1 import storage_ceph_external
from cgtsclient.v1 import storage_external
from cgtsclient.v1 import storage_file
from cgtsclient.v1 import storage_lvm
@ -145,3 +146,5 @@ class Client(http.HTTPClient):
self.license = license.LicenseManager(self)
self.certificate = certificate.CertificateManager(self)
self.storage_tier = storage_tier.StorageTierManager(self)
self.storage_ceph_external = \
storage_ceph_external.StorageCephExternalManager(self)

View File

@ -172,7 +172,7 @@ def do_host_lvg_delete(cc, args):
metavar='<hostname or id>',
help="Name or ID of the host [REQUIRED]")
@utils.arg('lvgnameoruuid',
metavar='<lvm name or uuid>',
metavar='<lvg name or uuid>',
help="Name or UUID of lvg [REQUIRED]")
@utils.arg('-b', '--instance_backing',
metavar='<instance backing>',

View File

@ -13,6 +13,7 @@ from cgtsclient.common import utils
from cgtsclient import exc
from cgtsclient.v1 import ceph_mon as ceph_mon_utils
from cgtsclient.v1 import storage_ceph # noqa
from cgtsclient.v1 import storage_ceph_external # noqa
from cgtsclient.v1 import storage_external # noqa
from cgtsclient.v1 import storage_file # noqa
from cgtsclient.v1 import storage_lvm # noqa
@ -104,9 +105,10 @@ def backend_show(cc, backend_name_or_uuid):
raise exc.CommandError("Backend %s is not found."
% backend_name_or_uuid)
backend_client = getattr(cc, 'storage_' + db_backend.backend)
backend_type = db_backend.backend.replace('-', '_')
backend_client = getattr(cc, 'storage_' + backend_type)
backend_obj = backend_client.get(db_backend.uuid)
extra_fields = getattr(eval('storage_' + db_backend.backend),
extra_fields = getattr(eval('storage_' + backend_type),
'DISPLAY_ATTRIBUTES')
_show_backend(backend_obj, extra_fields)
@ -120,13 +122,14 @@ def _display_next_steps():
def backend_add(cc, backend, args):
backend = backend.replace('-', '_')
# add ceph mons to controllers
if backend == constants.SB_TYPE_CEPH:
ceph_mon_utils.ceph_mon_add(cc, args)
# allowed storage_backend fields
allowed_fields = ['name', 'services', 'confirmed']
allowed_fields = ['name', 'services', 'confirmed', 'ceph_conf']
# allowed backend specific backends
if backend in constants.SB_SUPPORTED:
@ -158,7 +161,6 @@ def backend_add(cc, backend, args):
# BACKEND MODIFY
def backend_modify(cc, args):
db_backends = cc.storage_backend.list()
backend_entry = next(
(b for b in db_backends
@ -170,7 +172,7 @@ def backend_modify(cc, args):
% args.backend_name_or_uuid)
# filter out arg noise: Only relevant fields
allowed_fields = ['services']
allowed_fields = ['services', 'ceph_conf']
# filter the args.passed to backend creation
fields = dict((k, v) for (k, v) in vars(args).items()
@ -183,8 +185,9 @@ def backend_modify(cc, args):
# non-capability, backend specific attributes
backend = backend_entry.backend
if backend in constants.SB_SUPPORTED:
backend_attrs = getattr(eval('storage_' + backend),
backend_attrs = getattr(eval('storage_' + backend.replace("-", "_")),
'PATCH_ATTRIBUTES')
allowed_fields += backend_attrs
for k, v in attr_dict.iteritems():
@ -205,8 +208,9 @@ def backend_modify(cc, args):
'op': 'replace'})
try:
backend_client = getattr(cc, 'storage_' + backend)
backend_client = getattr(cc, 'storage_' + backend.replace("-", "_"))
backend_entry = backend_client.update(backend_entry.uuid, patch)
except exc.HTTPNotFound:
raise exc.CommandError('Storage %s not found: %s'
% (backend,

View File

@ -60,7 +60,7 @@ def do_storage_backend_show(cc, args):
@utils.arg('backend',
metavar='<backend>',
choices=['ceph', 'file', 'lvm', 'external'],
choices=['ceph', 'ceph-external', 'file', 'lvm', 'external'],
help='The storage backend to add [REQUIRED]')
@utils.arg('-s', '--services',
metavar='<services>',
@ -73,6 +73,10 @@ def do_storage_backend_show(cc, args):
metavar='<tier_uuid>',
help=('Optional storage tier uuid for additional backends (ceph '
'only)'))
@utils.arg('-c', '--ceph_conf',
metavar='<ceph_conf>',
help='Location of the Ceph configuration file used for provisioning'
' an external backend.')
@utils.arg('--confirmed',
action='store_true',
help='Provide acknowledgement that the operation should continue as'
@ -82,8 +86,8 @@ def do_storage_backend_show(cc, args):
nargs='*',
default=[],
help="Required backend/service parameters to apply.")
# Parameters specific to Ceph monitors, these should be moved to system ceph-mon-add
# when that command is available
# Parameters specific to Ceph monitors, these should be moved to system
# ceph-mon-add when that command is available.
@utils.arg('--ceph-mon-gib',
metavar='<ceph-mon-gib>',
help='The ceph-mon-lv size in GiB')
@ -102,6 +106,10 @@ def do_storage_backend_add(cc, args):
metavar='<services>',
help=('Optional string of comma separated services to add/update. '
'Valid values are: "cinder, glance, swift"'))
@utils.arg('-c', '--ceph_conf',
metavar='<ceph_conf>',
help=('Location of the Ceph configuration file used for provisioning'
' an external backend.'))
@utils.arg('attributes',
metavar='<parameter=value>',
nargs='*',

View File

@ -0,0 +1,93 @@
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# -*- encoding: utf-8 -*-
#
from cgtsclient.common import base
from cgtsclient import exc
import os
CREATION_ATTRIBUTES = ['confirmed', 'name', 'services', 'capabilities',
'ceph_conf']
DISPLAY_ATTRIBUTES = ['ceph_conf']
PATCH_ATTRIBUTES = ['ceph_conf']
class StorageCephExternal(base.Resource):
    """Client-side resource representing an external Ceph storage backend."""

    def __repr__(self):
        # Mirrors the repr format used by the other storage backend resources.
        return "<storage_ceph_external %s>" % (self._info,)
class StorageCephExternalManager(base.Manager):
    """Manager for the /v1/storage_ceph_external REST resources."""

    resource_class = StorageCephExternal

    @staticmethod
    def _path(id=None):
        # Resource endpoint; an id (or custom action name) selects a sub-path.
        return ('/v1/storage_ceph_external/%s' % id
                if id else '/v1/storage_ceph_external')

    def list(self):
        """Return all external Ceph storage backends."""
        return self._list(self._path(), "storage_ceph_external")

    def get(self, stor_ceph_external_id=None):
        """Return the backend with the given id, or the first backend when no
        id is given; None when nothing is found.
        """
        try:
            if stor_ceph_external_id:
                return self._list(self._path(stor_ceph_external_id))[0]
            else:
                return self._list(self._path(), "storage_ceph_external")[0]
        except IndexError:
            return None

    def upload_file(self, parm):
        """Upload the Ceph configuration file named by parm['ceph_conf'].

        Raises:
            exc.CommandError: if no file is given, the file cannot be opened,
                or the server reports an error for the upload.
        """
        ceph_conf_file = parm.get('ceph_conf', None)
        if not ceph_conf_file:
            raise exc.CommandError('A Ceph configuration file must be provided '
                                   'for provisioning an external Ceph backend.')
        try:
            c_c_f = open(ceph_conf_file, 'rb')
        except Exception:
            raise exc.CommandError("Error: Could not open file %s." %
                                   ceph_conf_file)

        data = {"ceph_conf_fn": os.path.basename(ceph_conf_file)}
        try:
            try:
                resp = self._upload(self._path("ceph_conf_upload"), c_c_f,
                                    data=data)
                error = resp.get('error')
                if error:
                    raise exc.CommandError("%s" % error)
            except exc.HTTPNotFound:
                raise exc.CommandError(
                    'Cannot upload ceph config file. No response.')
        finally:
            # Always release the file handle (it was previously leaked).
            c_c_f.close()

    def create(self, **kwargs):
        """Upload the referenced conf file, then create the backend record.

        Only keys in CREATION_ATTRIBUTES are accepted; the stored 'ceph_conf'
        value is reduced to the file's basename.
        """
        new = {}
        for (key, value) in kwargs.items():
            if key in CREATION_ATTRIBUTES:
                new[key] = value
            else:
                raise exc.InvalidAttribute('%s' % key)

        self.upload_file(new)

        ceph_conf_file = new.get('ceph_conf', None)
        new.update({'ceph_conf': os.path.basename(ceph_conf_file)})

        return self._create(self._path(), new)

    def delete(self, stor_ceph_external_id):
        """Delete the backend with the given id."""
        return self._delete(self._path(stor_ceph_external_id))

    def update(self, stor_ceph_external_id, patch):
        """Apply a JSON patch; if it replaces /ceph_conf, upload the new file
        first and store only its basename in the patch value.
        """
        ceph_config_filename = None
        for item in patch:
            if item.get('path') == '/ceph_conf':
                ceph_config_filename = item.get('value')
                item['value'] = os.path.basename(ceph_config_filename)
                break

        if ceph_config_filename:
            self.upload_file({'ceph_conf': ceph_config_filename})

        return self._update(self._path(stor_ceph_external_id), patch)

View File

@ -73,6 +73,7 @@ from sysinv.api.controllers.v1 import storage_lvm
from sysinv.api.controllers.v1 import storage_file
from sysinv.api.controllers.v1 import storage_external
from sysinv.api.controllers.v1 import storage_tier
from sysinv.api.controllers.v1 import storage_ceph_external
from sysinv.api.controllers.v1 import system
from sysinv.api.controllers.v1 import trapdest
from sysinv.api.controllers.v1 import upgrade
@ -168,6 +169,9 @@ class V1(base.APIBase):
storage_tier = [link.Link]
"Links to the storage tier resource"
storage_ceph_external = [link.Link]
"Links to the storage exteral ceph resource"
ceph_mon = [link.Link]
"Links to the ceph mon resource"
@ -459,6 +463,16 @@ class V1(base.APIBase):
bookmark=True)
]
v1.storage_ceph_external = [
link.Link.make_link('self',
pecan.request.host_url,
'storage_ceph_external', ''),
link.Link.make_link('bookmark',
pecan.request.host_url,
'storage_ceph_external', '',
bookmark=True)
]
v1.ceph_mon = [link.Link.make_link('self',
pecan.request.host_url,
'ceph_mon', ''),
@ -733,6 +747,8 @@ class Controller(rest.RestController):
storage_external = storage_external.StorageExternalController()
storage_ceph = storage_ceph.StorageCephController()
storage_tiers = storage_tier.StorageTierController()
storage_ceph_external = \
storage_ceph_external.StorageCephExternalController()
ceph_mon = ceph_mon.CephMonController()
drbdconfig = drbdconfig.drbdconfigsController()
ialarms = alarm.AlarmController()

View File

@ -660,19 +660,28 @@ def _check(op, lvg):
float(allowed_min_mib) / 1024,
float(allowed_max_mib) / 1024)))
# remote instance backing only available for ceph only cinder
# backend. for Titanium Cloud that is initially configured as
# lvm backend ephemeral storage on lvm backend
# Instances backed by remote ephemeral storage can only be
# used on systems that have a Ceph (internal or external)
# backend.
if ((lvg_caps.get(constants.LVG_NOVA_PARAM_BACKING) ==
constants.LVG_NOVA_BACKING_REMOTE) and
not StorageBackendConfig.has_backend_configured(
pecan.request.dbapi,
constants.CINDER_BACKEND_CEPH,
pecan.request.rpcapi)):
constants.SB_TYPE_CEPH,
service=constants.SB_SVC_NOVA,
check_only_defaults=False,
rpcapi=pecan.request.rpcapi) and
not StorageBackendConfig.has_backend_configured(
pecan.request.dbapi,
constants.SB_TYPE_CEPH_EXTERNAL,
service=constants.SB_SVC_NOVA,
check_only_defaults=False,
rpcapi=pecan.request.rpcapi)):
raise wsme.exc.ClientSideError(
_('Invalid value for instance_backing. Instances '
'backed by remote ephemeral storage can only be '
'used on systems that have a Ceph Cinder backend.'))
'used on systems that have a Ceph (internal or '
'external) backend.'))
if (lvg['lvm_cur_lv'] > 1):
raise wsme.exc.ClientSideError(

View File

@ -46,10 +46,10 @@ from sysinv.openstack.common.gettextutils import _
from sysinv.openstack.common import uuidutils
from oslo_serialization import jsonutils
from sysinv.api.controllers.v1 import storage_ceph # noqa
from sysinv.api.controllers.v1 import storage_lvm # noqa
from sysinv.api.controllers.v1 import storage_file # noqa
from sysinv.api.controllers.v1 import storage_ceph # noqa
from sysinv.api.controllers.v1 import storage_lvm # noqa
from sysinv.api.controllers.v1 import storage_file # noqa
from sysinv.api.controllers.v1 import storage_ceph_external # noqa
LOG = log.getLogger(__name__)
@ -482,8 +482,8 @@ class StorageBackendController(rest.RestController):
# update
return _patch(storage_backend_uuid, patch)
rpc_storage_backend = objects.storage_backend.get_by_uuid(pecan.request.context,
storage_backend_uuid)
rpc_storage_backend = objects.storage_backend.get_by_uuid(
pecan.request.context, storage_backend_uuid)
# action = None
for p in patch:
# if '/action' in p['path']:
@ -539,8 +539,10 @@ class StorageBackendController(rest.RestController):
#
def _create(storage_backend):
# Get and call the specific backend create function based on the backend provided
backend_create = getattr(eval('storage_' + storage_backend['backend']), '_create')
# Get and call the specific backend create function based on the backend
# provided.
backend_create = getattr(eval('storage_' + storage_backend['backend']),
'_create')
new_backend = backend_create(storage_backend)
return new_backend
@ -551,9 +553,11 @@ def _create(storage_backend):
#
def _patch(storage_backend_uuid, patch):
rpc_storage_backend = objects.storage_backend.get_by_uuid(pecan.request.context,
storage_backend_uuid)
rpc_storage_backend = objects.storage_backend.get_by_uuid(
pecan.request.context, storage_backend_uuid)
# Get and call the specific backend patching function based on the backend provided
backend_patch = getattr(eval('storage_' + rpc_storage_backend.backend), '_patch')
# Get and call the specific backend patching function based on the backend
# provided.
backend_patch = getattr(eval('storage_' + rpc_storage_backend.backend),
'_patch')
return backend_patch(storage_backend_uuid, patch)

View File

@ -58,6 +58,7 @@ HIERA_DATA = {
constants.SB_SVC_CINDER: [],
constants.SB_SVC_GLANCE: [],
constants.SB_SVC_SWIFT: [],
constants.SB_SVC_NOVA: [],
}
@ -431,6 +432,11 @@ def _discover_and_validate_swift_hiera_data(caps_dict):
pass
def _discover_and_validate_nova_hiera_data(caps_dict):
# Currently there is no backend specific hiera_data for this backend
pass
def _check_backend_ceph(req, storage_ceph, confirmed=False):
# check for the backend parameters
capabilities = storage_ceph.get('capabilities', {})
@ -531,6 +537,31 @@ def _check_backend_ceph(req, storage_ceph, confirmed=False):
constants.SB_TYPE_CEPH))
def check_and_update_services(storage_ceph):
req_services = api_helper.getListFromServices(storage_ceph)
## If glance/nova is already a service on an external ceph backend, remove it from there
check_svcs = [constants.SB_SVC_GLANCE, constants.SB_SVC_NOVA]
check_data = {constants.SB_SVC_GLANCE: ['glance_pool'],
constants.SB_SVC_NOVA: ['ephemeral_pool']}
for s in check_svcs:
if s in req_services:
sb_list = pecan.request.dbapi.storage_backend_get_list()
if sb_list:
for sb in sb_list:
if (sb.backend == constants.SB_TYPE_CEPH_EXTERNAL and
s in sb.get('services')):
services = api_helper.getListFromServices(sb)
services.remove(s)
cap = sb.capabilities
for k in check_data[s]:
cap.pop(k, None)
values = {'services': ','.join(services),
'capabilities': cap,}
pecan.request.dbapi.storage_backend_update(sb.uuid, values)
def _apply_backend_changes(op, sb_obj):
services = api_helper.getListFromServices(sb_obj.as_dict())
# Make sure img_conversion partition is present
@ -633,6 +664,8 @@ def _create(storage_ceph):
storage_ceph,
storage_ceph.pop('confirmed', False))
check_and_update_services(storage_ceph)
# Conditionally update the DB based on any previous create attempts. This
# creates the StorageCeph object.
system = pecan.request.dbapi.isystem_get_one()
@ -903,6 +936,7 @@ def _patch(storceph_uuid, patch):
storceph_uuid)
object_gateway_install = False
add_nova_only = False
patch_obj = jsonpatch.JsonPatch(patch)
for p in patch_obj:
if p['path'] == '/capabilities':
@ -975,6 +1009,11 @@ def _patch(storceph_uuid, patch):
storceph_config.object_gateway = True
storceph_config.task = constants.SB_TASK_ADD_OBJECT_GATEWAY
object_gateway_install = True
if ((set(api_helper.getListFromServices(storceph_config.as_dict())) -
set(api_helper.getListFromServices(ostorceph.as_dict())) ==
set([constants.SB_SVC_NOVA])) and
(delta == set(['services']))):
add_nova_only = True
elif d == 'capabilities':
# Go through capabilities parameters and check
# if any values changed
@ -1065,10 +1104,12 @@ def _patch(storceph_uuid, patch):
LOG.info("SYS_I new storage_ceph: %s " % rpc_storceph.as_dict())
try:
check_and_update_services(rpc_storceph.as_dict())
rpc_storceph.save()
if (not quota_only_update or
storceph_config.state == constants.SB_STATE_CONFIG_ERR):
if ((not quota_only_update and not add_nova_only) or
(storceph_config.state == constants.SB_STATE_CONFIG_ERR)):
# Enable the backend changes:
_apply_backend_changes(constants.SB_API_OP_MODIFY,
rpc_storceph)

View File

@ -0,0 +1,589 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2017 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import jsonpatch
import copy
import os
from oslo_serialization import jsonutils
import pecan
from pecan import rest
from pecan import expose
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import link
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.api.controllers.v1.utils import SBApiHelper as api_helper
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv import objects
from sysinv.openstack.common import log
from sysinv.openstack.common import uuidutils
from sysinv.openstack.common.gettextutils import _
LOG = log.getLogger(__name__)
HIERA_DATA = {
'backend': [],
constants.SB_SVC_CINDER: ['cinder_pool'],
constants.SB_SVC_GLANCE: ['glance_pool'],
constants.SB_SVC_NOVA: ['ephemeral_pool']
}
class StorageCephExternalPatchType(types.JsonPatchType):
    """JSON-patch type for external Ceph backends.

    No attributes are mandatory in a patch for this backend.
    """

    @staticmethod
    def mandatory_attrs():
        return []
class StorageCephExternal(base.APIBase):
    """API representation of an external ceph storage.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of
    an external ceph storage.
    """

    uuid = types.uuid
    "Unique UUID for this external storage backend."

    links = [link.Link]
    "A list containing a self link and associated storage backend links."

    created_at = wtypes.datetime.datetime
    updated_at = wtypes.datetime.datetime

    ceph_conf = wtypes.text
    "Path to the configuration file for the external ceph cluster."

    # Inherited attributes from the base class
    backend = wtypes.text
    "Represents the storage backend (file, lvm, ceph, ceph_external or external)."

    name = wtypes.text
    "The name of the backend (to differentiate between multiple common backends)."

    state = wtypes.text
    "The state of the backend. It can be configured or configuring."

    task = wtypes.text
    "Current task of the corresponding cinder backend."

    services = wtypes.text
    "The openstack services that are supported by this storage backend."

    capabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
                    six.integer_types)}
    "Meta data for the storage backend"

    # Confirmation parameter [API-only field]
    confirmed = types.boolean
    "Represent confirmation that the backend operation should proceed"

    def __init__(self, **kwargs):
        # Defaults applied to any field the caller does not supply.
        defaults = {'uuid': uuidutils.generate_uuid(),
                    'state': constants.SB_STATE_CONFIGURING,
                    'task': constants.SB_TASK_NONE,
                    'capabilities': {},
                    'services': None,
                    'confirmed': False,
                    'ceph_conf': None}

        self.fields = objects.storage_ceph_external.fields.keys()

        # 'confirmed' is not part of objects.storage_backend.fields
        # (it's an API-only attribute)
        self.fields.append('confirmed')

        # Set the value for any of the field
        for k in self.fields:
            setattr(self, k, kwargs.get(k, defaults.get(k)))

    @classmethod
    def convert_with_links(cls, rpc_storage_ceph_external, expand=True):
        # Build the API object from the RPC/db object; when not expanding,
        # trim to the summary field set used by collection listings.
        stor_ceph_external = StorageCephExternal(**rpc_storage_ceph_external.as_dict())
        if not expand:
            stor_ceph_external.unset_fields_except(['uuid',
                                                    'created_at',
                                                    'updated_at',
                                                    'isystem_uuid',
                                                    'backend',
                                                    'name',
                                                    'state',
                                                    'task',
                                                    'services',
                                                    'capabilities',
                                                    'ceph_conf'])

        stor_ceph_external.links =\
            [link.Link.make_link('self', pecan.request.host_url,
                                 'storage_ceph_external',
                                 stor_ceph_external.uuid),
             link.Link.make_link('bookmark', pecan.request.host_url,
                                 'storage_ceph_external',
                                 stor_ceph_external.uuid,
                                 bookmark=True)]
        return stor_ceph_external
class StorageCephExternalCollection(collection.Collection):
    """API representation of a collection of external ceph storage backends."""

    storage_ceph_external = [StorageCephExternal]
    "A list containing ceph external storage backend objects."

    def __init__(self, **kwargs):
        self._type = 'storage_ceph_external'

    @classmethod
    def convert_with_links(cls, rpc_storage_ceph_external, limit, url=None,
                           expand=False, **kwargs):
        # Wrap each RPC/db object in the API type and compute the paging
        # 'next' link for the collection.
        collection = StorageCephExternalCollection()
        collection.storage_ceph_external = \
            [StorageCephExternal.convert_with_links(p, expand)
             for p in rpc_storage_ceph_external]
        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection
# Lock name used to serialize mutating operations on this controller.
LOCK_NAME = 'StorageCephExternalController'


class StorageCephExternalController(rest.RestController):
    """REST controller for ceph external storage backend."""

    _custom_actions = {
        'detail': ['GET'],
        'ceph_conf_upload': ['POST']
    }

    def _get_storage_ceph_external_collection(
            self, marker, limit, sort_key, sort_dir, expand=False,
            resource_url=None):
        # Shared helper for list endpoints: validate the paging arguments,
        # resolve the marker, query the db and convert to the API collection.
        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.storage_ceph_external.get_by_uuid(
                pecan.request.context,
                marker)

        ceph_external_storage_backends = \
            pecan.request.dbapi.storage_ceph_external_get_list(
                limit, marker_obj, sort_key=sort_key, sort_dir=sort_dir)

        return StorageCephExternalCollection \
            .convert_with_links(ceph_external_storage_backends,
                                limit,
                                url=resource_url,
                                expand=expand,
                                sort_key=sort_key,
                                sort_dir=sort_dir)

    @wsme_pecan.wsexpose(StorageCephExternalCollection, types.uuid, int,
                         wtypes.text, wtypes.text)
    def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of ceph external storage backends."""
        return self._get_storage_ceph_external_collection(
            marker, limit, sort_key, sort_dir)

    @wsme_pecan.wsexpose(StorageCephExternal, types.uuid)
    def get_one(self, storage_ceph_external_uuid):
        """Retrieve information about the given ceph external storage
        backend.
        """
        rpc_storage_ceph_external = objects.storage_ceph_external.get_by_uuid(
            pecan.request.context,
            storage_ceph_external_uuid)
        return StorageCephExternal.convert_with_links(
            rpc_storage_ceph_external)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(StorageCephExternal, body=StorageCephExternal)
    def post(self, storage_ceph_external):
        """Create a new external storage backend."""
        try:
            storage_ceph_external = storage_ceph_external.as_dict()
            new_storage_ceph_external = _create(storage_ceph_external)

        except exception.SysinvException as e:
            LOG.exception(e)
            raise wsme.exc.ClientSideError(_("Invalid data: failed to create "
                                            "a storage_ceph_external record."))
        return StorageCephExternal.convert_with_links(new_storage_ceph_external)

    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [StorageCephExternalPatchType])
    @wsme_pecan.wsexpose(StorageCephExternal, types.uuid,
                         body=[StorageCephExternalPatchType])
    def patch(self, storexternal_uuid, patch):
        """Update the current external storage configuration."""
        return _patch(storexternal_uuid, patch)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, storageexternal_uuid):
        """Delete a backend."""
        # Deletion is intentionally a no-op: the endpoint returns 204
        # without removing the backend.
        # return _delete(storageexternal_uuid)

    @expose('json')
    @cutils.synchronized(LOCK_NAME)
    def ceph_conf_upload(self, file):
        """ Upload Ceph Config file
        """
        # NOTE(review): the 'file' parameter shadows the builtin; it is
        # immediately rebound from the POST body. Kept for API compatibility.
        file = pecan.request.POST['file']
        ceph_conf_fn = pecan.request.POST.get('ceph_conf_fn')

        # The internal cluster's config filename is reserved; reject it.
        if ceph_conf_fn == constants.SB_TYPE_CEPH_CONF_FILENAME:
            msg = _("The %s name is reserved for the internally managed Ceph "
                    "cluster.\nPlease use a different name and try again." %
                    constants.SB_TYPE_CEPH_CONF_FILENAME)
            return dict(success="", error=msg)

        if not file.filename:
            return dict(success="", error="Error: No file uploaded")
        try:
            # Read the uploaded file and hand its contents to the conductor
            # to persist under the given filename.
            file.file.seek(0, os.SEEK_SET)
            file_content = file.file.read()

            pecan.request.rpcapi.store_ceph_external_config(
                pecan.request.context, file_content, ceph_conf_fn)
        except Exception as e:
            LOG.exception(e)
            return dict(
                success="",
                error=str(e))

        return dict(success="Success: ceph config file is uploaded", error="")
def _discover_and_validate_backend_hiera_data(caps_dict):
    # Currently there is no backend-specific hiera data to validate for the
    # external Ceph backend; this placeholder keeps the validation hook
    # symmetric with the other storage backends.
    pass
def _check_and_update_services(storage_ceph_ext):
    """Ensure glance/nova are served by at most one Ceph backend.

    If a requested service (glance or nova) is already provided by another
    internal or external Ceph backend, remove the service - and its
    service-specific pool capabilities - from that backend in the db.
    """
    svcs = api_helper.getListFromServices(storage_ceph_ext)
    # If glance/nova is already a service on other rbd backend, remove it
    # from there.
    check_svcs = [constants.SB_SVC_GLANCE, constants.SB_SVC_NOVA]
    for s in check_svcs:
        if s in svcs:
            sb_list = pecan.request.dbapi.storage_backend_get_list()
            if sb_list:
                for sb in sb_list:
                    # Skip the backend being configured itself; only strip the
                    # service from *other* Ceph-type backends.
                    if (sb.isystem_uuid != storage_ceph_ext.get("isystem_uuid", None) and
                            sb.backend in [constants.SB_TYPE_CEPH,
                                           constants.SB_TYPE_CEPH_EXTERNAL] and
                            s in sb.get('services')):
                        services = api_helper.getListFromServices(sb)
                        services.remove(s)
                        cap = sb.capabilities
                        # Drop the pool parameters tied to the removed service.
                        for k in HIERA_DATA[s]:
                            cap.pop(k, None)
                        values = {'services': ','.join(services),
                                  'capabilities': cap,}
                        pecan.request.dbapi.storage_backend_update(sb.uuid, values)
def _check_backend_ceph_external(storage_ceph_ext):
    """Prechecks for adding an external Ceph backend.

    Validates the requested services against the supported set, checks
    that every enabled service supplies its required capability
    parameters (and vice versa), rejects unknown parameters, and checks
    the supplied Ceph configuration file name and backend name.
    Raises wsme.exc.ClientSideError on the first violation.
    """
    # go through the service list and validate
    svcs = api_helper.getListFromServices(storage_ceph_ext)
    for svc in svcs:
        if svc not in constants.SB_CEPH_EXTERNAL_SVCS_SUPPORTED:
            raise wsme.exc.ClientSideError("Service %s is not supported for the"
                                           " %s backend" %
                                           (svc, constants.SB_TYPE_CEPH_EXTERNAL))

    # check for the backend parameters
    capabilities = storage_ceph_ext.get('capabilities', {})

    # Discover the latest hiera_data for the supported service
    _discover_and_validate_backend_hiera_data(capabilities)

    # Each requested service must provide all of its required parameters.
    for svc in svcs:
        for k in HIERA_DATA[svc]:
            if not capabilities.get(k, None):
                raise wsme.exc.ClientSideError("Missing required %s service "
                                               "parameter: %s" % (svc, k))

    # Conversely, a provided parameter implies its service must be enabled.
    for svc in constants.SB_CEPH_EXTERNAL_SVCS_SUPPORTED:
        for k in HIERA_DATA[svc]:
            if capabilities.get(k, None) and svc not in svcs:
                raise wsme.exc.ClientSideError("Missing required service %s for "
                                               "parameter: %s" % (svc, k))

    # Reject any capability key that no supported service declares.
    valid_pars = [i for sublist in HIERA_DATA.values() for i in sublist]
    if len(set(capabilities.keys()) - set(valid_pars)) > 0:
        raise wsme.exc.ClientSideError("Parameter %s is not valid "
                                       % list(set(capabilities.keys()) - set(valid_pars)))

    # Check the Ceph configuration file
    ceph_conf_file = storage_ceph_ext.get('ceph_conf')
    if ceph_conf_file:
        if (ceph_conf_file == constants.SB_TYPE_CEPH_CONF_FILENAME):
            # Fix: interpolate after translation (oslo.i18n) so the msgid
            # matches the message catalog entry.
            msg = _("The %s name is reserved for the internally managed Ceph "
                    "cluster.\nPlease use a different name and try again.") % \
                constants.SB_TYPE_CEPH_CONF_FILENAME
            raise wsme.exc.ClientSideError(msg)
    else:
        # Raise error if the Ceph configuration file is not provided.
        msg = _("A Ceph configuration file must be provided for provisioning "
                "an external Ceph cluster.")
        raise wsme.exc.ClientSideError(msg)

    # If a conf file is specified, make sure the backend's name is not already
    # used / one of the default names for other backends.
    if ceph_conf_file:
        backend_name = storage_ceph_ext.get('name')
        backend_list = pecan.request.dbapi.storage_backend_get_list()
        for backend in backend_list:
            if backend.isystem_uuid != storage_ceph_ext.get("isystem_uuid", None):
                # NOTE(review): these name checks only run when another
                # backend from a different system exists — confirm whether
                # the reserved-name check should be unconditional.
                if backend_name in constants.SB_DEFAULT_NAMES.values():
                    msg = _("The \"%s\" name is reserved for internally managed "
                            "backends.") % backend_name
                    raise wsme.exc.ClientSideError(msg)
                if backend.name == backend_name:
                    msg = _("The \"%s\" name is already used for another backend.") % \
                        backend_name
                    raise wsme.exc.ClientSideError(msg)
def _apply_ceph_external_backend_changes(op, sb_obj, orig_sb_obj=None):
    """Push an external-Ceph backend change to the rest of the system.

    :param op: one of the constants.SB_API_OP_* operations.
    :param sb_obj: dict-like backend data after the operation.
    :param orig_sb_obj: pre-change backend data; required for MODIFY
        (it is dereferenced unconditionally on that path) despite the
        None default.
    :raises wsme.exc.ClientSideError: on DELETE (unsupported) or if
        distributing the config file fails.
    """
    if ((op == constants.SB_API_OP_CREATE) or
        (op == constants.SB_API_OP_MODIFY and
         sb_obj.get('ceph_conf') != orig_sb_obj.get('ceph_conf'))):
        # New backend, or a modify that changed the config file: mark the
        # backend as applying its config file, distribute the file to all
        # hosts, then regenerate service manifests.
        values = {'task': constants.SB_TASK_APPLY_CONFIG_FILE}
        pecan.request.dbapi.storage_ceph_external_update(sb_obj.get('uuid'), values)

        try:
            pecan.request.rpcapi.distribute_ceph_external_config(
                pecan.request.context, sb_obj.get('ceph_conf'))
        except Exception as e:
            LOG.exception(e)
            msg = _("Failed to distribute ceph config file.")
            raise wsme.exc.ClientSideError(msg)

        services = api_helper.getListFromServices(sb_obj)

        pecan.request.rpcapi.update_ceph_external_config(
            pecan.request.context,
            sb_obj.get('uuid'),
            services)
    elif op == constants.SB_API_OP_DELETE:
        msg = _("Delete a Ceph external backend is not supported currently.")
        raise wsme.exc.ClientSideError(msg)
    else:
        # Modify without a config-file change: compare the per-service
        # pool capabilities and reconfigure only the services whose
        # HIERA_DATA values changed.
        caps = sb_obj.get('capabilities', {})
        orig_caps = orig_sb_obj.get('capabilities', {})
        services = []
        for svc in constants.SB_CEPH_EXTERNAL_SVCS_SUPPORTED:
            for k in HIERA_DATA[svc]:
                if caps.get(k, None) != orig_caps.get(k, None):
                    services.append(svc)
        pecan.request.rpcapi.update_ceph_external_config(
            pecan.request.context,
            sb_obj.get('uuid'),
            services)
def _set_defaults_ceph_external(storage_ceph_ext):
    """Merge caller-supplied values over the ceph-external defaults.

    Returns a new backend dict in which any field the caller omitted is
    filled in from the defaults below, via the shared helper that also
    normalizes services/capabilities.
    """
    defaults = {
        'backend': constants.SB_TYPE_CEPH_EXTERNAL,
        'name': constants.SB_DEFAULT_NAMES[
            constants.SB_TYPE_CEPH_EXTERNAL].format(0),
        'state': constants.SB_STATE_CONFIGURING,
        'task': None,
        'services': None,
        'ceph_conf': None,
        'capabilities': {},
    }
    return api_helper.set_backend_data(
        storage_ceph_ext,
        defaults,
        HIERA_DATA,
        constants.SB_CEPH_EXTERNAL_SVCS_SUPPORTED)
def _create(storage_ceph_ext):
    """Create a new external Ceph storage backend.

    Applies defaults, runs common and ceph-external-specific semantic
    checks, claims glance/nova from other rbd backends, persists the
    backend, and triggers config-file distribution / manifest updates.
    Returns the created StorageBackend object.
    """
    storage_ceph_ext = _set_defaults_ceph_external(storage_ceph_ext)

    # Execute the common semantic checks for all backends, if a specific backend
    # is not specified this will not return
    api_helper.common_checks(constants.SB_API_OP_CREATE,
                             storage_ceph_ext)

    _check_backend_ceph_external(storage_ceph_ext)

    _check_and_update_services(storage_ceph_ext)

    # Conditionally update the DB based on any previous create attempts. This
    # creates the StorageCeph object.
    system = pecan.request.dbapi.isystem_get_one()
    storage_ceph_ext['forisystemid'] = system.id
    storage_ceph_ext_obj = pecan.request.dbapi.storage_ceph_external_create(
        storage_ceph_ext)

    # Retrieve the main StorageBackend object.
    storage_backend_obj = pecan.request.dbapi.storage_backend_get(
        storage_ceph_ext_obj.id)

    # NOTE(review): the input dict (not the freshly created DB object) is
    # passed here; _apply_ceph_external_backend_changes updates by
    # sb_obj.get('uuid'), so presumably the dict carries a 'uuid' by this
    # point — confirm against the API layer.
    _apply_ceph_external_backend_changes(
        constants.SB_API_OP_CREATE, sb_obj=storage_ceph_ext)

    return storage_backend_obj
#
# Update/Modify/Patch
#
def _hiera_data_semantic_checks(caps_dict):
    """Validate each individual data value to make sure it's of the correct
    type and value.

    Placeholder: no per-value validation is implemented yet; callers
    invoke this before merging patched capabilities.
    """
    pass
def _pre_patch_checks(storage_ceph_ext_obj, patch_obj):
    """Validate and normalize each JSON-patch entry before it is applied.

    Capabilities patches are merged over the current capabilities so
    unmentioned keys survive; services patches may only add services
    (removal is rejected); ceph_conf changes are handled later.
    """
    current = storage_ceph_ext_obj.as_dict()
    for entry in patch_obj:
        path = entry['path']
        if path == '/capabilities':
            new_caps = entry['value']
            # Validate the change to make sure it is valid.
            _hiera_data_semantic_checks(new_caps)
            # Carry forward any capability the patch does not mention.
            existing_caps = current.get('capabilities', {})
            for key, value in existing_caps.items():
                if key not in new_caps:
                    new_caps[key] = value
            entry['value'] = new_caps
        elif path == '/services':
            existing = set()
            if storage_ceph_ext_obj.services:
                existing = set(storage_ceph_ext_obj.services.split(','))
            requested = set(entry['value'].split(','))
            removed = existing - requested
            # Only adding services is supported; removal is rejected.
            if removed:
                raise wsme.exc.ClientSideError(
                    _("Removing %s is not supported.") % ','.join(removed))
            entry['value'] = ','.join(requested)
        elif path == '/ceph_conf':
            pass
def _patch(stor_ceph_ext_uuid, patch):
    """Apply a JSON patch to an existing ceph-external backend.

    Validates the patch, restricts which fields may change, re-runs the
    backend semantic checks, saves the object, then triggers config
    distribution / manifest updates as needed.  Returns the updated
    backend converted for the API response.

    :param stor_ceph_ext_uuid: uuid of the backend to modify.
    :param patch: JSON-patch document from the API request.
    :raises exception.PatchError: if the patch cannot be applied.
    :raises wsme.exc.ClientSideError: on semantic violations or when no
        change is detected.
    """
    # Obtain current storage object.
    rpc_stor_ceph_ext = objects.storage_ceph_external.get_by_uuid(
        pecan.request.context,
        stor_ceph_ext_uuid)

    # Keep a pristine copy for the before/after comparison below.
    ostor_ceph_ext = copy.deepcopy(rpc_stor_ceph_ext)

    patch_obj = jsonpatch.JsonPatch(patch)
    for p in patch_obj:
        if p['path'] == '/capabilities':
            # Capabilities arrive as a JSON string; decode before merging.
            p['value'] = jsonutils.loads(p['value'])

    # perform checks based on the current vs. requested modifications
    _pre_patch_checks(rpc_stor_ceph_ext, patch_obj)

    # Obtain a storage object with the patch applied.
    try:
        stor_ceph_ext_config = StorageCephExternal(**jsonpatch.apply_patch(
            rpc_stor_ceph_ext.as_dict(),
            patch_obj))
    except utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update current storage object: copy each changed field so that
    # obj_what_changed() below reflects exactly the patched fields.
    for field in objects.storage_ceph_external.fields:
        if (field in stor_ceph_ext_config.as_dict() and
                rpc_stor_ceph_ext[field] != stor_ceph_ext_config.as_dict()[field]):
            rpc_stor_ceph_ext[field] = stor_ceph_ext_config.as_dict()[field]

    # Obtain the fields that have changed.
    delta = rpc_stor_ceph_ext.obj_what_changed()
    if len(delta) == 0 and rpc_stor_ceph_ext['state'] != constants.SB_STATE_CONFIG_ERR:
        raise wsme.exc.ClientSideError(
            _("No changes to the existing backend settings were detected."))

    # Only these attributes may be modified through this operation.
    allowed_attributes = ['services', 'ceph_conf', 'capabilities', 'task']
    for d in delta:
        if d not in allowed_attributes:
            raise wsme.exc.ClientSideError(
                _("Can not modify '%s' with this operation." % d))

    LOG.info("SYS_I orig storage_ceph_external: %s " % ostor_ceph_ext.as_dict())
    LOG.info("SYS_I new storage_ceph_external: %s " % stor_ceph_ext_config.as_dict())

    # Execute the common semantic checks for all backends, if backend is not
    # present this will not return
    api_helper.common_checks(constants.SB_API_OP_MODIFY,
                             rpc_stor_ceph_ext)

    _check_backend_ceph_external(rpc_stor_ceph_ext)

    _check_and_update_services(rpc_stor_ceph_ext)

    rpc_stor_ceph_ext.save()

    # Distribute the (possibly new) config file and update manifests,
    # comparing against the pristine copy to decide what changed.
    _apply_ceph_external_backend_changes(
        constants.SB_API_OP_MODIFY, sb_obj=rpc_stor_ceph_ext, orig_sb_obj=ostor_ceph_ext)

    return StorageCephExternal.convert_with_links(rpc_stor_ceph_ext)

View File

@ -465,11 +465,12 @@ class SBApiHelper(object):
existing_backends_by_type = set(bk['backend'] for bk in backends)
if (backend_type in existing_backends_by_type and
backend_type != constants.SB_TYPE_CEPH):
backend_type not in [constants.SB_TYPE_CEPH, constants.SB_TYPE_CEPH_EXTERNAL]):
msg = _("Only one %s backend is supported." % backend_type)
raise wsme.exc.ClientSideError(msg)
elif (backend_type not in existing_backends_by_type and
elif (backend_type != constants.SB_TYPE_CEPH_EXTERNAL and
backend_type not in existing_backends_by_type and
backend_name != constants.SB_DEFAULT_NAMES[backend_type]):
msg = _("The primary %s backend must use the default name: %s."
% (backend_type,
@ -490,9 +491,10 @@ class SBApiHelper(object):
raise wsme.exc.ClientSideError(msg)
else:
for ctrl in ctrls:
if ctrl.availability != constants.AVAILABILITY_AVAILABLE:
if ctrl.availability not in [constants.AVAILABILITY_AVAILABLE,
constants.AVAILABILITY_DEGRADED]:
msg = _("Storage backend operations require both controllers "
"to be enabled and available.")
"to be enabled and available/degraded.")
raise wsme.exc.ClientSideError(msg)
if existing_backend and operation == constants.SB_API_OP_CREATE:

View File

@ -308,32 +308,40 @@ VENDOR_ID_LIO = 'LIO-ORG'
SB_TYPE_FILE = 'file'
SB_TYPE_LVM = 'lvm'
SB_TYPE_CEPH = 'ceph'
SB_TYPE_CEPH_EXTERNAL = 'ceph-external'
SB_TYPE_EXTERNAL = 'external'
SB_SUPPORTED = [SB_TYPE_FILE, SB_TYPE_LVM, SB_TYPE_CEPH, SB_TYPE_EXTERNAL]
SB_SUPPORTED = [SB_TYPE_FILE,
SB_TYPE_LVM,
SB_TYPE_CEPH,
SB_TYPE_CEPH_EXTERNAL,
SB_TYPE_EXTERNAL]
# Storage backend default names
SB_DEFAULT_NAME_SUFFIX = "-store"
SB_DEFAULT_NAMES = {
SB_TYPE_FILE:SB_TYPE_FILE + SB_DEFAULT_NAME_SUFFIX,
SB_TYPE_FILE: SB_TYPE_FILE + SB_DEFAULT_NAME_SUFFIX,
SB_TYPE_LVM: SB_TYPE_LVM + SB_DEFAULT_NAME_SUFFIX,
SB_TYPE_CEPH: SB_TYPE_CEPH + SB_DEFAULT_NAME_SUFFIX,
SB_TYPE_EXTERNAL:'shared_services'
SB_TYPE_CEPH_EXTERNAL: SB_TYPE_CEPH_EXTERNAL + SB_DEFAULT_NAME_SUFFIX,
SB_TYPE_EXTERNAL: 'shared_services'
}
# Storage backends services
SB_SVC_CINDER = 'cinder'
SB_SVC_GLANCE = 'glance'
SB_SVC_NOVA = 'nova' # usage reporting only
SB_SVC_NOVA = 'nova'
SB_SVC_SWIFT = 'swift'
SB_FILE_SVCS_SUPPORTED = [SB_SVC_GLANCE]
SB_LVM_SVCS_SUPPORTED = [SB_SVC_CINDER]
SB_CEPH_SVCS_SUPPORTED = [SB_SVC_GLANCE, SB_SVC_CINDER, SB_SVC_SWIFT] # supported primary tier svcs
SB_CEPH_SVCS_SUPPORTED = [SB_SVC_GLANCE, SB_SVC_CINDER, SB_SVC_SWIFT, SB_SVC_NOVA] # supported primary tier svcs
SB_CEPH_EXTERNAL_SVCS_SUPPORTED = [SB_SVC_CINDER, SB_SVC_GLANCE, SB_SVC_NOVA]
SB_EXTERNAL_SVCS_SUPPORTED = [SB_SVC_CINDER, SB_SVC_GLANCE]
# Storage backend: Service specific backend nomenclature
CINDER_BACKEND_CEPH = SB_TYPE_CEPH
CINDER_BACKEND_CEPH_EXTERNAL = SB_TYPE_CEPH_EXTERNAL
CINDER_BACKEND_LVM = SB_TYPE_LVM
GLANCE_BACKEND_FILE = SB_TYPE_FILE
GLANCE_BACKEND_RBD = 'rbd'
@ -351,9 +359,15 @@ SB_TIER_CEPH_SECONDARY_SVCS = [SB_SVC_CINDER] # supported secondary tier svcs
SB_TIER_STATUS_DEFINED = 'defined'
SB_TIER_STATUS_IN_USE = 'in-use'
# File name reserved for internal ceph cluster.
SB_TYPE_CEPH_CONF_FILENAME = "ceph.conf"
# Glance images path when it is file backended
GLANCE_IMAGE_PATH = tsc.CGCS_PATH + "/" + SB_SVC_GLANCE + "/images"
# Path for Ceph (internal and external) config files
CEPH_CONF_PATH = "/etc/ceph/"
# Requested storage backend API operations
SB_API_OP_CREATE = "create"
SB_API_OP_MODIFY = "modify"
@ -367,8 +381,10 @@ SB_STATE_CONFIG_ERR = 'configuration-failed'
# Storage backend tasks
SB_TASK_NONE = None
SB_TASK_APPLY_MANIFESTS = 'applying-manifests'
SB_TASK_APPLY_CONFIG_FILE = 'applying-config-file'
SB_TASK_RECONFIG_CONTROLLER = 'reconfig-controller'
SB_TASK_PROVISION_STORAGE = 'provision-storage'
SB_TASK_PROVISION_SERVICES = 'provision-services'
SB_TASK_RECONFIG_COMPUTE = 'reconfig-compute'
SB_TASK_RESIZE_CEPH_MON_LV = 'resize-ceph-mon-lv'
SB_TASK_ADD_OBJECT_GATEWAY = 'add-object-gateway'

View File

@ -62,6 +62,12 @@ class StorageBackendConfig(object):
storage_externals = api.storage_external_get_list()
if storage_externals:
return storage_externals[0]
elif target == constants.SB_TYPE_CEPH_EXTERNAL:
# Support multiple ceph external backends
storage_ceph_externals = api.storage_ceph_external_get_list()
if storage_ceph_externals:
return storage_ceph_externals[0]
return None
@staticmethod
@ -119,12 +125,30 @@ class StorageBackendConfig(object):
return None
@staticmethod
def has_backend_configured(dbapi, target, rpcapi=None):
def get_configuring_target_backend(api, target):
"""Get the primary backend that is configuring. """
backend_list = api.storage_backend_get_list()
for backend in backend_list:
if backend.state == constants.SB_STATE_CONFIGURING and \
backend.backend == target:
# At this point we can have but only max 1 configuring backend
# at any moment
return backend
# it is normal there isn't one being configured
return None
@staticmethod
def has_backend_configured(dbapi, target, service=None,
check_only_defaults=True, rpcapi=None):
""" Check is a backend is configured. """
# If cinder is a shared service on another region and
# we want to know if the ceph backend is configured,
# send a rpc to conductor which sends a query to the primary
system = dbapi.isystem_get_one()
shared_services = system.capabilities.get('shared_services', None)
configured = False
if (shared_services is not None and
constants.SERVICE_TYPE_VOLUME in shared_services and
target == constants.SB_TYPE_CEPH and
@ -135,10 +159,19 @@ class StorageBackendConfig(object):
backend_list = dbapi.storage_backend_get_list()
for backend in backend_list:
if backend.state == constants.SB_STATE_CONFIGURED and \
backend.backend == target and \
backend.name == constants.SB_DEFAULT_NAMES[target]:
return True
return False
backend.backend == target:
configured = True
break
# Supplementary semantics
if configured:
if check_only_defaults and \
backend.name != constants.SB_DEFAULT_NAMES[target]:
configured = False
if service and service not in backend.services:
configured = False
return configured
@staticmethod
def has_backend(api, target):

View File

@ -3398,7 +3398,7 @@ class ConductorManager(service.PeriodicService):
personalities = [db_host.personality]
config_uuid = self._config_update_hosts(context,
personalities,
host_uuid=host_uuid,
host_uuids=[host_uuid],
reboot=False)
config_dict = {
"host_uuids": host_uuid,
@ -3412,7 +3412,7 @@ class ConductorManager(service.PeriodicService):
self._config_apply_runtime_manifest(context,
config_uuid,
config_dict,
host_uuid=host_uuid)
host_uuids=[host_uuid])
def ipartition_update_by_ihost(self, context,
ihost_uuid, ipart_dict_array):
@ -4399,7 +4399,7 @@ class ConductorManager(service.PeriodicService):
"classes": ['openstack::keystone::endpoint::runtime']
}
self._config_apply_runtime_manifest(
context, config_uuid, config_dict, host_uuid=active_host.uuid)
context, config_uuid, config_dict, host_uuids=[active_host.uuid])
# apply filesystem config changes if all controllers at target
standby_config_target_flipped = None
@ -5475,66 +5475,194 @@ class ConductorManager(service.PeriodicService):
def update_ceph_config(self, context, sb_uuid, services):
"""Update the manifests for Cinder Ceph backend"""
personalities = [constants.CONTROLLER]
# Update service table
self.update_service_table_for_cinder()
if (constants.SB_SVC_CINDER in services or
constants.SB_SVC_GLANCE in services):
personalities = [constants.CONTROLLER]
# TODO(oponcea): Uncomment when SM supports in-service config reload
# ctrls = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
# valid_ctrls = [ctrl for ctrl in ctrls if
# ctrl.administrative == constants.ADMIN_UNLOCKED and
# ctrl.availability == constants.AVAILABILITY_AVAILABLE]
host = utils.HostHelper.get_active_controller(self.dbapi)
classes = ['platform::partitions::runtime',
'platform::lvm::controller::runtime',
'platform::haproxy::runtime',
'openstack::keystone::endpoint::runtime',
'platform::filesystem::img_conversions::runtime',
'platform::ceph::controller::runtime',
]
if constants.SB_SVC_GLANCE in services:
classes.append('openstack::glance::api::runtime')
if constants.SB_SVC_CINDER in services:
classes.append('openstack::cinder::runtime')
classes.append('platform::sm::norestart::runtime')
config_dict = {"personalities": personalities,
"host_uuids": host.uuid,
# "host_uuids": [ctrl.uuid for ctrl in valid_ctrls],
"classes": classes,
puppet_common.REPORT_STATUS_CFG: puppet_common.REPORT_CEPH_BACKEND_CONFIG,
}
# Update service table
self.update_service_table_for_cinder()
# TODO(oponcea) once sm supports in-service config reload always
# set reboot=False
active_controller = utils.HostHelper.get_active_controller(self.dbapi)
if utils.is_host_simplex_controller(active_controller):
reboot = False
# TODO(oponcea): Uncomment when SM supports in-service config reload
# ctrls = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
# valid_ctrls = [ctrl for ctrl in ctrls if
# ctrl.administrative == constants.ADMIN_UNLOCKED and
# ctrl.availability == constants.AVAILABILITY_AVAILABLE]
host = utils.HostHelper.get_active_controller(self.dbapi)
classes = ['platform::partitions::runtime',
'platform::lvm::controller::runtime',
'platform::haproxy::runtime',
'openstack::keystone::endpoint::runtime',
'platform::filesystem::img_conversions::runtime',
'platform::ceph::controller::runtime',
]
if constants.SB_SVC_GLANCE in services:
classes.append('openstack::glance::api::runtime')
if constants.SB_SVC_CINDER in services:
classes.append('openstack::cinder::runtime')
classes.append('platform::sm::norestart::runtime')
config_dict = {"personalities": personalities,
"host_uuids": host.uuid,
# "host_uuids": [ctrl.uuid for ctrl in valid_ctrls],
"classes": classes,
puppet_common.REPORT_STATUS_CFG: puppet_common.REPORT_CEPH_BACKEND_CONFIG,
}
# TODO(oponcea) once sm supports in-service config reload always
# set reboot=False
active_controller = utils.HostHelper.get_active_controller(self.dbapi)
if utils.is_host_simplex_controller(active_controller):
reboot = False
else:
reboot = True
# Set config out-of-date for controllers
config_uuid = self._config_update_hosts(context,
personalities,
reboot=reboot)
# TODO(oponcea): Set config_uuid to a random value to keep Config out-of-date.
# Once sm supports in-service config reload, always set config_uuid=config_uuid
# in _config_apply_runtime_manifest and remove the code below.
active_controller = utils.HostHelper.get_active_controller(self.dbapi)
if utils.is_host_simplex_controller(active_controller):
new_uuid = config_uuid
else:
new_uuid = str(uuid.uuid4())
self._config_apply_runtime_manifest(context,
config_uuid=new_uuid,
config_dict=config_dict)
# Update initial task states
values = {'state': constants.SB_STATE_CONFIGURING,
'task': constants.SB_TASK_APPLY_MANIFESTS}
self.dbapi.storage_ceph_update(sb_uuid, values)
else:
reboot = True
values = {'state': constants.SB_STATE_CONFIGURED,
'task': None}
self.dbapi.storage_ceph_update(sb_uuid, values)
# Set config out-of-date for controllers
config_uuid = self._config_update_hosts(context,
personalities,
reboot=reboot)
if constants.SB_SVC_NOVA in services:
hosts_uuid = self.hosts_with_nova_local(constants.LVG_NOVA_BACKING_REMOTE)
if hosts_uuid:
personalities = [constants.CONTROLLER, constants.COMPUTE]
self._config_update_hosts(context,
personalities,
host_uuids=hosts_uuid,
reboot=True)
# TODO(oponcea): Set config_uuid to a random value to keep Config out-of-date.
# Once sm supports in-service config reload, allways set config_uuid=config_uuid
# in _config_apply_runtime_manifest and remove code bellow.
active_controller = utils.HostHelper.get_active_controller(self.dbapi)
if utils.is_host_simplex_controller(active_controller):
new_uuid = config_uuid
def hosts_with_nova_local(self, backing_type):
    """Return uuids of compute-capable hosts whose nova-local LVG uses
    the given instance-backing type.

    Hosts qualify if their personality is compute or compute appears in
    their subfunctions.
    """
    matching = []
    for host in self.dbapi.ihost_get_list():
        is_compute = (
            (host.personality and host.personality == constants.COMPUTE) or
            (host.subfunctions and constants.COMPUTE in host.subfunctions))
        if not is_compute:
            continue
        for lvg in self.dbapi.ilvg_get_by_ihost(host['uuid']):
            if (lvg['lvm_vg_name'] == constants.LVG_NOVA_LOCAL and
                    lvg['capabilities'].get(
                        constants.LVG_NOVA_PARAM_BACKING) == backing_type):
                matching.append(host['uuid'])
    return matching
def update_ceph_external_config(self, context, sb_uuid, services):
"""Update the manifests for Cinder/Glance External Ceph backend"""
if (constants.SB_SVC_CINDER in services or
constants.SB_SVC_GLANCE in services):
personalities = [constants.CONTROLLER]
# Update service table
self.update_service_table_for_cinder()
ctrls = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
valid_ctrls = [ctrl for ctrl in ctrls if
(ctrl.administrative == constants.ADMIN_LOCKED and
ctrl.availability == constants.AVAILABILITY_ONLINE) or
(ctrl.administrative == constants.ADMIN_UNLOCKED and
ctrl.operational == constants.OPERATIONAL_ENABLED)]
classes = ['platform::partitions::runtime',
'platform::lvm::controller::runtime',
'platform::haproxy::runtime',
'openstack::keystone::endpoint::runtime',
'platform::filesystem::img_conversions::runtime',
]
if constants.SB_SVC_GLANCE in services:
classes.append('openstack::glance::api::runtime')
if constants.SB_SVC_CINDER in services:
classes.append('openstack::cinder::runtime')
classes.append('platform::sm::norestart::runtime')
report_config = puppet_common.REPORT_CEPH_EXTERNAL_BACKEND_CONFIG
config_dict = {"personalities": personalities,
"host_uuids": [ctrl.uuid for ctrl in valid_ctrls],
"classes": classes,
puppet_common.REPORT_STATUS_CFG: report_config,}
# TODO(oponcea) once sm supports in-service config reload always
# set reboot=False
active_controller = utils.HostHelper.get_active_controller(self.dbapi)
if utils.is_host_simplex_controller(active_controller):
reboot = False
else:
if constants.SB_SVC_CINDER in services:
# If it is the first time to start cinder service and it
# is not a simplex configuration, then set reboot to false
if StorageBackendConfig.is_service_enabled(
self.dbapi,
constants.SB_SVC_CINDER,
filter_unconfigured=True,
filter_shared=True):
reboot = False
else:
reboot = True
else:
reboot = False
# Set config out-of-date for controllers
config_uuid = self._config_update_hosts(context,
personalities,
reboot=reboot)
tasks = {}
for ctrl in valid_ctrls:
tasks[ctrl.hostname] = constants.SB_TASK_APPLY_MANIFESTS
# Update initial task states
values = {'state': constants.SB_STATE_CONFIGURING,
'task': str(tasks)}
self.dbapi.storage_ceph_external_update(sb_uuid, values)
# TODO(oponcea): Set config_uuid to a random value to keep Config out-of-date.
# Once sm supports in-service config reload, always set config_uuid=config_uuid
# in _config_apply_runtime_manifest and remove the code below.
if reboot:
new_uuid = str(uuid.uuid4())
else:
new_uuid = config_uuid
self._config_apply_runtime_manifest(context,
config_uuid=new_uuid,
config_dict=config_dict)
else:
new_uuid = str(uuid.uuid4())
values = {'state': constants.SB_STATE_CONFIGURED,
'task': None}
self.dbapi.storage_ceph_external_update(sb_uuid, values)
self._config_apply_runtime_manifest(context,
config_uuid=new_uuid,
config_dict=config_dict)
# Update initial task states
values = {'state': constants.SB_STATE_CONFIGURING,
'task': constants.SB_TASK_APPLY_MANIFESTS}
self.dbapi.storage_ceph_update(sb_uuid, values)
if constants.SB_SVC_NOVA in services:
hosts_uuid = self.hosts_with_nova_local(constants.LVG_NOVA_BACKING_REMOTE)
if hosts_uuid:
personalities = [constants.CONTROLLER, constants.COMPUTE]
self._config_update_hosts(context,
personalities,
host_uuids=hosts_uuid,
reboot=True)
def update_ceph_services(self, context, sb_uuid):
"""Update service configs for Ceph tier pools."""
@ -5661,6 +5789,20 @@ class ConductorManager(service.PeriodicService):
LOG.error("No match for sysinv-agent manifest application reported! "
"reported_cfg: %(cfg)s status: %(status)s "
"iconfig: %(iconfig)s" % args)
elif reported_cfg == puppet_common.REPORT_CEPH_EXTERNAL_BACKEND_CONFIG:
host_uuid = iconfig['host_uuid']
if status == puppet_common.REPORT_SUCCESS:
# Configuration was successful
self.report_ceph_external_config_success(context, host_uuid)
elif status == puppet_common.REPORT_FAILURE:
# Configuration has failed
self.report_ceph_external_config_failure(
host_uuid, error, constants.SB_TYPE_CEPH_EXTERNAL)
else:
args = {'cfg': reported_cfg, 'status': status, 'iconfig': iconfig}
LOG.error("No match for sysinv-agent manifest application reported! "
"reported_cfg: %(cfg)s status: %(status)s "
"iconfig: %(iconfig)s" % args)
elif reported_cfg == puppet_common.REPORT_EXTERNAL_BACKEND_CONFIG:
host_uuid = iconfig['host_uuid']
if status == puppet_common.REPORT_SUCCESS:
@ -5927,6 +6069,87 @@ class ConductorManager(service.PeriodicService):
# constants.SB_TYPE_EXTERNAL,
# reason)
def report_ceph_external_config_success(self, context, host_uuid):
    """ Callback for Sysinv Agent

    Configuring Ceph External was successful, finalize operation.
    The Agent calls this if Ceph manifests are applied correctly.
    Both controllers have to get their manifests applied before accepting
    the entire operation as successful.
    """
    LOG.info("Ceph manifests success on host: %s" % host_uuid)
    # As we can have multiple external_ceph backends, need to find the one
    # that is in configuring state.
    ceph_conf = StorageBackendConfig.get_configuring_target_backend(
        self.dbapi, target=constants.SB_TYPE_CEPH_EXTERNAL)

    if ceph_conf:
        # For NOVA, if nova.conf needs to be updated on compute nodes, the
        # task should be set to what? constants.SB_TASK_RECONFIG_COMPUTE?
        config_done = True
        active_controller = utils.HostHelper.get_active_controller(self.dbapi)
        if not utils.is_host_simplex_controller(active_controller):
            # Duplex: track per-controller completion in the 'task' field,
            # which holds a str()-ified {hostname: task} map.
            ctrls = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
            for host in ctrls:
                if host.uuid == host_uuid:
                    break
            else:
                LOG.error("Host %s is not a controller?" % host_uuid)
                return
            # NOTE(review): eval() of DB-stored content — presumably only
            # this module writes the task map; also eval(None) would raise
            # if 'task' is None here — confirm the configuring backend
            # always has a task string set.
            tasks = eval(ceph_conf.get('task', '{}'))
            if tasks:
                tasks[host.hostname] = None
            else:
                tasks = {host.hostname: None}
            for h in ctrls:
                if tasks[h.hostname]:
                    config_done = False
                    break
        if config_done:
            values = {'state': constants.SB_STATE_CONFIGURED,
                      'task': None}
            # The VIM needs to know when a cinder backend was added.
            services = utils.SBApiHelper.getListFromServices(ceph_conf.as_dict())
            if constants.SB_SVC_CINDER in services:
                self._update_vim_config(context)
            # Clear alarm, if any
            # NOTE(review): the alarm is keyed on CINDER_BACKEND_CEPH rather
            # than CINDER_BACKEND_CEPH_EXTERNAL — confirm this is intended.
            self._update_storage_backend_alarm(fm_constants.FM_ALARM_STATE_CLEAR,
                                               constants.CINDER_BACKEND_CEPH)
        else:
            values = {'task': str(tasks)}
        self.dbapi.storage_backend_update(ceph_conf.uuid, values)
def report_ceph_external_config_failure(self, host_uuid, error,
                                        backend=constants.SB_TYPE_CEPH_EXTERNAL):
    """ Callback for Sysinv Agent

    Configuring External Ceph backend failed, set backend to err and raise alarm

    The agent calls this if Ceph manifests failed to apply

    :param host_uuid: uuid of the host the manifests failed on.
    :param error: failure details reported by the agent.
    :param backend: storage backend type.  Fix: the report dispatch calls
        this with a third positional argument
        (constants.SB_TYPE_CEPH_EXTERNAL), which previously raised
        TypeError; accept it with a compatible default.
    """
    args = {'host': host_uuid, 'error': error}
    LOG.error("Ceph external manifests failed on host: %(host)s. Error: %(error)s" % args)

    # As we can have multiple external_ceph backends, need to find the one
    # that is in configuring state.
    ceph_conf = StorageBackendConfig.get_configuring_target_backend(
        self.dbapi, target=constants.SB_TYPE_CEPH_EXTERNAL)

    # Set ceph backend to error state.  Fix: guard against no configuring
    # backend being found, which previously raised AttributeError.
    if ceph_conf:
        values = {'state': constants.SB_STATE_CONFIG_ERR, 'task': None}
        self.dbapi.storage_backend_update(ceph_conf.uuid, values)

    # Raise alarm
    # NOTE(review): the alarm is keyed on CINDER_BACKEND_CEPH rather than
    # CINDER_BACKEND_CEPH_EXTERNAL — confirm this is intended.
    reason = "Ceph external configuration failed to apply on host: %(host)s" % args
    self._update_storage_backend_alarm(fm_constants.FM_ALARM_STATE_SET,
                                       constants.CINDER_BACKEND_CEPH,
                                       reason)
def report_ceph_config_success(self, context, host_uuid):
""" Callback for Sysinv Agent
@ -5937,7 +6160,7 @@ class ConductorManager(service.PeriodicService):
"""
LOG.info("Ceph manifests success on host: %s" % host_uuid)
ceph_conf = StorageBackendConfig.get_backend(self.dbapi,
constants.CINDER_BACKEND_CEPH)
constants.CINDER_BACKEND_CEPH)
# Only update the state/task if the backend hasn't been previously
# configured. Subsequent re-applies of the runtime manifest that need to
@ -6814,7 +7037,7 @@ class ConductorManager(service.PeriodicService):
personalities = [constants.CONTROLLER, constants.COMPUTE]
config_uuid = self._config_update_hosts(context,
personalities,
host_uuid=host_uuid)
host_uuids=[host_uuid])
config_dict = {
"personalities": personalities,
"host_uuids": [host_uuid],
@ -6823,7 +7046,7 @@ class ConductorManager(service.PeriodicService):
self._config_apply_runtime_manifest(context, config_uuid,
config_dict,
force=force,
host_uuid=host_uuid)
host_uuids=[host_uuid])
def _update_resolv_file(self, context, config_uuid, personalities):
"""Generate and update the resolv.conf files on the system"""
@ -7411,12 +7634,12 @@ class ConductorManager(service.PeriodicService):
if host.personality and host.personality in personalities:
self._update_host_config_reinstall(context, host)
def _config_update_hosts(self, context, personalities, host_uuid=None,
def _config_update_hosts(self, context, personalities, host_uuids=None,
reboot=False):
""""Update the hosts configuration status for all hosts affected
:param context: request context.
:param personalities: list of affected host personalities
:parm host_uuid (optional): host whose config_target will be updated
:parm host_uuids (optional): hosts whose config_target will be updated
:param reboot (optional): indicates if a reboot is required to apply
: update
:return The UUID of the configuration generation
@ -7434,10 +7657,10 @@ class ConductorManager(service.PeriodicService):
else:
config_uuid = self._config_clear_reboot_required(config_uuid)
if not host_uuid:
if not host_uuids:
hosts = self.dbapi.ihost_get_list()
else:
hosts = [self.dbapi.ihost_get(host_uuid)]
hosts = [self.dbapi.ihost_get(host_uuid) for host_uuid in host_uuids]
for host in hosts:
if host.personality and host.personality in personalities:
@ -7447,7 +7670,7 @@ class ConductorManager(service.PeriodicService):
return config_uuid
def _config_update_puppet(self, config_uuid, config_dict, force=False,
host_uuid=None):
host_uuids=None):
"""Regenerate puppet hiera data files for each affected host that is
provisioned. If host_uuid is provided, only that host's puppet
hiera data file will be regenerated.
@ -7455,10 +7678,10 @@ class ConductorManager(service.PeriodicService):
host_updated = False
personalities = config_dict['personalities']
if not host_uuid:
if not host_uuids:
hosts = self.dbapi.ihost_get_list()
else:
hosts = [self.dbapi.ihost_get(host_uuid)]
hosts = [self.dbapi.ihost_get(host_uuid) for host_uuid in host_uuids]
for host in hosts:
if host.personality in personalities:
@ -7506,15 +7729,18 @@ class ConductorManager(service.PeriodicService):
self._config_update_puppet(config_uuid, config_dict)
rpcapi = agent_rpcapi.AgentAPI()
rpcapi.iconfig_update_file(context,
iconfig_uuid=config_uuid,
iconfig_dict=config_dict)
try:
rpcapi.iconfig_update_file(context,
iconfig_uuid=config_uuid,
iconfig_dict=config_dict)
except Exception as e:
LOG.info("Error: %s" % str(e))
def _config_apply_runtime_manifest(self,
context,
config_uuid,
config_dict,
host_uuid=None,
host_uuids=None,
force=False):
"""Apply manifests on all hosts affected by the supplied personalities.
@ -7525,7 +7751,7 @@ class ConductorManager(service.PeriodicService):
# is not set. If host_uuid is set only update hiera data for that host
self._config_update_puppet(config_uuid,
config_dict,
host_uuid=host_uuid,
host_uuids=host_uuids,
force=force)
config_dict.update({'force': force})
@ -9166,6 +9392,64 @@ class ConductorManager(service.PeriodicService):
LOG.exception(e)
return False
def distribute_ceph_external_config(self, context, ceph_conf_filename):
    """Distribute an external Ceph cluster's config file to all hosts.

    Reads the config file previously uploaded to /opt/platform/config
    (see store_ceph_external_config) and pushes its contents to
    /etc/ceph on every controller and compute node via a file-update
    config request.

    :param context: request context.
    :param ceph_conf_filename: basename of the Ceph conf file.
    :raises SysinvException: if the uploaded file is missing or cannot
        be read.
    """
    LOG.debug("ceph_conf_file: %s" % ceph_conf_filename)

    # Retrieve the ceph config file that was stored in
    # /opt/platform/config during the file upload stage.
    opt_ceph_conf_file = os.path.join(tsc.PLATFORM_CEPH_CONF_PATH,
                                      ceph_conf_filename)
    if not os.path.exists(opt_ceph_conf_file):
        raise exception.SysinvException(
            _("Could not find the uploaded ceph config file %s in %s")
            % (ceph_conf_filename, tsc.PLATFORM_CEPH_CONF_PATH))

    try:
        # Use a context manager so the file handle is always closed;
        # the previous open()/read() never closed it on success. The
        # explicit seek(0) after open was redundant and is dropped.
        with open(opt_ceph_conf_file, "r") as f:
            contents = f.read()
    except IOError:
        msg = _("Failed to read ceph config file from %s " %
                tsc.PLATFORM_CEPH_CONF_PATH)
        raise exception.SysinvException(msg)

    # Destination path for the file on each target host.
    ceph_conf_file = os.path.join(constants.CEPH_CONF_PATH,
                                  ceph_conf_filename)

    personalities = [constants.CONTROLLER, constants.COMPUTE]
    config_uuid = self._config_update_hosts(context, personalities)
    config_dict = {
        'personalities': personalities,
        'file_names': [ceph_conf_file],
        'file_content': contents,
    }
    self._config_update_file(context, config_uuid, config_dict)
def store_ceph_external_config(self, context, contents, ceph_conf_filename):
    """Persist an uploaded external ceph config file to /opt/platform/config.

    :param context: request context.
    :param contents: content of the Ceph conf file.
    :param ceph_conf_filename: basename to store the file under.
    :raises SysinvException: if a file with the same name already
        exists, or the file cannot be written.
    """
    # TODO: once this directory is created at installation time, this
    # bootstrap code can be removed.
    if not os.path.exists(tsc.PLATFORM_CEPH_CONF_PATH):
        os.makedirs(tsc.PLATFORM_CEPH_CONF_PATH)

    opt_ceph_conf_file = os.path.join(tsc.PLATFORM_CEPH_CONF_PATH,
                                      ceph_conf_filename)
    # Refuse to silently overwrite a previously uploaded file.
    if os.path.exists(opt_ceph_conf_file):
        raise exception.SysinvException(_(
            "Same external ceph config file already exists."))

    try:
        # 'w' is sufficient; the previous 'w+' also opened the file
        # for reading, which is never needed here.
        with open(opt_ceph_conf_file, 'w') as f:
            f.write(contents)
    except IOError:
        msg = _("Failed to write ceph config file in %s " %
                tsc.PLATFORM_CEPH_CONF_PATH)
        raise exception.SysinvException(msg)
def update_firewall_config(self, context, ip_version, contents):
"""Notify agent to configure firewall rules with the supplied data.
Apply firewall manifest changes.

View File

@ -817,6 +817,18 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
sb_uuid=sb_uuid,
services=services))
def update_ceph_external_config(self, context, sb_uuid, services):
    """Synchronously have the conductor configure an external Ceph
    backend on a controller.

    :param context: request context.
    :param sb_uuid: uuid of the storage backend to apply the external
                    ceph config to.
    :param services: list of services using Ceph.
    """
    msg = self.make_msg('update_ceph_external_config',
                        sb_uuid=sb_uuid,
                        services=services)
    return self.call(context, msg)
def update_external_cinder_config(self, context):
"""Synchronously, have the conductor update Cinder Exernal(shared)
on a controller.
@ -1484,6 +1496,32 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
ip_version=ip_version,
contents=contents))
def distribute_ceph_external_config(self, context, ceph_conf_filename):
    """Synchronously ask the conductor to distribute the external Ceph
    cluster's configuration file to the hosts.

    :param context: request context.
    :param ceph_conf_filename: basename of the Ceph conf file.
    """
    msg = self.make_msg('distribute_ceph_external_config',
                        ceph_conf_filename=ceph_conf_filename)
    return self.call(context, msg)
def store_ceph_external_config(self, context, contents, ceph_conf_filename):
    """Synchronously ask the conductor to write the ceph config file
    content to /opt/platform/config.

    :param context: request context.
    :param contents: file content of the Ceph conf file.
    :param ceph_conf_filename: basename of the Ceph conf file.
    """
    msg = self.make_msg('store_ceph_external_config',
                        contents=contents,
                        ceph_conf_filename=ceph_conf_filename)
    return self.call(context, msg)
def update_partition_information(self, context, partition_data):
"""Synchronously, have the conductor update partition information.

View File

@ -2507,6 +2507,50 @@ class Connection(object):
:returns: An ceph storage backend.
"""
@abc.abstractmethod
def storage_ceph_external_create(self, values):
    """Create a new external ceph storage backend.

    :param values: A dict containing several items used to identify
                   and track the external ceph backend
                   (e.g. name, services, capabilities, ceph_conf).
    :returns: An external storage_ceph backend.
    """
@abc.abstractmethod
def storage_ceph_external_get(self, storage_ceph_id):
    """Return an external ceph storage backend.

    :param storage_ceph_id: The id or uuid of the external ceph
                            storage backend.
    :returns: An external storage_ceph backend.
    """
@abc.abstractmethod
def storage_ceph_external_get_list(self, limit=None, marker=None,
                                   sort_key=None, sort_dir=None):
    """Return a list of external ceph storage backends.

    :param limit: Maximum number of external ceph storage backends to
                  return.
    :param marker: the last item of the previous page; we return the next
                   result set.
    :param sort_key: Attribute by which results should be sorted.
    :param sort_dir: direction in which results should be sorted.
                     (asc, desc)
    :returns: A list of external storage_ceph backends.
    """
@abc.abstractmethod
def storage_ceph_external_update(self, stor_ceph_ext_id, values):
    """Update properties of an external ceph storage backend.

    :param stor_ceph_ext_id: The id or uuid of an external ceph
                             storage backend.
    :param values: Dict of values to update, e.g.
                   {
                    'ceph_conf': '/opt/extension/ceph/3p_ceph1.conf'
                   }
    :returns: An external ceph storage backend.
    """
@abc.abstractmethod
def drbdconfig_create(self, values):
"""Create a new drbdconfig for an isystem

View File

@ -752,11 +752,13 @@ def add_storage_backend_filter(query, value):
"""
if value in constants.SB_SUPPORTED:
return query.filter(or_(models.StorageCeph.backend == value,
models.StorageCephExternal.backend == value,
models.StorageFile.backend == value,
models.StorageLvm.backend == value,
models.StorageExternal.backend == value))
elif uuidutils.is_uuid_like(value):
return query.filter(or_(models.StorageCeph.uuid == value,
models.StorageCephExternal.uuid == value,
models.StorageFile.uuid == value,
models.StorageLvm.uuid == value,
models.StorageExternal.uuid == value))
@ -767,6 +769,7 @@ def add_storage_backend_filter(query, value):
def add_storage_backend_name_filter(query, value):
    """Add a name-based storage_backend filter to a query.

    The name may live on any of the polymorphic backend models, so the
    filter ORs the match across all of them.
    """
    name_matches = [
        models.StorageCeph.name == value,
        models.StorageCephExternal.name == value,
        models.StorageFile.name == value,
        models.StorageLvm.name == value,
        models.StorageExternal.name == value,
    ]
    return query.filter(or_(*name_matches))
@ -3935,6 +3938,8 @@ class Connection(api.Connection):
def storage_backend_create(self, values):
if values['backend'] == constants.SB_TYPE_CEPH:
backend = models.StorageCeph()
elif values['backend'] == constants.SB_TYPE_CEPH_EXTERNAL:
backend = models.StorageCephExternal()
elif values['backend'] == constants.SB_TYPE_FILE:
backend = models.StorageFile()
elif values['backend'] == constants.SB_TYPE_LVM:
@ -4009,6 +4014,8 @@ class Connection(api.Connection):
if result['backend'] == constants.SB_TYPE_CEPH:
return objects.storage_ceph.from_db_object(result)
elif result['backend'] == constants.SB_TYPE_CEPH_EXTERNAL:
return objects.storage_ceph_external.from_db_object(result)
elif result['backend'] == constants.SB_TYPE_FILE:
return objects.storage_file.from_db_object(result)
elif result['backend'] == constants.SB_TYPE_LVM:
@ -4040,6 +4047,9 @@ class Connection(api.Connection):
if backend_type == constants.SB_TYPE_CEPH:
return self._storage_backend_get_list(models.StorageCeph, limit,
marker, sort_key, sort_dir)
elif backend_type == constants.SB_TYPE_CEPH_EXTERNAL:
return self._storage_backend_get_list(models.StorageCephExternal, limit,
marker, sort_key, sort_dir)
elif backend_type == constants.SB_TYPE_FILE:
return self._storage_backend_get_list(models.StorageFile, limit,
marker, sort_key, sort_dir)
@ -4091,6 +4101,8 @@ class Connection(api.Connection):
if result.backend == constants.SB_TYPE_CEPH:
return self._storage_backend_update(models.StorageCeph, storage_backend_id, values)
elif result.backend == constants.SB_TYPE_CEPH_EXTERNAL:
return self._storage_backend_update(models.StorageCephExternal, storage_backend_id, values)
elif result.backend == constants.SB_TYPE_FILE:
return self._storage_backend_update(models.StorageFile, storage_backend_id, values)
elif result.backend == constants.SB_TYPE_LVM:
@ -4240,6 +4252,34 @@ class Connection(api.Connection):
def storage_lvm_destroy(self, storage_lvm_id):
return self._storage_backend_destroy(models.StorageLvm, storage_lvm_id)
@objects.objectify(objects.storage_ceph_external)
def storage_ceph_external_create(self, values):
    """Create a storage_ceph_external backend row from *values*."""
    return self._storage_backend_create(models.StorageCephExternal(),
                                        values)
@objects.objectify(objects.storage_ceph_external)
def storage_ceph_external_get(self, storage_ceph_external_id):
    """Look up one external ceph backend by id or uuid."""
    backend_cls = models.StorageCephExternal
    return self._storage_backend_get_by_cls(backend_cls,
                                            storage_ceph_external_id)
@objects.objectify(objects.storage_ceph_external)
def storage_ceph_external_get_list(self, limit=None, marker=None,
                                   sort_key=None, sort_dir=None):
    """Return a (possibly paginated, sorted) list of external ceph backends."""
    return self._storage_backend_get_list(
        models.StorageCephExternal, limit, marker, sort_key, sort_dir)
@objects.objectify(objects.storage_ceph_external)
def storage_ceph_external_update(self, storage_ceph_external_id, values):
    """Apply *values* to the external ceph backend row and return it."""
    return self._storage_backend_update(
        models.StorageCephExternal, storage_ceph_external_id, values)
@objects.objectify(objects.storage_ceph_external)
def storage_ceph_external_destroy(self, storage_ceph_external_id):
    """Delete the external ceph backend row identified by id or uuid."""
    return self._storage_backend_destroy(
        models.StorageCephExternal, storage_ceph_external_id)
def _drbdconfig_get(self, server):
query = model_query(models.drbdconfig)
query = add_identity_filter(query, server)

View File

@ -0,0 +1,54 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sqlalchemy import Integer, DateTime, String
from sqlalchemy import Column, MetaData, Table, ForeignKey
from sysinv.openstack.common import log
ENGINE = 'InnoDB'
CHARSET = 'utf8'
LOG = log.getLogger(__name__)
def upgrade(migrate_engine):
    """Create the storage_ceph_external table.

    The table extends storage_backend via joined-table inheritance:
    its primary key is also a foreign key to storage_backend.id.
    (The previous docstring incorrectly said "storage_external".)
    """
    meta = MetaData()
    meta.bind = migrate_engine

    # Autoload the parent table so the ForeignKey below can resolve.
    storage_backend = Table('storage_backend', meta, autoload=True)  # noqa: F841

    # Define and create the storage_ceph_external table.
    storage_ceph_external = Table(
        'storage_ceph_external',
        meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer,
               ForeignKey('storage_backend.id', ondelete="CASCADE"),
               primary_key=True, unique=True, nullable=False),
        # Path of the imported external Ceph config file.
        Column('ceph_conf', String(255), unique=True, index=True),
        mysql_engine=ENGINE,
        mysql_charset=CHARSET,
    )
    storage_ceph_external.create()
def downgrade(migrate_engine):
    """Raise NotImplementedError: downgrade is unsupported.

    As per other OpenStack components, database downgrade is
    unsupported in this release. The previous body constructed an
    unused MetaData object before raising; that dead code is removed.
    """
    raise NotImplementedError('SysInv database downgrade is unsupported.')

View File

@ -937,6 +937,18 @@ class StorageExternal(StorageBackend):
}
class StorageCephExternal(StorageBackend):
    """ORM model for external Ceph storage backends.

    Joined-table inheritance child of StorageBackend; rows live in
    storage_ceph_external and share their primary key with the parent
    storage_backend row.
    """
    __tablename__ = 'storage_ceph_external'

    # Primary key is also a foreign key to the parent backend row.
    id = Column(Integer, ForeignKey('storage_backend.id'), primary_key=True,
                nullable=False)

    # Path of the imported external Ceph config file.
    # NOTE(review): the corresponding migration declares this column as
    # String(255), while the model maps it as JSONEncodedDict — confirm
    # which type is intended.
    ceph_conf = Column(JSONEncodedDict)

    __mapper_args__ = {
        'polymorphic_identity': 'ceph-external',
    }
class CephMon(Base):
__tablename__ = 'ceph_mon'

View File

@ -84,6 +84,7 @@ from sysinv.objects import tpmdevice
from sysinv.objects import storage_file
from sysinv.objects import storage_external
from sysinv.objects import storage_tier
from sysinv.objects import storage_ceph_external
def objectify(klass):
@ -177,6 +178,7 @@ certificate = certificate.Certificate
storage_file = storage_file.StorageFile
storage_external = storage_external.StorageExternal
storage_tier = storage_tier.StorageTier
storage_ceph_external = storage_ceph_external.StorageCephExternal
__all__ = (system,
cluster,
@ -242,6 +244,7 @@ __all__ = (system,
storage_file,
storage_external,
storage_tier,
storage_ceph_external,
# alias objects for RPC compatibility
ihost,
ilvg,

View File

@ -24,7 +24,7 @@ class StorageCeph(storage_backend.StorageBackend):
'ephemeral_pool_gib': utils.int_or_none,
'object_pool_gib': utils.int_or_none,
'object_gateway': utils.bool_or_none,
'tier_id': int,
'tier_id': utils.int_or_none,
'tier_name': utils.str_or_none,
'tier_uuid': utils.str_or_none,
}, **storage_backend.StorageBackend.fields)

View File

@ -0,0 +1,28 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
from sysinv.objects import storage_backend
class StorageCephExternal(storage_backend.StorageBackend):
    """Sysinv versioned object wrapping a storage_ceph_external DB record."""

    dbapi = db_api.get_instance()

    # Extends the base StorageBackend fields with the path of the
    # imported external Ceph config file.
    fields = dict({
        'ceph_conf': utils.str_or_none,
    }, **storage_backend.StorageBackend.fields)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Fetch the external ceph backend object by its uuid."""
        return cls.dbapi.storage_ceph_external_get(uuid)

    def save_changes(self, context, updates):
        """Persist pending field *updates* for this backend via the DB API."""
        self.dbapi.storage_ceph_external_update(self.uuid, updates)

View File

@ -3,6 +3,7 @@
#
# SPDX-License-Identifier: Apache-2.0
#
import os
from sysinv.common import constants
from sysinv.common import exception
@ -463,6 +464,7 @@ class CinderPuppet(openstack.OpenstackBasePuppet):
ceph_type_configs = {}
is_service_enabled = False
is_ceph_external = False
for storage_backend in self.dbapi.storage_backend_get_list():
if (storage_backend.backend == constants.SB_TYPE_LVM and
(storage_backend.services and
@ -529,6 +531,37 @@ class CinderPuppet(openstack.OpenstackBasePuppet):
ceph_backend_configs.update({storage_backend.name: ceph_backend})
ceph_type_configs.update({storage_backend.name: ceph_backend_type})
elif storage_backend.backend == constants.SB_TYPE_CEPH_EXTERNAL:
is_ceph_external = True
ceph_ext_obj = self.dbapi.storage_ceph_external_get(
storage_backend.id)
ceph_external_backend = {
'backend_enabled': False,
'backend_name': ceph_ext_obj.name,
'rbd_pool':
storage_backend.capabilities.get('cinder_pool'),
'rbd_ceph_conf': constants.CEPH_CONF_PATH + os.path.basename(ceph_ext_obj.ceph_conf),
}
ceph_external_backend_type = {
'type_enabled': False,
'type_name': "{0}-{1}".format(
ceph_ext_obj.name,
constants.CINDER_BACKEND_CEPH_EXTERNAL),
'backend_name': ceph_ext_obj.name
}
if (storage_backend.services and
constants.SB_SVC_CINDER in storage_backend.services):
is_service_enabled = True
ceph_external_backend['backend_enabled'] = True
ceph_external_backend_type['type_enabled'] = True
enabled_backends.append(
ceph_external_backend['backend_name'])
ceph_backend_configs.update(
{storage_backend.name: ceph_external_backend})
ceph_type_configs.update(
{storage_backend.name: ceph_external_backend_type})
# Update the params for the external SANs
config.update(self._get_service_parameter_config(is_service_enabled,
@ -536,6 +569,7 @@ class CinderPuppet(openstack.OpenstackBasePuppet):
config.update({
'openstack::cinder::params::service_enabled': is_service_enabled,
'openstack::cinder::params::enabled_backends': enabled_backends,
'openstack::cinder::params::is_ceph_external': is_ceph_external,
'openstack::cinder::backends::ceph::ceph_backend_configs':
ceph_backend_configs,
'openstack::cinder::api::backends::ceph_type_configs':
@ -730,5 +764,9 @@ class CinderPuppet(openstack.OpenstackBasePuppet):
(storage_backend.services and
constants.SB_SVC_CINDER in storage_backend.services)):
return True
elif (storage_backend.backend == constants.SB_TYPE_CEPH_EXTERNAL and
(storage_backend.services and
constants.SB_SVC_CINDER in storage_backend.services)):
return True
return False

View File

@ -31,6 +31,7 @@ REPORT_DISK_PARTITON_CONFIG = 'manage_disk_partitions'
REPORT_LVM_BACKEND_CONFIG = 'lvm_config'
REPORT_EXTERNAL_BACKEND_CONFIG = 'external_config'
REPORT_CEPH_BACKEND_CONFIG = 'ceph_config'
REPORT_CEPH_EXTERNAL_BACKEND_CONFIG = 'ceph_external_config'
REPORT_CEPH_SERVICES_CONFIG = 'ceph_services'

View File

@ -4,13 +4,17 @@
# SPDX-License-Identifier: Apache-2.0
#
import os
from oslo_utils import strutils
from urlparse import urlparse
from sysinv.common import constants
from sysinv.common import exception
from sysinv.openstack.common import log as logging
from . import openstack
LOG = logging.getLogger(__name__)
class GlancePuppet(openstack.OpenstackBasePuppet):
"""Class to encapsulate puppet operations for glance configuration"""
@ -55,6 +59,8 @@ class GlancePuppet(openstack.OpenstackBasePuppet):
pipeline = constants.GLANCE_DEFAULT_PIPELINE
registry_host = constants.GLANCE_LOCAL_REGISTRY
remote_registry_region_name = None
rbd_store_pool = None
rbd_store_ceph_conf = None
is_service_enabled = False
for storage_backend in self.dbapi.storage_backend_get_list():
@ -70,6 +76,18 @@ class GlancePuppet(openstack.OpenstackBasePuppet):
is_service_enabled = True
enabled_backends.append(constants.GLANCE_BACKEND_RBD)
stores.append(constants.GLANCE_BACKEND_RBD)
# For internal ceph backend, the default "images" glance pool
# and default "/etc/ceph/ceph.conf" config file will be used.
elif (storage_backend.backend == constants.SB_TYPE_CEPH_EXTERNAL and
(storage_backend.services and
constants.SB_SVC_GLANCE in storage_backend.services)):
is_service_enabled = True
enabled_backends.append(constants.GLANCE_BACKEND_RBD)
stores.append(constants.GLANCE_BACKEND_RBD)
ceph_ext_obj = self.dbapi.storage_ceph_external_get(
storage_backend.id)
rbd_store_pool = storage_backend.capabilities.get('glance_pool')
rbd_store_ceph_conf = constants.CEPH_CONF_PATH + os.path.basename(ceph_ext_obj.ceph_conf)
if self.get_glance_cached_status():
stores.append(constants.GLANCE_BACKEND_GLANCE)
@ -150,6 +168,12 @@ class GlancePuppet(openstack.OpenstackBasePuppet):
'openstack::glance::params::glance_cached':
self.get_glance_cached_status(),
}
if rbd_store_pool and rbd_store_ceph_conf:
config.update({'openstack::glance::params::rbd_store_pool':
rbd_store_pool,
'openstack::glance::params::rbd_store_ceph_conf':
rbd_store_ceph_conf,})
return config
def get_secure_system_config(self):

View File

@ -4,9 +4,9 @@
# SPDX-License-Identifier: Apache-2.0
#
import json
import os
import re
import json
import shutil
import subprocess
@ -480,7 +480,7 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
global_filter, update_filter = self._get_lvm_global_filter(host)
return {
values = {
'openstack::nova::storage::final_pvs': final_pvs,
'openstack::nova::storage::adding_pvs': adding_pvs,
'openstack::nova::storage::removing_pvs': removing_pvs,
@ -490,8 +490,28 @@ class NovaPuppet(openstack.OpenstackBasePuppet):
'openstack::nova::storage::instances_lv_size':
"%sm" % instances_lv_size,
'openstack::nova::storage::concurrent_disk_operations':
concurrent_disk_operations,
}
concurrent_disk_operations,}
# If NOVA is a service on a ceph-external backend, use the ephemeral_pool
# and ceph_conf file that are stored in that DB entry.
# If NOVA is not on any ceph-external backend, it must be on the internal
# ceph backend with default "ephemeral" pool and default "/etc/ceph/ceph.conf"
# config file
sb_list = self.dbapi.storage_backend_get_list_by_type(
backend_type=constants.SB_TYPE_CEPH_EXTERNAL)
if sb_list:
for sb in sb_list:
if constants.SB_SVC_NOVA in sb.services:
ceph_ext_obj = self.dbapi.storage_ceph_external_get(sb.id)
images_rbd_pool = sb.capabilities.get('ephemeral_pool')
images_rbd_ceph_conf = \
constants.CEPH_CONF_PATH + os.path.basename(ceph_ext_obj.ceph_conf)
values.update({'openstack::nova::storage::images_rbd_pool':
images_rbd_pool,
'openstack::nova::storage::images_rbd_ceph_conf':
images_rbd_ceph_conf,})
return values
# TODO(oponcea): Make lvm global_filter generic
def _get_lvm_global_filter(self, host):

View File

@ -42,6 +42,7 @@ test_storage_ceph.HIERA_DATA = {
constants.SB_SVC_CINDER: ['test_cparam3'],
constants.SB_SVC_GLANCE: ['test_gparam3'],
constants.SB_SVC_SWIFT: ['test_sparam1'],
constants.SB_SVC_NOVA: ['test_nparam1'],
}
orig_set_backend_data = SBApiHelper.set_backend_data