Ceph for standard: System Inventory CLI, API, DB

In order to enable OpenStack's Helm charts on StarlingX we need distributed
persistent storage for Kubernetes that leverages our existing storage
configurations. For this stage we will enable Ceph RBD to work with the
Kubernetes RBD provisioner through a new Helm chart.

Since RBD will be the persistent storage solution, Ceph support has to be
extended to the one-node, two-node and standard configurations.

This commit enables Ceph on a standard configuration without dedicated
storage nodes by allowing users to add the third Ceph monitor to a worker
node. It implements the functional part of this feature.

Details:
* Improve the Ceph monitor CLI to allow adding and deleting Ceph monitors
  (a usage sketch follows this list).
* Refactor the Ceph monitor API handling code.
* Add state and task DB columns to the ceph_mon table.
* Apply puppet manifests at runtime on all nodes when a Ceph monitor is
  added through the CLI or API.
* Monitor the manifest application and report the puppet apply status back
  on the controllers.
* Delete the monitor when a worker is deleted.
* Add a monitor for storage nodes.
* Minimal semantic checks.

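At the Python client level the intended end-to-end flow looks roughly like the
following. This is a minimal sketch, assuming an authenticated cgtsclient
handle `cc` and a provisioned worker named 'worker-0' (both placeholders); the
calls and fields are taken from the diffs below.

from cgtsclient.v1 import ihost as ihost_utils

host = ihost_utils._find_ihost(cc, 'worker-0')
resp = cc.ceph_mon.create(ihost_uuid=host.uuid)     # POST /v1/ceph_mon
mon_uuid = resp.ceph_mon[0].get('uuid', '')         # server answers with a collection-style body
mon = cc.ceph_mon.get(mon_uuid)
print("state=%s task=%s" % (mon.state, mon.task))   # e.g. 'configuring' plus a per-controller task map
# Deletion is keyed by the host UUID and is only accepted by the API while the
# monitor is in the config-err state; otherwise lock and delete the node instead.
cc.ceph_mon.delete(host.uuid)                       # DELETE /v1/ceph_mon/<host_uuid>
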
Change-Id: Ie316bb611a006bbbc92ac22c52c3973cc9f15109
Co-Authored-By: Ovidiu Poncea <ovidiu.poncea@windriver.com>
Implements: containerization-2002844-CEPH-persistent-storage-backend-for-Kubernetes
Story: 2002844
Task: 28723
Signed-off-by: Ovidiu Poncea <Ovidiu.Poncea@windriver.com>
Author: Stefan Dinescu, 2018-12-10 15:12:06 +02:00 (committed by Ovidiu Poncea)
parent 7dd943fe46
commit 0831a616b3
12 changed files with 353 additions and 80 deletions


@ -439,7 +439,7 @@ class platform::ceph::rgw::keystone::auth(
}
class platform::ceph::controller::runtime {
class platform::ceph::runtime {
include ::platform::ceph::monitor
include ::platform::ceph
@ -452,7 +452,3 @@ class platform::ceph::controller::runtime {
}
}
}
class platform::ceph::compute::runtime {
include ::platform::ceph
}


@ -10,8 +10,7 @@
from cgtsclient.common import base
from cgtsclient import exc
CREATION_ATTRIBUTES = ['ceph_mon_gib', 'ceph_mon_dev',
'ceph_mon_dev_ctrl0', 'ceph_mon_dev_ctrl1']
CREATION_ATTRIBUTES = ['ihost_uuid']
class CephMon(base.Resource):
@ -36,8 +35,8 @@ class CephMonManager(base.Manager):
path = '/v1/ceph_mon'
return self._list(path, "ceph_mon")
def get(self, ceph_mon_id):
path = '/v1/ceph_mon/%s' % ceph_mon_id
def get(self, ceph_mon_uuid):
path = '/v1/ceph_mon/%s' % ceph_mon_uuid
try:
return self._list(path)[0]
except IndexError:
@ -53,6 +52,10 @@ class CephMonManager(base.Manager):
raise exc.InvalidAttribute('%s' % key)
return self._create(path, new)
def delete(self, host_uuid):
path = '/v1/ceph_mon/%s' % host_uuid
return self._delete(path)
def update(self, ceph_mon_id, patch):
path = '/v1/ceph_mon/%s' % ceph_mon_id
return self._update(path, patch)
@ -62,7 +65,7 @@ class CephMonManager(base.Manager):
return self._json_get(path, {})
def ceph_mon_add(cc, args):
def ceph_mon_add(cc, args, ihost_uuid):
data = dict()
if not vars(args).get('confirmed', None):
@ -73,8 +76,13 @@ def ceph_mon_add(cc, args):
if ceph_mon_gib:
data['ceph_mon_gib'] = ceph_mon_gib
data['ihost_uuid'] = ihost_uuid
ceph_mon = cc.ceph_mon.create(**data)
suuid = getattr(ceph_mon, 'uuid', '')
if ceph_mon and len(ceph_mon.ceph_mon):
suuid = ceph_mon.ceph_mon[0].get('uuid', '')
else:
raise exc.CommandError(
"Created ceph_mon has invalid data.")
try:
ceph_mon = cc.ceph_mon.get(suuid)
except exc.HTTPNotFound:

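For reference, a sketch of how the reworked manager methods map onto the REST
endpoints; `cc`, the UUID variables and `patch` are placeholders, and update
goes through the base manager's update helper (a JSON PATCH).

cc.ceph_mon.list()                        # GET    /v1/ceph_mon
cc.ceph_mon.get(ceph_mon_uuid)            # GET    /v1/ceph_mon/<ceph_mon_uuid>
cc.ceph_mon.create(ihost_uuid=host_uuid)  # POST   /v1/ceph_mon
cc.ceph_mon.delete(host_uuid)             # DELETE /v1/ceph_mon/<host_uuid>  (keyed by host, not monitor)
cc.ceph_mon.update(ceph_mon_uuid, patch)  # PATCH  /v1/ceph_mon/<ceph_mon_id>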

@ -11,21 +11,24 @@
from cgtsclient.common import constants
from cgtsclient.common import utils
from cgtsclient import exc
from cgtsclient.v1 import ihost as ihost_utils
def _print_ceph_mon_show(ceph_mon):
fields = ['uuid', 'ceph_mon_gib',
'created_at', 'updated_at']
'created_at', 'updated_at',
'state', 'task']
data = [(f, getattr(ceph_mon, f)) for f in fields]
utils.print_tuple_list(data)
def _print_ceph_mon_list(cc):
field_labels = ['uuid', 'ceph_mon_gib',
'hostname']
fields = ['uuid', 'ceph_mon_gib', 'hostname']
'hostname', 'state', 'task']
fields = ['uuid', 'ceph_mon_gib', 'hostname',
'state', 'task']
ceph_mons = cc.ceph_mon.list()
utils.print_list(ceph_mons, fields, field_labels, sortby=0)
@ -88,3 +91,44 @@ def do_ceph_mon_show(cc, args):
hostname = getattr(ceph_mon, 'hostname', '')
if hostname == ihost.hostname:
_print_ceph_mon_show(ceph_mon)
@utils.arg('hostnameorid',
metavar='<hostname or id>',
help='name or ID of host [REQUIRED]')
def do_ceph_mon_add(cc, args):
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
fields = {}
fields['ihost_uuid'] = ihost.uuid
try:
ceph_mon = cc.ceph_mon.create(**fields)
except exc.HTTPNotFound:
raise exc.CommandError(
"Ceph mon creation failed: "
"host %s: " % args.hostnameorid)
if ceph_mon and len(ceph_mon.ceph_mon):
suuid = ceph_mon.ceph_mon[0].get('uuid', '')
else:
raise exc.CommandError(
"Created ceph_mon has invalid data.")
try:
ceph_mon = cc.ceph_mon.get(suuid)
except exc.HTTPNotFound:
raise exc.CommandError("Created ceph monitor UUID not found: "
"%s" % suuid)
_print_ceph_mon_show(ceph_mon)
@utils.arg('hostnameorid',
help='hostname for compute')
def do_ceph_mon_delete(cc, args):
ihost = ihost_utils._find_ihost(cc, args.hostnameorid)
try:
cc.ceph_mon.delete(ihost.uuid)
except exc.HTTPNotFound:
raise exc.CommandError("failed to delete ceph_mon")

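The new shell handlers are thin wrappers over the client calls above. A
minimal sketch of driving them directly, where argparse.Namespace and the `cc`
handle stand in for what the CLI framework normally supplies:

import argparse

args = argparse.Namespace(hostnameorid='worker-0')
do_ceph_mon_add(cc, args)     # resolves the host, POSTs /v1/ceph_mon, prints uuid/state/task
do_ceph_mon_delete(cc, args)  # deletes by host UUID; the API only permits this while the
                              # monitor is in the config-err state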

@ -12,6 +12,7 @@ from cgtsclient.common import constants
from cgtsclient.common import utils
from cgtsclient import exc
from cgtsclient.v1 import ceph_mon as ceph_mon_utils
from cgtsclient.v1 import ihost as ihost_utils
from cgtsclient.v1 import storage_ceph # noqa
from cgtsclient.v1 import storage_ceph_external # noqa
from cgtsclient.v1 import storage_external # noqa
@ -139,7 +140,20 @@ def backend_add(cc, backend, args):
# add ceph mons to controllers
if backend == constants.SB_TYPE_CEPH:
ceph_mon_utils.ceph_mon_add(cc, args)
# Controllers should always have monitors.
# Not finding a controller means it's not yet configured,
# so move forward.
try:
ihost = ihost_utils._find_ihost(cc, constants.CONTROLLER_0_HOSTNAME)
ceph_mon_utils.ceph_mon_add(cc, args, ihost.uuid)
except exc.CommandError:
pass
try:
ihost = ihost_utils._find_ihost(cc, constants.CONTROLLER_1_HOSTNAME)
ceph_mon_utils.ceph_mon_add(cc, args, ihost.uuid)
except exc.CommandError:
pass
# allowed storage_backend fields
allowed_fields = ['name', 'services', 'confirmed', 'ceph_conf']


@ -16,10 +16,11 @@
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2017 Wind River Systems, Inc.
# Copyright (c) 2013-2019 Wind River Systems, Inc.
#
import jsonpatch
import six
import pecan
from pecan import rest
@ -75,6 +76,9 @@ class CephMon(base.APIBase):
forihostid = int
"The id of the host the ceph mon belongs to."
ihost_uuid = types.uuid
"The UUID of the host this ceph mon belongs to"
hostname = wtypes.text
"The name of host this ceph mon belongs to."
@ -85,13 +89,11 @@ class CephMon(base.APIBase):
ceph_mon_gib = int
"The ceph-mon-lv size in GiB, for Ceph backend only."
ceph_mon_dev_ctrl0 = wtypes.text
"The disk device on controller-0 that cgts-vg will be extended " \
"to create ceph-mon-lv"
state = wtypes.text
"The state of the monitor. It can be configured or configuring."
ceph_mon_dev_ctrl1 = wtypes.text
"The disk device on controller-1 that cgts-vg will be extended " \
"to create ceph-mon-lv"
task = wtypes.text
"Current task of the corresponding ceph monitor."
links = [link.Link]
"A list containing a self link and associated ceph_mon links"
@ -100,28 +102,18 @@ class CephMon(base.APIBase):
updated_at = wtypes.datetime.datetime
def __init__(self, **kwargs):
defaults = {'state': constants.SB_STATE_CONFIGURED,
'task': constants.SB_TASK_NONE}
self.fields = objects.ceph_mon.fields.keys()
for k in self.fields:
setattr(self, k, kwargs.get(k))
setattr(self, k, kwargs.get(k, defaults.get(k)))
if not self.uuid:
self.uuid = uuidutils.generate_uuid()
self.fields.append('ceph_mon_dev')
setattr(self, 'ceph_mon_dev', kwargs.get('ceph_mon_dev', None))
self.fields.append('ceph_mon_dev_ctrl0')
setattr(self, 'ceph_mon_dev_ctrl0',
kwargs.get('ceph_mon_dev_ctrl0', None))
self.fields.append('ceph_mon_dev_ctrl1')
setattr(self, 'ceph_mon_dev_ctrl1',
kwargs.get('ceph_mon_dev_ctrl1', None))
self.fields.append('device_node')
setattr(self, 'device_node', kwargs.get('device_node', None))
@classmethod
def convert_with_links(cls, rpc_ceph_mon, expand=True):
@ -129,12 +121,15 @@ class CephMon(base.APIBase):
if not expand:
ceph_mon.unset_fields_except(['created_at',
'updated_at',
'ihost_uuid',
'forihostid',
'uuid',
'device_path',
'device_node',
'ceph_mon_dev',
'ceph_mon_gib',
'state',
'task',
'ceph_mon_dev_ctrl0',
'ceph_mon_dev_ctrl1',
'hostname'])
@ -381,13 +376,21 @@ class CephMonController(rest.RestController):
return StorageBackendConfig.get_ceph_mon_ip_addresses(
pecan.request.dbapi)
@cutils.synchronized(LOCK_NAME)
@wsme_pecan.wsexpose(None, six.text_type, status_code=204)
def delete(self, host_uuid):
"""Delete a ceph_mon."""
_delete(host_uuid)
def _set_defaults(ceph_mon):
defaults = {
'uuid': None,
'ceph_mon_gib': constants.SB_CEPH_MON_GIB,
'ceph_mon_dev': None,
'ceph_mon_dev_ctrl0': None,
'ceph_mon_dev_ctrl1': None,
'state': constants.SB_STATE_CONFIGURED,
'task': constants.SB_TASK_NONE,
}
storage_ceph_merged = ceph_mon.copy()
@ -407,30 +410,74 @@ def _create(ceph_mon):
_check_ceph_mon(ceph_mon)
chost = pecan.request.dbapi.ihost_get(ceph_mon['ihost_uuid'])
ceph_mon['forihostid'] = chost['id']
controller_fs_utils._check_controller_fs(
ceph_mon_gib_new=ceph_mon['ceph_mon_gib'])
pecan.request.rpcapi.reserve_ip_for_first_storage_node(
pecan.request.context)
new_ceph_mons = list()
chosts = pecan.request.dbapi.ihost_get_by_personality(constants.CONTROLLER)
for chost in chosts:
# Check if mon exists
ceph_mons = pecan.request.dbapi.ceph_mon_get_by_ihost(chost.uuid)
if ceph_mons:
pecan.request.dbapi.ceph_mon_update(
ceph_mons[0].uuid, {'ceph_mon_gib': ceph_mon['ceph_mon_gib']}
)
new_ceph_mons.append(ceph_mons[0])
else:
ceph_mon_new = dict()
ceph_mon_new['uuid'] = None
ceph_mon_new['forihostid'] = chost.id
ceph_mon_new['ceph_mon_gib'] = ceph_mon['ceph_mon_gib']
# Size of ceph-mon logical volume must be the same for all
# monitors so we get the size from any or use default.
ceph_mons = pecan.request.dbapi.ceph_mon_get_list()
if ceph_mons:
ceph_mon['ceph_mon_gib'] = ceph_mons[0]['ceph_mon_gib']
LOG.info("creating ceph_mon_new for %s: %s" %
(chost.hostname, str(ceph_mon_new)))
new_ceph_mons.append(pecan.request.dbapi.ceph_mon_create(ceph_mon_new))
# In case we add the monitor on a worker node, the state
# and task must be set properly.
if chost.personality == constants.WORKER:
ceph_mon['state'] = constants.SB_STATE_CONFIGURING
ctrls = pecan.request.dbapi.ihost_get_by_personality(
constants.CONTROLLER)
valid_ctrls = [
ctrl for ctrl in ctrls if
(ctrl.administrative == constants.ADMIN_LOCKED and
ctrl.availability == constants.AVAILABILITY_ONLINE) or
(ctrl.administrative == constants.ADMIN_UNLOCKED and
ctrl.operational == constants.OPERATIONAL_ENABLED)]
return new_ceph_mons
tasks = {}
for ctrl in valid_ctrls:
tasks[ctrl.hostname] = constants.SB_STATE_CONFIGURING
ceph_mon['task'] = str(tasks)
LOG.info("Creating ceph-mon DB entry for host uuid %s: %s" %
(ceph_mon['ihost_uuid'], str(ceph_mon)))
new_ceph_mon = pecan.request.dbapi.ceph_mon_create(ceph_mon)
# We update the base config when adding a dynamic monitor.
# At this moment the only possibility to add a dynamic monitor
# is on a worker node, so we check for that.
if chost.personality == constants.WORKER:
# Storage nodes are not supported on a controller based
# storage model.
personalities = [constants.CONTROLLER, constants.WORKER]
pecan.request.rpcapi.update_ceph_base_config(
pecan.request.context,
personalities)
# The return value needs to be iterable, so make it a list.
return [new_ceph_mon]
def _delete(host_uuid):
ceph_mon = pecan.request.dbapi.ceph_mon_get_by_ihost(host_uuid)
if ceph_mon:
ceph_mon = ceph_mon[0]
else:
raise wsme.exc.ClientSideError(
_("No Ceph Monitor defined for host with uuid: %s" % host_uuid))
if ceph_mon.state == constants.SB_STATE_CONFIG_ERR:
try:
pecan.request.dbapi.ceph_mon_destroy(ceph_mon.uuid)
except exception.HTTPNotFound:
raise wsme.exc.ClientSideError("Deleting Ceph Monitor failed!")
else:
raise wsme.exc.ClientSideError(
_("Direct Ceph monitor delete only allowed for state '%s'. "
"Please lock and delete node to remove the configured Ceph Monitor."
% constants.SB_STATE_CONFIG_ERR))

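The per-controller progress is kept in the monitor's task column as the string
form of a Python dict, written by _create above and consumed later by the
conductor callbacks. A minimal sketch of that round trip; the hostnames and
state strings are illustrative, and ast.literal_eval is shown as a safe
stand-in for the eval() the conductor uses:

import ast

tasks = {'controller-0': 'configuring', 'controller-1': 'configuring'}
serialized = str(tasks)                  # what _create stores in ceph_mon.task
restored = ast.literal_eval(serialized)  # parsed back when a puppet report arrives
restored['controller-0'] = 'configured'  # a successful report flips that controller's entry
all_done = all(v == 'configured' for v in restored.values())  # only then is the monitor marked configured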

@ -4452,8 +4452,8 @@ class HostController(rest.RestController):
LOG.info(
'Apply new Ceph manifest to provisioned worker nodes.'
)
pecan.request.rpcapi.config_worker_for_ceph(
pecan.request.context
pecan.request.rpcapi.update_ceph_base_config(
pecan.request.context, personalities=[constants.WORKER]
)
# mark all tasks completed after updating the manifests for
# all worker nodes.


@ -1415,24 +1415,29 @@ class ConductorManager(service.PeriodicService):
ceph_mon_gib = ceph_mons[0].ceph_mon_gib
values = {'forisystemid': system.id,
'forihostid': host.id,
'ceph_mon_gib': ceph_mon_gib}
'ceph_mon_gib': ceph_mon_gib,
'state': constants.SB_STATE_CONFIGURED,
'task': constants.SB_TASK_NONE}
LOG.info("creating ceph_mon for host %s with ceph_mon_gib=%s."
% (host.hostname, ceph_mon_gib))
self.dbapi.ceph_mon_create(values)
def config_worker_for_ceph(self, context):
"""
configure worker nodes for adding ceph
:param context:
:return: none
"""
personalities = [constants.WORKER]
config_uuid = self._config_update_hosts(context, personalities)
config_dict = {
"personalities": personalities,
"classes": ['platform::ceph::compute::runtime']
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
def _remove_ceph_mon(self, host):
if not StorageBackendConfig.has_backend(
self.dbapi,
constants.CINDER_BACKEND_CEPH
):
return
mon = self.dbapi.ceph_mon_get_by_ihost(host.uuid)
if mon:
LOG.info("Deleting ceph monitor for host %s"
% str(host.hostname))
self.dbapi.ceph_mon_destroy(mon[0].uuid)
else:
LOG.info("No ceph monitor present for host %s. "
"Skipping deleting ceph monitor."
% str(host.hostname))
def update_remotelogging_config(self, context):
"""Update the remotelogging configuration"""
@ -1529,6 +1534,8 @@ class ConductorManager(service.PeriodicService):
self._allocate_addresses_for_host(context, host)
# Set up the PXE config file for this host so it can run the installer
self._update_pxe_config(host)
if host['hostname'] == constants.STORAGE_0_HOSTNAME:
self._ceph_mon_create(host)
# TODO(CephPoolsDecouple): remove
def configure_osd_pools(self, context, ceph_backend=None, new_pool_size=None, new_pool_min_size=None):
@ -1590,6 +1597,7 @@ class ConductorManager(service.PeriodicService):
self._remove_addresses_for_host(host)
self._puppet.remove_host_config(host)
self._remove_pxe_config(host)
self._remove_ceph_mon(host)
def _unconfigure_storage_host(self, host):
"""Unconfigure a storage host.
@ -5649,7 +5657,7 @@ class ConductorManager(service.PeriodicService):
rpcapi.iconfig_update_install_uuid(context, host_uuid, install_uuid)
def update_ceph_config(self, context, sb_uuid, services):
"""Update the manifests for Cinder Ceph backend"""
"""Update the manifests for Ceph backend and services"""
personalities = [constants.CONTROLLER]
@ -5666,7 +5674,7 @@ class ConductorManager(service.PeriodicService):
'platform::haproxy::runtime',
'openstack::keystone::endpoint::runtime',
'platform::filesystem::img_conversions::runtime',
'platform::ceph::controller::runtime',
'platform::ceph::runtime',
]
if utils.is_aio_duplex_system(self.dbapi):
@ -5721,6 +5729,27 @@ class ConductorManager(service.PeriodicService):
'task': str(tasks)}
self.dbapi.storage_ceph_update(sb_uuid, values)
def update_ceph_base_config(self, context, personalities):
""" Update Ceph configuration, monitors and ceph.conf only"""
config_uuid = self._config_update_hosts(context, personalities)
valid_nodes = []
for personality in personalities:
nodes = self.dbapi.ihost_get_by_personality(personality)
valid_nodes += [
node for node in nodes if
(node.administrative == constants.ADMIN_UNLOCKED and
node.operational == constants.OPERATIONAL_ENABLED)]
# TODO: check what other puppet classes need to be called
config_dict = {
"personalities": personalities,
"host_uuids": [node.uuid for node in valid_nodes],
"classes": ['platform::ceph::runtime'],
puppet_common.REPORT_STATUS_CFG: puppet_common.REPORT_CEPH_MONITOR_CONFIG
}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
def config_update_nova_local_backed_hosts(self, context, instance_backing):
hosts_uuid = self.hosts_with_nova_local(instance_backing)
if hosts_uuid:
@ -6001,6 +6030,19 @@ class ConductorManager(service.PeriodicService):
LOG.error("No match for sysinv-agent manifest application reported! "
"reported_cfg: %(cfg)s status: %(status)s "
"iconfig: %(iconfig)s" % args)
elif reported_cfg == puppet_common.REPORT_CEPH_MONITOR_CONFIG:
host_uuid = iconfig['host_uuid']
if status == puppet_common.REPORT_SUCCESS:
# Configuration was successful
self.report_ceph_base_config_success(host_uuid)
elif status == puppet_common.REPORT_FAILURE:
# Configuration has failed
self.report_ceph_base_config_failure(host_uuid, error)
else:
args = {'cfg': reported_cfg, 'status': status, 'iconfig': iconfig}
LOG.error("No match for sysinv-agent manifest application reported! "
"reported_cfg: %(cfg)s status: %(status)s "
"iconfig: %(iconfig)s" % args)
else:
LOG.error("Reported configuration '%(cfg)s' is not handled by"
" report_config_status! iconfig: %(iconfig)s" %
@ -6495,6 +6537,89 @@ class ConductorManager(service.PeriodicService):
values = {'state': constants.SB_STATE_CONFIG_ERR, 'task': None}
self.dbapi.storage_backend_update(backend.uuid, values)
def report_ceph_base_config_success(self, host_uuid):
"""
Callback for Sysinv Agent
"""
LOG.info("Ceph monitor update succeeded on host: %s" % host_uuid)
# Get the monitor that is configuring
monitor_list = self.dbapi.ceph_mon_get_list()
monitor = None
for mon in monitor_list:
if mon.state == constants.SB_STATE_CONFIGURING:
monitor = mon
break
ctrls = self.dbapi.ihost_get_by_personality(constants.CONTROLLER)
# Note that even if nodes are degraded we still accept the answer.
valid_ctrls = [ctrl for ctrl in ctrls if
(ctrl.administrative == constants.ADMIN_LOCKED and
ctrl.availability == constants.AVAILABILITY_ONLINE) or
(ctrl.administrative == constants.ADMIN_UNLOCKED and
ctrl.operational == constants.OPERATIONAL_ENABLED)]
# Set state for current node
for host in valid_ctrls:
if host.uuid == host_uuid:
break
else:
LOG.error("Host %(host) is not in the required state!" % host_uuid)
host = self.dbapi.ihost_get(host_uuid)
if not host:
LOG.error("Host %s is invalid!" % host_uuid)
return
elif host.personality == constants.WORKER:
LOG.info("Ignoring report from worker hosts")
return
tasks = eval(monitor.get('task', '{}'))
if tasks:
tasks[host.hostname] = constants.SB_STATE_CONFIGURED
else:
tasks = {host.hostname: constants.SB_STATE_CONFIGURED}
# Check if all hosts configurations have applied correctly
# and mark config success
config_success = True
for host in valid_ctrls:
if tasks.get(host.hostname, '') != constants.SB_STATE_CONFIGURED:
config_success = False
values = None
if monitor.state != constants.SB_STATE_CONFIG_ERR:
if config_success:
# All hosts have completed configuration
values = {'state': constants.SB_STATE_CONFIGURED, 'task': None}
else:
# This host_uuid has completed configuration
values = {'task': str(tasks)}
if values:
self.dbapi.ceph_mon_update(monitor.uuid, values)
def report_ceph_base_config_failure(self, host_uuid, error):
"""
Callback for Sysinv Agent
"""
LOG.error("Ceph monitor update failed on host: %(host)s. Error: "
"%(error)s" % {'host': host_uuid, 'error': error})
host = self.dbapi.ihost_get(host_uuid)
if host and host.personality == constants.WORKER:
# Ignoring report from worker
return
monitor_list = self.dbapi.ceph_mon_get_list()
monitor = None
for mon in monitor_list:
if mon.state == constants.SB_STATE_CONFIGURING:
monitor = mon
break
# Set monitor to error state
values = {'state': constants.SB_STATE_CONFIG_ERR, 'task': None}
self.dbapi.ceph_mon_update(monitor.uuid, values)
def create_controller_filesystems(self, context, rootfs_device):
""" Create the storage config based on disk size for
database, image, backup, img-conversion

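Both the API _create and the conductor callbacks only count controllers that
are locked/online or unlocked/enabled when deciding whose puppet reports to
wait for. A small sketch of that predicate; the literal state strings are
assumptions standing in for the sysinv constants, and `controllers` is the
list returned by ihost_get_by_personality:

def counts_for_ceph_mon_config(ctrl):
    # Locked but online (e.g. still being provisioned) or fully unlocked/enabled.
    return ((ctrl.administrative == 'locked' and ctrl.availability == 'online') or
            (ctrl.administrative == 'unlocked' and ctrl.operational == 'enabled'))

valid_ctrls = [ctrl for ctrl in controllers if counts_for_ceph_mon_config(ctrl)]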

@ -788,13 +788,19 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
"""
return self.call(context, self.make_msg('update_lvm_config'))
def config_worker_for_ceph(self, context):
"""Synchronously, have the conductor update the worker configuration
for adding ceph.
def update_ceph_base_config(self, context, personalities):
"""Synchronously, have the conductor update the configuration
for monitors and ceph.conf.
:param context: request context.
:param personalities: list of host personalities.
"""
return self.call(context, self.make_msg('config_worker_for_ceph'))
return self.call(
context, self.make_msg(
'update_ceph_base_config',
personalities=personalities
)
)
def update_drbd_config(self, context):
"""Synchronously, have the conductor update the drbd configuration.

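A usage sketch of the renamed RPC, mirroring how the ceph_mon API triggers a
runtime re-apply once a dynamic monitor is added on a worker; the request
context is a placeholder and the import paths follow the usual sysinv layout:

from sysinv.common import constants
from sysinv.conductor import rpcapi as conductor_rpcapi

rpcapi = conductor_rpcapi.ConductorAPI()
rpcapi.update_ceph_base_config(
    context,  # sysinv request context (placeholder)
    personalities=[constants.CONTROLLER, constants.WORKER])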

@ -0,0 +1,26 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sqlalchemy import Column, MetaData, String, Table
ENGINE = 'InnoDB'
CHARSET = 'utf8'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
ceph_mon = Table('ceph_mon', meta, autoload=True)
ceph_mon.create_column(Column('state', String(255)))
ceph_mon.create_column(Column('task', String(255)))
def downgrade(migrate_engine):
# As per other openstack components, downgrade is
# unsupported in this release.
raise NotImplementedError('SysInv database downgrade is unsupported.')


@ -953,6 +953,8 @@ class CephMon(Base):
uuid = Column(String(36))
device_path = Column(String(255))
ceph_mon_gib = Column(Integer)
state = Column(String(255))
task = Column(String(255))
forihostid = Column(Integer, ForeignKey('i_host.id', ondelete='CASCADE'))
host = relationship("ihost", lazy="joined", join_depth=1)


@ -23,13 +23,17 @@ class CephMon(base.SysinvObject):
'device_path': utils.str_or_none,
'ceph_mon_gib': utils.int_or_none,
'state': utils.str_or_none,
'task': utils.str_or_none,
'forihostid': utils.int_or_none,
'ihost_uuid': utils.str_or_none,
'hostname': utils.str_or_none,
}
_foreign_fields = {
'hostname': 'host:hostname',
'ihost_uuid': 'host:uuid'
}
@base.remotable_classmethod


@ -33,6 +33,7 @@ REPORT_EXTERNAL_BACKEND_CONFIG = 'external_config'
REPORT_CEPH_BACKEND_CONFIG = 'ceph_config'
REPORT_CEPH_EXTERNAL_BACKEND_CONFIG = 'ceph_external_config'
REPORT_CEPH_SERVICES_CONFIG = 'ceph_services'
REPORT_CEPH_MONITOR_CONFIG = 'ceph_monitor'
def puppet_apply_manifest(ip_address, personality,