Add support for radosgw upgrades

Sync charms.ceph and use its helper functions to determine whether a
change to the source configuration option is a supported upgrade path.
If an upgrade path is detected, upgrade via apt_install with the full
list of packages required by the radosgw, forcing the upgrade.

Change-Id: I48a8b5d14ad6ac11af57ddf0260a4a41744e7e21
Closes-Bug: 1539335
parent d8ef5d1e7b
commit 0f3203b18c
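The core of the change is the upgrade_available() helper added to hooks/ceph_hooks.py below. As a minimal, self-contained sketch of the detection logic: the UPGRADE_PATHS entry and the version strings are taken from the unit tests at the end of this diff, while resolve_ceph_version() here is an illustrative stand-in for ceph.utils.resolve_ceph_version.

    # Standalone sketch of the upgrade-path check; illustrative only.
    UPGRADE_PATHS = {
        'luminous': 'mimic',   # mirrors the unit tests below
    }

    def resolve_ceph_version(source):
        # Stand-in for ceph.utils.resolve_ceph_version, which maps a Juju
        # 'source' option to a Ceph release name; the mapping here is an
        # assumption for the example.
        return {'distro': 'luminous', 'cloud:bionic-rocky': 'mimic'}.get(source)

    def upgrade_available(previous_source, current_source):
        # An upgrade is available only when old -> new is a supported path.
        old_version = resolve_ceph_version(previous_source or 'distro')
        new_version = resolve_ceph_version(current_source)
        return (old_version in UPGRADE_PATHS and
                new_version == UPGRADE_PATHS[old_version])

    assert upgrade_available('distro', 'cloud:bionic-rocky') is True
    assert upgrade_available('distro', 'distro') is False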
.pydevproject
@@ -1,8 +1,15 @@
 <?xml version="1.0" encoding="UTF-8" standalone="no"?>
 <?eclipse-pydev version="1.0"?><pydev_project>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
-<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
-<path>/ceph-radosgw/hooks</path>
-</pydev_pathproperty>
+
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
+
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
+
+<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
+<path>/${PROJECT_DIR_NAME}/lib</path>
+<path>/${PROJECT_DIR_NAME}/hooks</path>
+<path>/${PROJECT_DIR_NAME}/unit_tests</path>
+<path>/${PROJECT_DIR_NAME}/actions</path>
+</pydev_pathproperty>
+
 </pydev_project>
Makefile
@@ -16,9 +16,12 @@ bin/charm_helpers_sync.py:
 	@mkdir -p bin
 	@curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py
 
+bin/git_sync.py:
+	@mkdir -p bin
+	@wget -O bin/git_sync.py https://raw.githubusercontent.com/CanonicalLtd/git-sync/master/git_sync.py
+
 sync: bin/charm_helpers_sync.py
 	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
 
-publish: lint test
-	bzr push lp:charms/ceph-radosgw
-	bzr push lp:charms/trusty/ceph-radosgw
+ceph-sync: bin/git_sync.py
+	$(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git
hooks/ceph_hooks.py
@@ -19,7 +19,10 @@ import subprocess
 import sys
 import socket
 
-import ceph
+sys.path.append('lib')
+
+import ceph_rgw as ceph
+import ceph.utils as ceph_utils
 
 from charmhelpers.core.hookenv import (
     relation_get,
@@ -39,6 +42,7 @@ from charmhelpers.fetch import (
     apt_purge,
     add_source,
     filter_installed_packages,
+    filter_missing_packages,
 )
 from charmhelpers.payload.execd import execd_preinstall
 from charmhelpers.core.host import (
@@ -115,16 +119,45 @@ APACHE_PACKAGES = [
 ]
 
 
+def upgrade_available():
+    """Check whether a ceph upgrade is available.
+
+    :returns: whether an upgrade is available
+    :rtype: boolean
+    """
+    c = config()
+    old_version = ceph_utils.resolve_ceph_version(c.previous('source') or
+                                                  'distro')
+    new_version = ceph_utils.resolve_ceph_version(c.get('source'))
+    if (old_version in ceph_utils.UPGRADE_PATHS and
+            new_version == ceph_utils.UPGRADE_PATHS[old_version]):
+        return True
+    return False
+
+
 def install_packages():
-    add_source(config('source'), config('key'))
-    apt_update(fatal=True)
+    c = config()
+    if c.changed('source') or c.changed('key'):
+        add_source(c.get('source'), c.get('key'))
+        apt_update(fatal=True)
 
     if is_container():
         PACKAGES.remove('ntp')
-    pkgs = filter_installed_packages(PACKAGES)
+
+    # NOTE: just use the full package list if we're in an upgrade
+    #       config-changed execution
+    pkgs = (
+        PACKAGES if upgrade_available() else
+        filter_installed_packages(PACKAGES)
+    )
     if pkgs:
         status_set('maintenance', 'Installing radosgw packages')
-        apt_install(PACKAGES, fatal=True)
-    apt_purge(APACHE_PACKAGES)
+        apt_install(pkgs, fatal=True)
+
+    pkgs = filter_missing_packages(APACHE_PACKAGES)
+    if pkgs:
+        apt_purge(pkgs)
+
     disable_unused_apache_sites()
@@ -153,7 +186,6 @@ def config_changed():
         return
 
     install_packages()
-    disable_unused_apache_sites()
 
     if config('prefer-ipv6'):
         status_set('maintenance', 'configuring ipv6')
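The rewritten install_packages() only re-adds the apt source when the source or key option actually changed, which relies on charmhelpers' Config remembering values from the previous hook execution. A rough sketch of the previous()/changed() semantics used here (simplified; the real charmhelpers.core.hookenv.Config persists its snapshot to disk between hook runs):

    # Simplified model of Config.previous()/.changed(); illustrative only.
    class Config(dict):
        def __init__(self, current, previous_snapshot):
            super(Config, self).__init__(current)
            self._prev = previous_snapshot

        def previous(self, key):
            # Value the option had in the last completed hook execution.
            return self._prev.get(key)

        def changed(self, key):
            # True when the current value differs from the previous one.
            return self.get(key) != self.previous(key)

    c = Config({'source': 'cloud:bionic-rocky'}, {'source': 'distro'})
    assert c.changed('source')
    assert c.previous('source') == 'distro'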
hooks/install
@@ -1,7 +1,7 @@
 #!/bin/bash -e
 # Install required dependencies for charm runtime
 
-declare -a DEPS=('apt' 'netaddr' 'netifaces' 'yaml' 'jinja2' 'dnspython')
+declare -a DEPS=('apt' 'netaddr' 'netifaces' 'yaml' 'jinja2' 'dnspython' 'pyudev')
 
 check_and_install() {
     pkg="${1}-${2}"
@@ -1,3 +0,0 @@
-This file was created by release-tools to ensure that this empty
-directory is preserved in vcs re: lint check definitions in global
-tox.ini files. This file can be removed if/when this dir is actually in use.
lib/ceph/__init__.py (new file, empty)
lib/ceph/broker.py (new file, 872 lines)
@@ -0,0 +1,872 @@
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import json
import os

from subprocess import check_call, check_output, CalledProcessError
from tempfile import NamedTemporaryFile

from ceph.utils import (
    get_cephfs,
    get_osd_weight
)
from ceph.crush_utils import Crushmap

from charmhelpers.core.hookenv import (
    log,
    DEBUG,
    INFO,
    ERROR,
)
from charmhelpers.contrib.storage.linux.ceph import (
    create_erasure_profile,
    delete_pool,
    erasure_profile_exists,
    get_osds,
    monitor_key_get,
    monitor_key_set,
    pool_exists,
    pool_set,
    remove_pool_snapshot,
    rename_pool,
    set_pool_quota,
    snapshot_pool,
    validator,
    ErasurePool,
    Pool,
    ReplicatedPool,
)

# This comes from http://docs.ceph.com/docs/master/rados/operations/pools/
# This should do a decent job of preventing people from passing in bad values.
# It will give a useful error message.
POOL_KEYS = {
    # "Ceph Key Name": [Python type, [Valid Range]]
    "size": [int],
    "min_size": [int],
    "crash_replay_interval": [int],
    "pgp_num": [int],  # = or < pg_num
    "crush_ruleset": [int],
    "hashpspool": [bool],
    "nodelete": [bool],
    "nopgchange": [bool],
    "nosizechange": [bool],
    "write_fadvise_dontneed": [bool],
    "noscrub": [bool],
    "nodeep-scrub": [bool],
    "hit_set_type": [str, ["bloom", "explicit_hash",
                           "explicit_object"]],
    "hit_set_count": [int, [1, 1]],
    "hit_set_period": [int],
    "hit_set_fpp": [float, [0.0, 1.0]],
    "cache_target_dirty_ratio": [float],
    "cache_target_dirty_high_ratio": [float],
    "cache_target_full_ratio": [float],
    "target_max_bytes": [int],
    "target_max_objects": [int],
    "cache_min_flush_age": [int],
    "cache_min_evict_age": [int],
    "fast_read": [bool],
    "allow_ec_overwrites": [bool],
    "compression_mode": [str, ["none", "passive", "aggressive", "force"]],
    "compression_algorithm": [str, ["lz4", "snappy", "zlib", "zstd"]],
    "compression_required_ratio": [float, [0.0, 1.0]],
}

CEPH_BUCKET_TYPES = [
    'osd',
    'host',
    'chassis',
    'rack',
    'row',
    'pdu',
    'pod',
    'room',
    'datacenter',
    'region',
    'root'
]


def decode_req_encode_rsp(f):
    """Decorator to decode incoming requests and encode responses."""

    def decode_inner(req):
        return json.dumps(f(json.loads(req)))

    return decode_inner
@decode_req_encode_rsp
def process_requests(reqs):
    """Process Ceph broker request(s).

    This is a versioned api. API version must be supplied by the client making
    the request.

    :param reqs: dict of request parameters.
    :returns: dict. exit-code and reason if not 0
    """
    request_id = reqs.get('request-id')
    try:
        version = reqs.get('api-version')
        if version == 1:
            log('Processing request {}'.format(request_id), level=DEBUG)
            resp = process_requests_v1(reqs['ops'])
            if request_id:
                resp['request-id'] = request_id

            return resp

    except Exception as exc:
        log(str(exc), level=ERROR)
        msg = ("Unexpected error occurred while processing requests: %s" %
               reqs)
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    msg = ("Missing or invalid api version ({})".format(version))
    resp = {'exit-code': 1, 'stderr': msg}
    if request_id:
        resp['request-id'] = request_id

    return resp
def handle_create_erasure_profile(request, service):
    """Create an erasure profile.

    :param request: dict of request operations and params
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0
    """
    # "local" | "shec" or it defaults to "jerasure"
    erasure_type = request.get('erasure-type')
    # "host" | "rack" or it defaults to "host"  # Any valid Ceph bucket
    failure_domain = request.get('failure-domain')
    name = request.get('name')
    k = request.get('k')
    m = request.get('m')
    l = request.get('l')

    if failure_domain not in CEPH_BUCKET_TYPES:
        msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES)
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    create_erasure_profile(service=service, erasure_plugin_name=erasure_type,
                           profile_name=name, failure_domain=failure_domain,
                           data_chunks=k, coding_chunks=m, locality=l)
def handle_add_permissions_to_key(request, service):
    """Groups are defined by the key cephx.groups.(namespace-)?-(name). This
    key will contain a dict serialized to JSON with data about the group,
    including pools and members.

    A group can optionally have a namespace defined that will be used to
    further restrict pool access.
    """
    resp = {'exit-code': 0}

    service_name = request.get('name')
    group_name = request.get('group')
    group_namespace = request.get('group-namespace')
    if group_namespace:
        group_name = "{}-{}".format(group_namespace, group_name)
    group = get_group(group_name=group_name)
    service_obj = get_service_groups(service=service_name,
                                     namespace=group_namespace)
    if request.get('object-prefix-permissions'):
        service_obj['object_prefix_perms'] = request.get(
            'object-prefix-permissions')
    log("Service object: {}".format(service_obj), level=DEBUG)
    permission = request.get('group-permission') or "rwx"
    if service_name not in group['services']:
        group['services'].append(service_name)
    save_group(group=group, group_name=group_name)
    if permission not in service_obj['group_names']:
        service_obj['group_names'][permission] = []
    if group_name not in service_obj['group_names'][permission]:
        service_obj['group_names'][permission].append(group_name)
    save_service(service=service_obj, service_name=service_name)
    service_obj['groups'] = _build_service_groups(service_obj,
                                                  group_namespace)
    update_service_permissions(service_name, service_obj, group_namespace)

    return resp
def update_service_permissions(service, service_obj=None, namespace=None):
    """Update the key permissions for the named client in Ceph."""
    if not service_obj:
        service_obj = get_service_groups(service=service, namespace=namespace)
    permissions = pool_permission_list_for_service(service_obj)
    call = ['ceph', 'auth', 'caps', 'client.{}'.format(service)] + permissions
    try:
        check_call(call)
    except CalledProcessError as e:
        log("Error updating key capabilities: {}".format(e))


def add_pool_to_group(pool, group, namespace=None):
    """Add a named pool to a named group."""
    group_name = group
    if namespace:
        group_name = "{}-{}".format(namespace, group_name)
    group = get_group(group_name=group_name)
    if pool not in group['pools']:
        group["pools"].append(pool)
        save_group(group, group_name=group_name)
        for service in group['services']:
            update_service_permissions(service, namespace=namespace)
def pool_permission_list_for_service(service):
    """Build the permission string for Ceph for a given service."""
    permissions = []
    permission_types = collections.OrderedDict()
    for permission, group in sorted(service["group_names"].items()):
        if permission not in permission_types:
            permission_types[permission] = []
        for item in group:
            permission_types[permission].append(item)
    for permission, groups in permission_types.items():
        permission = "allow {}".format(permission)
        for group in groups:
            for pool in service['groups'][group].get('pools', []):
                permissions.append("{} pool={}".format(permission, pool))
    for permission, prefixes in sorted(
            service.get("object_prefix_perms", {}).items()):
        for prefix in prefixes:
            permissions.append("allow {} object_prefix {}".format(permission,
                                                                  prefix))
    return ['mon', 'allow r, allow command "osd blacklist"',
            'osd', ', '.join(permissions)]
def get_service_groups(service, namespace=None):
    """Services are objects stored with some metadata, they look like (for a
    service named "nova"):
    {
        group_names: {'rwx': ['images']},
        groups: {}
    }
    After populating the group, it looks like:
    {
        group_names: {'rwx': ['images']},
        groups: {
            'images': {
                pools: ['glance'],
                services: ['nova']
            }
        }
    }
    """
    service_json = monitor_key_get(service='admin',
                                   key="cephx.services.{}".format(service))
    try:
        service = json.loads(service_json)
    except (TypeError, ValueError):
        service = None
    if service:
        service['groups'] = _build_service_groups(service, namespace)
    else:
        service = {'group_names': {}, 'groups': {}}
    return service
def _build_service_groups(service, namespace=None):
    """Rebuild the 'groups' dict for a service group

    :returns: dict: dictionary keyed by group name of the following
                    format:

                    {
                        'images': {
                            pools: ['glance'],
                            services: ['nova', 'glance']
                        },
                        'vms': {
                            pools: ['nova'],
                            services: ['nova']
                        }
                    }
    """
    all_groups = {}
    for groups in service['group_names'].values():
        for group in groups:
            name = group
            if namespace:
                name = "{}-{}".format(namespace, name)
            all_groups[group] = get_group(group_name=name)
    return all_groups
def get_group(group_name):
    """A group is a structure to hold data about a named group, structured as:
    {
        pools: ['glance'],
        services: ['nova']
    }
    """
    group_key = get_group_key(group_name=group_name)
    group_json = monitor_key_get(service='admin', key=group_key)
    try:
        group = json.loads(group_json)
    except (TypeError, ValueError):
        group = None
    if not group:
        group = {
            'pools': [],
            'services': []
        }
    return group
def save_service(service_name, service):
    """Persist a service in the monitor cluster."""
    service['groups'] = {}
    return monitor_key_set(service='admin',
                           key="cephx.services.{}".format(service_name),
                           value=json.dumps(service, sort_keys=True))


def save_group(group, group_name):
    """Persist a group in the monitor cluster."""
    group_key = get_group_key(group_name=group_name)
    return monitor_key_set(service='admin',
                           key=group_key,
                           value=json.dumps(group, sort_keys=True))


def get_group_key(group_name):
    """Build a group key."""
    return 'cephx.groups.{}'.format(group_name)
def handle_erasure_pool(request, service):
    """Create a new erasure coded pool.

    :param request: dict of request operations and params.
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0.
    """
    pool_name = request.get('name')
    erasure_profile = request.get('erasure-profile')
    quota = request.get('max-bytes')
    weight = request.get('weight')
    group_name = request.get('group')

    if erasure_profile is None:
        erasure_profile = "default-canonical"

    app_name = request.get('app-name')

    # Check for missing params
    if pool_name is None:
        msg = "Missing parameter. name is required for the pool"
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    if group_name:
        group_namespace = request.get('group-namespace')
        # Add the pool to the group named "group_name"
        add_pool_to_group(pool=pool_name,
                          group=group_name,
                          namespace=group_namespace)

    # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds
    if not erasure_profile_exists(service=service, name=erasure_profile):
        # TODO: Fail and tell them to create the profile or default
        msg = ("erasure-profile {} does not exist. Please create it with: "
               "create-erasure-profile".format(erasure_profile))
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    pool = ErasurePool(service=service, name=pool_name,
                       erasure_code_profile=erasure_profile,
                       percent_data=weight, app_name=app_name)
    # Ok make the erasure pool
    if not pool_exists(service=service, name=pool_name):
        log("Creating pool '{}' (erasure_profile={})"
            .format(pool.name, erasure_profile), level=INFO)
        pool.create()

    # Set a quota if requested
    if quota is not None:
        set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota)
def handle_replicated_pool(request, service):
    """Create a new replicated pool.

    :param request: dict of request operations and params.
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0.
    """
    pool_name = request.get('name')
    replicas = request.get('replicas')
    quota = request.get('max-bytes')
    weight = request.get('weight')
    group_name = request.get('group')

    # Optional params
    pg_num = request.get('pg_num')
    if pg_num:
        # Cap pg_num to max allowed just in case.
        osds = get_osds(service)
        if osds:
            pg_num = min(pg_num, (len(osds) * 100 // replicas))

    app_name = request.get('app-name')
    # Check for missing params
    if pool_name is None or replicas is None:
        msg = "Missing parameter. name and replicas are required"
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    if group_name:
        group_namespace = request.get('group-namespace')
        # Add the pool to the group named "group_name"
        add_pool_to_group(pool=pool_name,
                          group=group_name,
                          namespace=group_namespace)

    kwargs = {}
    if pg_num:
        kwargs['pg_num'] = pg_num
    if weight:
        kwargs['percent_data'] = weight
    if replicas:
        kwargs['replicas'] = replicas
    if app_name:
        kwargs['app_name'] = app_name

    pool = ReplicatedPool(service=service,
                          name=pool_name, **kwargs)
    if not pool_exists(service=service, name=pool_name):
        log("Creating pool '{}' (replicas={})".format(pool.name, replicas),
            level=INFO)
        pool.create()
    else:
        log("Pool '{}' already exists - skipping create".format(pool.name),
            level=DEBUG)

    # Set a quota if requested
    if quota is not None:
        set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota)
def handle_create_cache_tier(request, service):
    """Create a cache tier on a cold pool. Modes supported are
    "writeback" and "readonly".

    :param request: dict of request operations and params
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0
    """
    # mode = "writeback" | "readonly"
    storage_pool = request.get('cold-pool')
    cache_pool = request.get('hot-pool')
    cache_mode = request.get('mode')

    if cache_mode is None:
        cache_mode = "writeback"

    # cache and storage pool must exist first
    if not pool_exists(service=service, name=storage_pool) or not pool_exists(
            service=service, name=cache_pool):
        msg = ("cold-pool: {} and hot-pool: {} must exist. Please create "
               "them first".format(storage_pool, cache_pool))
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    p = Pool(service=service, name=storage_pool)
    p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode)


def handle_remove_cache_tier(request, service):
    """Remove a cache tier from the cold pool.

    :param request: dict of request operations and params
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0
    """
    storage_pool = request.get('cold-pool')
    cache_pool = request.get('hot-pool')
    # cache and storage pool must exist first
    if not pool_exists(service=service, name=storage_pool) or not pool_exists(
            service=service, name=cache_pool):
        msg = ("cold-pool: {} or hot-pool: {} doesn't exist. Not "
               "deleting cache tier".format(storage_pool, cache_pool))
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    pool = Pool(name=storage_pool, service=service)
    pool.remove_cache_tier(cache_pool=cache_pool)
def handle_set_pool_value(request, service):
    """Sets an arbitrary pool value.

    :param request: dict of request operations and params
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0
    """
    # Set arbitrary pool values
    params = {'pool': request.get('name'),
              'key': request.get('key'),
              'value': request.get('value')}
    if params['key'] not in POOL_KEYS:
        msg = "Invalid key '{}'".format(params['key'])
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    # Get the validation method
    validator_params = POOL_KEYS[params['key']]
    if len(validator_params) == 1:
        # Validate that what the user passed is actually legal per Ceph's
        # rules
        validator(params['value'], validator_params[0])
    else:
        # Validate that what the user passed is actually legal per Ceph's
        # rules
        validator(params['value'], validator_params[0], validator_params[1])

    # Set the value
    pool_set(service=service, pool_name=params['pool'], key=params['key'],
             value=params['value'])
def handle_rgw_regionmap_update(request, service):
    """Change the radosgw region map.

    :param request: dict of request operations and params
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0
    """
    name = request.get('client-name')
    if not name:
        msg = "Missing client-name param"
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}
    try:
        check_output(['radosgw-admin',
                      '--id', service,
                      'regionmap', 'update', '--name', name])
    except CalledProcessError as err:
        log(err.output, level=ERROR)
        return {'exit-code': 1, 'stderr': err.output}


def handle_rgw_regionmap_default(request, service):
    """Create a radosgw region map.

    :param request: dict of request operations and params
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0
    """
    region = request.get('rgw-region')
    name = request.get('client-name')
    if not region or not name:
        msg = "Missing rgw-region or client-name params"
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}
    try:
        check_output(
            [
                'radosgw-admin',
                '--id', service,
                'regionmap',
                'default',
                '--rgw-region', region,
                '--name', name])
    except CalledProcessError as err:
        log(err.output, level=ERROR)
        return {'exit-code': 1, 'stderr': err.output}
def handle_rgw_zone_set(request, service):
    """Create a radosgw zone.

    :param request: dict of request operations and params
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0
    """
    json_file = request.get('zone-json')
    name = request.get('client-name')
    region_name = request.get('region-name')
    zone_name = request.get('zone-name')
    if not json_file or not name or not region_name or not zone_name:
        msg = ("Missing zone-json, client-name, region-name or "
               "zone-name params")
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}
    infile = NamedTemporaryFile(delete=False)
    with open(infile.name, 'w') as infile_handle:
        infile_handle.write(json_file)
    try:
        check_output(
            [
                'radosgw-admin',
                '--id', service,
                'zone',
                'set',
                '--rgw-zone', zone_name,
                '--infile', infile.name,
                '--name', name,
            ]
        )
    except CalledProcessError as err:
        log(err.output, level=ERROR)
        return {'exit-code': 1, 'stderr': err.output}
    os.unlink(infile.name)
def handle_put_osd_in_bucket(request, service):
    """Move an osd into a specified crush bucket.

    :param request: dict of request operations and params
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0
    """
    osd_id = request.get('osd')
    target_bucket = request.get('bucket')
    if not osd_id or not target_bucket:
        msg = "Missing OSD ID or Bucket"
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}
    crushmap = Crushmap()
    try:
        crushmap.ensure_bucket_is_present(target_bucket)
        check_output(
            [
                'ceph',
                '--id', service,
                'osd',
                'crush',
                'set',
                str(osd_id),
                str(get_osd_weight(osd_id)),
                "root={}".format(target_bucket)
            ]
        )

    except Exception as exc:
        msg = "Failed to move OSD " \
              "{} into Bucket {} :: {}".format(osd_id, target_bucket, exc)
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}
def handle_rgw_create_user(request, service):
    """Create a new rados gateway user.

    :param request: dict of request operations and params
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0
    """
    user_id = request.get('rgw-uid')
    display_name = request.get('display-name')
    name = request.get('client-name')
    if not name or not display_name or not user_id:
        msg = "Missing client-name, display-name or rgw-uid"
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}
    try:
        create_output = check_output(
            [
                'radosgw-admin',
                '--id', service,
                'user',
                'create',
                '--uid', user_id,
                '--display-name', display_name,
                '--name', name,
                '--system'
            ]
        )
        try:
            user_json = json.loads(str(create_output.decode('UTF-8')))
            return {'exit-code': 0, 'user': user_json}
        except ValueError as err:
            log(err, level=ERROR)
            return {'exit-code': 1, 'stderr': err}

    except CalledProcessError as err:
        log(err.output, level=ERROR)
        return {'exit-code': 1, 'stderr': err.output}
def handle_create_cephfs(request, service):
    """Create a new cephfs.

    :param request: The broker request
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0
    """
    cephfs_name = request.get('mds_name')
    data_pool = request.get('data_pool')
    metadata_pool = request.get('metadata_pool')
    # Check if the user params were provided
    if not cephfs_name or not data_pool or not metadata_pool:
        msg = "Missing mds_name, data_pool or metadata_pool params"
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    # Sanity check that the required pools exist
    if not pool_exists(service=service, name=data_pool):
        msg = "CephFS data pool does not exist. Cannot create CephFS"
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}
    if not pool_exists(service=service, name=metadata_pool):
        msg = "CephFS metadata pool does not exist. Cannot create CephFS"
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    if get_cephfs(service=service):
        # 'ceph fs new' has already been called
        log("CephFS already created")
        return

    # Finally create CephFS
    try:
        check_output(["ceph",
                      '--id', service,
                      "fs", "new", cephfs_name,
                      metadata_pool,
                      data_pool])
    except CalledProcessError as err:
        if err.returncode == 22:
            log("CephFS already created")
            return
        else:
            log(err.output, level=ERROR)
            return {'exit-code': 1, 'stderr': err.output}
def handle_rgw_region_set(request, service):
    # radosgw-admin region set --infile us.json --name client.radosgw.us-east-1
    """Set the rados gateway region.

    :param request: dict. The broker request.
    :param service: The ceph client to run the command under.
    :returns: dict. exit-code and reason if not 0
    """
    json_file = request.get('region-json')
    name = request.get('client-name')
    region_name = request.get('region-name')
    zone_name = request.get('zone-name')
    if not json_file or not name or not region_name or not zone_name:
        msg = ("Missing region-json, client-name, region-name or "
               "zone-name params")
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}
    infile = NamedTemporaryFile(delete=False)
    with open(infile.name, 'w') as infile_handle:
        infile_handle.write(json_file)
    try:
        check_output(
            [
                'radosgw-admin',
                '--id', service,
                'region',
                'set',
                '--rgw-zone', zone_name,
                '--infile', infile.name,
                '--name', name,
            ]
        )
    except CalledProcessError as err:
        log(err.output, level=ERROR)
        return {'exit-code': 1, 'stderr': err.output}
    os.unlink(infile.name)
def process_requests_v1(reqs):
    """Process v1 requests.

    Takes a list of requests (dicts) and processes each one. If an error is
    found, processing stops and the client is notified in the response.

    Returns a response dict containing the exit code (non-zero if any
    operation failed along with an explanation).
    """
    ret = None
    log("Processing {} ceph broker requests".format(len(reqs)), level=INFO)
    for req in reqs:
        op = req.get('op')
        log("Processing op='{}'".format(op), level=DEBUG)
        # Use admin client since we do not have other client key locations
        # setup to use them for these operations.
        svc = 'admin'
        if op == "create-pool":
            pool_type = req.get('pool-type')  # "replicated" | "erasure"

            # Default to replicated if pool_type isn't given
            if pool_type == 'erasure':
                ret = handle_erasure_pool(request=req, service=svc)
            else:
                ret = handle_replicated_pool(request=req, service=svc)
        elif op == "create-cephfs":
            ret = handle_create_cephfs(request=req, service=svc)
        elif op == "create-cache-tier":
            ret = handle_create_cache_tier(request=req, service=svc)
        elif op == "remove-cache-tier":
            ret = handle_remove_cache_tier(request=req, service=svc)
        elif op == "create-erasure-profile":
            ret = handle_create_erasure_profile(request=req, service=svc)
        elif op == "delete-pool":
            pool = req.get('name')
            ret = delete_pool(service=svc, name=pool)
        elif op == "rename-pool":
            old_name = req.get('name')
            new_name = req.get('new-name')
            ret = rename_pool(service=svc, old_name=old_name,
                              new_name=new_name)
        elif op == "snapshot-pool":
            pool = req.get('name')
            snapshot_name = req.get('snapshot-name')
            ret = snapshot_pool(service=svc, pool_name=pool,
                                snapshot_name=snapshot_name)
        elif op == "remove-pool-snapshot":
            pool = req.get('name')
            snapshot_name = req.get('snapshot-name')
            ret = remove_pool_snapshot(service=svc, pool_name=pool,
                                       snapshot_name=snapshot_name)
        elif op == "set-pool-value":
            ret = handle_set_pool_value(request=req, service=svc)
        elif op == "rgw-region-set":
            ret = handle_rgw_region_set(request=req, service=svc)
        elif op == "rgw-zone-set":
            ret = handle_rgw_zone_set(request=req, service=svc)
        elif op == "rgw-regionmap-update":
            ret = handle_rgw_regionmap_update(request=req, service=svc)
        elif op == "rgw-regionmap-default":
            ret = handle_rgw_regionmap_default(request=req, service=svc)
        elif op == "rgw-create-user":
            ret = handle_rgw_create_user(request=req, service=svc)
        elif op == "move-osd-to-bucket":
            ret = handle_put_osd_in_bucket(request=req, service=svc)
        elif op == "add-permissions-to-key":
            ret = handle_add_permissions_to_key(request=req, service=svc)
        else:
            msg = "Unknown operation '{}'".format(op)
            log(msg, level=ERROR)
            return {'exit-code': 1, 'stderr': msg}

    if type(ret) == dict and 'exit-code' in ret:
        return ret

    return {'exit-code': 0}
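For reference, process_requests() above consumes a JSON-encoded, versioned payload. A representative v1 request built against the handlers it dispatches to might look like the following (the pool name and values are made up for the example):

    import json

    # Illustrative v1 broker payload; 'op' values map to the handlers above.
    request = json.dumps({
        'api-version': 1,
        'request-id': 'abc123',   # echoed back in the response when supplied
        'ops': [
            {'op': 'create-pool', 'name': 'glance', 'replicas': 3},
            {'op': 'set-pool-value', 'name': 'glance',
             'key': 'nodelete', 'value': True},
        ],
    })
    # response = process_requests(request)
    # -> '{"exit-code": 0, "request-id": "abc123"}' on success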
lib/ceph/crush_utils.py (new file, 154 lines)
@@ -0,0 +1,154 @@
# Copyright 2014 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

from subprocess import check_output, CalledProcessError

from charmhelpers.core.hookenv import (
    log,
    ERROR,
)

CRUSH_BUCKET = """root {name} {{
    id {id}    # do not change unnecessarily
    # weight 0.000
    alg straw
    hash 0  # rjenkins1
}}

rule {name} {{
    ruleset 0
    type replicated
    min_size 1
    max_size 10
    step take {name}
    step chooseleaf firstn 0 type host
    step emit
}}"""

# This regular expression looks for a string like:
# root NAME {
# id NUMBER
# so that we can extract NAME and ID from the crushmap
CRUSHMAP_BUCKETS_RE = re.compile(r"root\s+(.+)\s+\{\s*id\s+(-?\d+)")

# This regular expression looks for ID strings in the crushmap like:
# id NUMBER
# so that we can extract the IDs from a crushmap
CRUSHMAP_ID_RE = re.compile(r"id\s+(-?\d+)")
class Crushmap(object):
    """An object oriented approach to Ceph crushmap management."""

    def __init__(self):
        self._crushmap = self.load_crushmap()
        roots = re.findall(CRUSHMAP_BUCKETS_RE, self._crushmap)
        buckets = []
        ids = list(map(
            lambda x: int(x),
            re.findall(CRUSHMAP_ID_RE, self._crushmap)))
        ids = sorted(ids)
        if roots != []:
            for root in roots:
                buckets.append(CRUSHBucket(root[0], root[1], True))

        self._buckets = buckets
        if ids != []:
            self._ids = ids
        else:
            self._ids = [0]
    def load_crushmap(self):
        try:
            # 'ceph osd getcrushmap' emits a compiled (binary) map; pipe it
            # through 'crushtool -d -' to decompile it into text form.
            # (The original passed a str and referenced .stdout on it, which
            # cannot work; input= requires Python >= 3.4.)
            crush = check_output(['ceph', 'osd', 'getcrushmap'])
            return check_output(['crushtool', '-d', '-'],
                                input=crush).decode('UTF-8')
        except CalledProcessError as e:
            log("Error occurred while loading and decompiling CRUSH map: "
                "{}".format(e), ERROR)
            raise RuntimeError("Failed to read CRUSH map")
    def ensure_bucket_is_present(self, bucket_name):
        if bucket_name not in [bucket.name for bucket in self.buckets()]:
            self.add_bucket(bucket_name)
            self.save()

    def buckets(self):
        """Return a list of buckets that are in the Crushmap."""
        return self._buckets

    def add_bucket(self, bucket_name):
        """Add a named bucket to Ceph."""
        new_id = min(self._ids) - 1
        self._ids.append(new_id)
        self._buckets.append(CRUSHBucket(bucket_name, new_id))

    def save(self):
        """Persist the in-memory Crushmap back to Ceph."""
        try:
            crushmap = self.build_crushmap()
            # Compile the textual map, then feed the compiled result to
            # 'ceph osd setcrushmap'. (The original passed plain strings as
            # stdin=, which check_output does not accept; input= requires
            # Python >= 3.4.)
            compiled = check_output(['crushtool', '-c', '/dev/stdin', '-o',
                                     '/dev/stdout'],
                                    input=crushmap.encode('UTF-8'))
            ceph_output = check_output(['ceph', 'osd', 'setcrushmap', '-i',
                                        '/dev/stdin'], input=compiled)
            return ceph_output.decode('UTF-8')
        except CalledProcessError as e:
            log("save error: {}".format(e))
            raise RuntimeError("Failed to save CRUSH map.")
    def build_crushmap(self):
        """Modifies the current CRUSH map to include the new buckets."""
        tmp_crushmap = self._crushmap
        for bucket in self._buckets:
            if not bucket.default:
                tmp_crushmap = "{}\n\n{}".format(
                    tmp_crushmap,
                    Crushmap.bucket_string(bucket.name, bucket.id))

        return tmp_crushmap

    @staticmethod
    def bucket_string(name, id):
        return CRUSH_BUCKET.format(name=name, id=id)


class CRUSHBucket(object):
    """CRUSH bucket description object."""

    def __init__(self, name, id, default=False):
        self.name = name
        self.id = int(id)
        self.default = default

    def __repr__(self):
        return "Bucket {{Name: {name}, ID: {id}}}".format(
            name=self.name, id=self.id)

    def __eq__(self, other):
        """Override the default equals behavior."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented

    def __ne__(self, other):
        """Define a non-equality test."""
        if isinstance(other, self.__class__):
            return not self.__eq__(other)
        return NotImplemented
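handle_put_osd_in_bucket() in broker.py drives this class roughly as follows; the bucket name is illustrative, and a reachable cluster plus the ceph and crushtool binaries are assumed:

    # Illustrative usage, mirroring handle_put_osd_in_bucket() above.
    crushmap = Crushmap()                     # reads and decompiles the live map
    crushmap.ensure_bucket_is_present('ssd')  # adds root bucket 'ssd' if absent,
                                              # then compiles and saves the map
    for bucket in crushmap.buckets():
        print(bucket)                         # e.g. Bucket {Name: ssd, ID: -2}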
lib/ceph/utils.py (new file, 2729 lines)
File diff suppressed because it is too large.
@@ -23,7 +23,7 @@ mock_apt.apt_pkg = MagicMock()
 sys.modules['apt'] = mock_apt
 sys.modules['apt_pkg'] = mock_apt.apt_pkg
 
-import ceph  # noqa
+import ceph_rgw as ceph  # noqa
 import utils  # noqa
 
 from test_utils import CharmTestCase  # noqa
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from mock import (
-    patch, call
+    patch, call, MagicMock
 )
 
 from test_utils import (
@@ -63,6 +63,9 @@ TO_PATCH = [
     'request_per_unit_key',
     'get_certificate_request',
     'process_certificates',
+    'filter_installed_packages',
+    'filter_missing_packages',
+    'ceph_utils',
 ]
 
 
@@ -78,12 +81,69 @@ class CephRadosGWTests(CharmTestCase):
         self.service_name.return_value = 'radosgw'
         self.request_per_unit_key.return_value = False
         self.systemd_based_radosgw.return_value = False
+        self.filter_installed_packages.side_effect = lambda pkgs: pkgs
+        self.filter_missing_packages.side_effect = lambda pkgs: pkgs
 
-    def test_install_packages(self):
+    def test_upgrade_available(self):
+        _vers = {
+            'distro': 'luminous',
+            'cloud:bionic-rocky': 'mimic',
+        }
+        mock_config = MagicMock()
+        self.test_config.set('source', 'cloud:bionic-rocky')
+        mock_config.get.side_effect = self.test_config.get
+        mock_config.previous.return_value = 'distro'
+        self.config.side_effect = None
+        self.config.return_value = mock_config
+        self.ceph_utils.UPGRADE_PATHS = {
+            'luminous': 'mimic',
+        }
+        self.ceph_utils.resolve_ceph_version.side_effect = (
+            lambda v: _vers.get(v)
+        )
+        self.assertTrue(ceph_hooks.upgrade_available())
+
+    @patch.object(ceph_hooks, 'upgrade_available')
+    def test_install_packages(self, upgrade_available):
+        mock_config = MagicMock()
+        mock_config.get.side_effect = self.test_config.get
+        mock_config.changed.return_value = True
+        self.config.side_effect = None
+        self.config.return_value = mock_config
+        upgrade_available.return_value = False
         ceph_hooks.install_packages()
         self.add_source.assert_called_with('distro', 'secretkey')
-        self.assertTrue(self.apt_update.called)
-        self.apt_purge.assert_called_with(['libapache2-mod-fastcgi'])
+        self.apt_update.assert_called_with(fatal=True)
+        self.apt_purge.assert_called_with(ceph_hooks.APACHE_PACKAGES)
+        self.apt_install.assert_called_with(ceph_hooks.PACKAGES,
+                                            fatal=True)
+        mock_config.changed.assert_called_with('source')
+        self.filter_installed_packages.assert_called_with(
+            ceph_hooks.PACKAGES
+        )
+        self.filter_missing_packages.assert_called_with(
+            ceph_hooks.APACHE_PACKAGES
+        )
+
+    @patch.object(ceph_hooks, 'upgrade_available')
+    def test_install_packages_upgrades(self, upgrade_available):
+        mock_config = MagicMock()
+        mock_config.get.side_effect = self.test_config.get
+        mock_config.changed.return_value = True
+        self.config.side_effect = None
+        self.config.return_value = mock_config
+        upgrade_available.return_value = True
+        ceph_hooks.install_packages()
+        self.add_source.assert_called_with('distro', 'secretkey')
+        self.apt_update.assert_called_with(fatal=True)
+        self.apt_purge.assert_called_with(ceph_hooks.APACHE_PACKAGES)
+        self.apt_install.assert_called_with(ceph_hooks.PACKAGES,
+                                            fatal=True)
+        mock_config.changed.assert_called_with('source')
+        self.filter_installed_packages.assert_not_called()
+        self.filter_missing_packages.assert_called_with(
+            ceph_hooks.APACHE_PACKAGES
+        )
 
     def test_install(self):
         _install_packages = self.patch('install_packages')