Merge "Add Ceph BlueStore Compression support"

This commit is contained in:
Zuul 2020-09-30 07:52:18 +00:00 committed by Gerrit Code Review
commit b398e40165
15 changed files with 475 additions and 291 deletions

View File

@ -690,3 +690,69 @@ options:
.
This option doesn't have any effect on clouds running
a release < Ocata.
bluestore-compression-algorithm:
type: string
default:
description: |
Compressor to use (if any) for pools requested by this charm.
.
NOTE: The ceph-osd charm sets a global default for this value (defaults
to 'lz4' unless configured by the end user) which will be used unless
specified for individual pools.
bluestore-compression-mode:
type: string
default:
description: |
Policy for using compression on pools requested by this charm.
.
'none' means never use compression.
'passive' means use compression when clients hint that data is
compressible.
'aggressive' means use compression unless clients hint that
data is not compressible.
'force' means use compression under all circumstances even if the clients
hint that the data is not compressible.
bluestore-compression-required-ratio:
type: float
default:
description: |
The ratio of the size of the data chunk after compression relative to the
original size must be at least this small in order to store the
compressed version on pools requested by this charm.
bluestore-compression-min-blob-size:
type: int
default:
description: |
Chunks smaller than this are never compressed on pools requested by
this charm.
bluestore-compression-min-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression min blob size for rotational media on
pools requested by this charm.
bluestore-compression-min-blob-size-ssd:
type: int
default:
description: |
Value of bluestore compression min blob size for solid state media on
pools requested by this charm.
bluestore-compression-max-blob-size:
type: int
default:
description: |
Chunks larger than this are broken into smaller blobs sizing bluestore
compression max blob size before being compressed on pools requested by
this charm.
bluestore-compression-max-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression max blob size for rotational media on
pools requested by this charm.
bluestore-compression-max-blob-size-ssd:
type: int
default:
description: |
Value of bluestore compression max blob size for solid state media on
pools requested by this charm.

View File

@ -13,6 +13,7 @@
# limitations under the License.
''' Helpers for interacting with OpenvSwitch '''
import collections
import hashlib
import os
import re
@ -20,9 +21,9 @@ import six
import subprocess
from charmhelpers import deprecate
from charmhelpers.contrib.network.ovs import ovsdb as ch_ovsdb
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
log, WARNING, INFO, DEBUG
)
@ -592,3 +593,76 @@ def ovs_appctl(target, args):
cmd = ['ovs-appctl', '-t', target]
cmd.extend(args)
return subprocess.check_output(cmd, universal_newlines=True)
def uuid_for_port(port_name):
    """Get UUID of named port.

    :param port_name: Name of port.
    :type port_name: str
    :returns: Port UUID.
    :rtype: Optional[uuid.UUID]
    """
    ports = ch_ovsdb.SimpleOVSDB('ovs-vsctl').port
    for record in ports.find('name={}'.format(port_name)):
        # Return the UUID of the first (and only expected) match;
        # implicitly returns None when no port by that name exists.
        return record['_uuid']
def bridge_for_port(port_uuid):
    """Find which bridge a port is on.

    :param port_uuid: UUID of port.
    :type port_uuid: uuid.UUID
    :returns: Name of bridge or None.
    :rtype: Optional[str]
    """
    for record in ch_ovsdb.SimpleOVSDB('ovs-vsctl').bridge:
        ports = record['ports']
        # If there is a single port on a bridge `ovs-vsctl` presents the
        # ports property as a bare value rather than a list.
        # ref: juju/charm-helpers#510
        if isinstance(ports, list):
            if port_uuid in ports:
                return record['name']
        elif port_uuid == ports:
            return record['name']
# One end of a patch: the bridge it is attached to and the port name.
PatchPort = collections.namedtuple('PatchPort', ('bridge', 'port'))

# A patch described as a pair of PatchPort tuples, one for each end.
Patch = collections.namedtuple('Patch', ('this_end', 'other_end'))
def patch_ports_on_bridge(bridge):
    """Find patch ports on a bridge.

    :param bridge: Name of bridge
    :type bridge: str
    :returns: Iterator with bridge and port name for both ends of a patch.
    :rtype: Iterator[Patch[PatchPort[str,str],PatchPort[str,str]]]
    :raises: ValueError
    """
    # On any given vSwitch there will be a small number of patch ports, so we
    # start by iterating over ports with type `patch` then look up which
    # bridge they belong to and act on any ports that match the criteria.
    for interface in ch_ovsdb.SimpleOVSDB(
            'ovs-vsctl').interface.find('type=patch'):
        for port in ch_ovsdb.SimpleOVSDB(
                'ovs-vsctl').port.find('name={}'.format(interface['name'])):
            if bridge_for_port(port['_uuid']) == bridge:
                this_end = PatchPort(bridge, port['name'])
                other_end = PatchPort(bridge_for_port(
                                          uuid_for_port(
                                              interface['options']['peer'])),
                                      interface['options']['peer'])
                yield(Patch(this_end, other_end))
            # We expect one result and it is ok if it turns out to be a port
            # for a different bridge. However we need a break here to satisfy
            # the for/else check which is in place to detect an interface
            # referring to a non-existent port.
            break
        else:
            raise ValueError('Port for interface named "{}" does unexpectedly '
                             'not exist.'.format(interface['name']))
    else:
        # Allow our caller to handle no patch ports found gracefully; in
        # reference to PEP 479 just doing a return will provide an empty
        # iterator and not None.
        return

View File

@ -36,6 +36,11 @@ class SimpleOVSDB(object):
for br in ovsdb.bridge:
if br['name'] == 'br-test':
ovsdb.bridge.set(br['uuid'], 'external_ids:charm', 'managed')
WARNING: If a list type field has only one item `ovs-vsctl` will present
it as a single item. Since we do not know the schema we have no way of
knowing what fields should be de-serialized as lists so the caller has
to be careful of checking the type of values returned from this library.
"""
# For validation we keep a complete map of currently known good tool and
@ -157,6 +162,51 @@ class SimpleOVSDB(object):
self._tool = tool
self._table = table
def _deserialize_ovsdb(self, data):
    """Deserialize OVSDB RFC7047 section 5.1 data.

    :param data: Multidimensional list where first row contains RFC7047
                 type information
    :type data: List[str,any]
    :returns: Deserialized data.
    :rtype: any
    """
    # When using json formatted output to OVS commands internal OVSDB
    # notation may occur that requires further deserializing.
    # Reference: https://tools.ietf.org/html/rfc7047#section-5.1
    ovs_type_cb_map = {
        'uuid': uuid.UUID,
        # NOTE: OVSDB sets have overloaded type, see special handling below
        'set': list,
        'map': dict,
    }
    assert len(data) > 1, ('Invalid data provided, expecting list '
                           'with at least two elements.')
    if data[0] == 'set':
        # Special handling for set: it is either a list of strings or a
        # list of typed lists. Taste the first element to see which it is.
        for el in data[1]:
            # NOTE: We lock this handling down to the `uuid` type as
            # that is the only one we have a practical example of.
            # We could potentially just handle this generally based on
            # the types listed in `ovs_type_cb_map` but let's open for
            # that as soon as we have a concrete example to validate on.
            if isinstance(el, list) and len(el) and el[0] == 'uuid':
                # A list of typed lists; deserialize each member
                # recursively.  Use a distinct loop variable so we do not
                # shadow `el` from the enclosing taste loop.
                return [self._deserialize_ovsdb(member)
                        for member in data[1]]
            # fall back to normal processing below
            break
    # Use map to deserialize data with fallback to `str`
    f = ovs_type_cb_map.get(data[0], str)
    return f(data[1])
def _find_tbl(self, condition=None):
"""Run and parse output of OVSDB `find` command.
@ -165,15 +215,6 @@ class SimpleOVSDB(object):
:returns: Dictionary with data
:rtype: Dict[str, any]
"""
# When using json formatted output to OVS commands Internal OVSDB
# notation may occur that require further deserializing.
# Reference: https://tools.ietf.org/html/rfc7047#section-5.1
ovs_type_cb_map = {
'uuid': uuid.UUID,
# FIXME sets also appear to sometimes contain type/value tuples
'set': list,
'map': dict,
}
cmd = [self._tool, '-f', 'json', 'find', self._table]
if condition:
cmd.append(condition)
@ -182,9 +223,8 @@ class SimpleOVSDB(object):
for row in data['data']:
values = []
for col in row:
if isinstance(col, list):
f = ovs_type_cb_map.get(col[0], str)
values.append(f(col[1]))
if isinstance(col, list) and len(col) > 1:
values.append(self._deserialize_ovsdb(col))
else:
values.append(col)
yield dict(zip(data['headings'], values))

View File

@ -3245,6 +3245,18 @@ class CephBlueStoreCompressionContext(OSContextGenerator):
"""
return self.op
def get_kwargs(self):
    """Get values for use as keyword arguments.

    :returns: Context values with key suitable for use as kwargs to
              CephBrokerRq add_op_create_*_pool methods.
    :rtype: Dict[str,any]
    """
    # Broker op keys are dash-separated; keyword arguments must use
    # underscores, so translate each key while keeping values untouched.
    kwargs = {}
    for key, value in self.op.items():
        kwargs[key.replace('-', '_')] = value
    return kwargs
def validate(self):
"""Validate options.

View File

@ -705,12 +705,12 @@ class ErasurePool(BasePool):
# from different handling of this in the `charms.ceph` library.
self.erasure_code_profile = op.get('erasure-profile',
'default-canonical')
self.allow_ec_overwrites = op.get('allow-ec-overwrites')
else:
# We keep the class default when initialized from keyword arguments
# to not break the API for any other consumers.
self.erasure_code_profile = erasure_code_profile or 'default'
self.allow_ec_overwrites = allow_ec_overwrites
self.allow_ec_overwrites = allow_ec_overwrites
def _create(self):
# Try to find the erasure profile information in order to properly
@ -1972,12 +1972,14 @@ class CephBrokerRq(object):
'request-id': self.request_id})
def _ops_equal(self, other):
keys_to_compare = [
'replicas', 'name', 'op', 'pg_num', 'group-permission',
'object-prefix-permissions',
]
keys_to_compare += list(self._partial_build_common_op_create().keys())
if len(self.ops) == len(other.ops):
for req_no in range(0, len(self.ops)):
for key in [
'replicas', 'name', 'op', 'pg_num', 'weight',
'group', 'group-namespace', 'group-permission',
'object-prefix-permissions']:
for key in keys_to_compare:
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
return False
else:

View File

@ -61,6 +61,8 @@ from charmhelpers.fetch import (
filter_installed_packages,
)
import charmhelpers.contrib.openstack.context as ch_context
from charmhelpers.contrib.openstack.utils import (
CompareOpenStackReleases,
configure_installation_source,
@ -390,6 +392,7 @@ def get_ceph_request():
pool_name = config('rbd-pool')
replicas = config('ceph-osd-replication-count')
weight = config('ceph-pool-weight')
bluestore_compression = ch_context.CephBlueStoreCompressionContext()
if config('pool-type') == 'erasure-coded':
# General EC plugin config
@ -442,18 +445,31 @@ def get_ceph_request():
)
# Create EC data pool
rq.add_op_create_erasure_pool(
name=pool_name,
erasure_profile=profile_name,
weight=weight,
group="vms",
app_name="rbd",
allow_ec_overwrites=True
)
# NOTE(fnordahl): once we deprecate Python 3.5 support we can do
# the unpacking of the BlueStore compression arguments as part of
# the function arguments. Until then we need to build the dict
# prior to the function call.
kwargs = {
'name': pool_name,
'erasure_profile': profile_name,
'weight': weight,
'group': "vms",
'app_name': "rbd",
'allow_ec_overwrites': True
}
kwargs.update(bluestore_compression.get_kwargs())
rq.add_op_create_erasure_pool(**kwargs)
else:
rq.add_op_create_pool(name=pool_name, replica_count=replicas,
weight=weight,
group='vms', app_name='rbd')
kwargs = {
'name': pool_name,
'replica_count': replicas,
'weight': weight,
'group': 'vms',
'app_name': 'rbd',
}
kwargs.update(bluestore_compression.get_kwargs())
rq.add_op_create_replicated_pool(**kwargs)
if config('restrict-ceph-pools'):
rq.add_op_request_access_to_group(
@ -494,7 +510,16 @@ def ceph_changed(rid=None, unit=None):
create_libvirt_secret(secret_file=CEPH_SECRET,
secret_uuid=CEPH_SECRET_UUID, key=key)
_handle_ceph_request()
try:
_handle_ceph_request()
except ValueError as e:
# The end user has most likely provided an invalid value for a
# configuration option. Just log the traceback here, the end
# user will be notified by assess_status() called at the end of
# the hook execution.
log('Caught ValueError, invalid value provided for '
'configuration?: "{}"'.format(str(e)),
level=DEBUG)
# TODO: Refactor this method moving part of this logic to charmhelpers,

View File

@ -929,6 +929,33 @@ def assess_status(configs):
os_application_version_set(VERSION_PACKAGE)
def check_optional_config_and_relations(configs):
    """Validate optional configuration and relations when present.

    This function is called from assess_status/set_os_workload_status as the
    charm_func and needs to return either None, None if there is no problem or
    the status, message if there is a problem.

    :param configs: an OSConfigRender() instance.
    :return 2-tuple: (string, string) = (status, message)
    """
    if relation_ids('ceph'):
        # Check that provided Ceph BlueStore configuration is valid.
        try:
            bluestore_compression = context.CephBlueStoreCompressionContext()
            bluestore_compression.validate()
        except AttributeError:
            # The charm does late installation of the `ceph-common` package
            # and the class initializer above will throw an exception until
            # the package is installed.
            pass
        except ValueError as e:
            # Invalid end-user configuration; surface it as a blocked status.
            return ('blocked', 'Invalid configuration: {}'.format(str(e)))
    # return 'unknown' as the lowest priority to not clobber an existing
    # status.
    return "unknown", ""
def assess_status_func(configs, services_=None):
"""Helper function to create the function that will assess_status() for
the unit.
@ -957,6 +984,7 @@ def assess_status_func(configs, services_=None):
required_interfaces.update(optional_relations)
return make_assess_status_func(
configs, required_interfaces,
charm_func=check_optional_config_and_relations,
services=services_ or services(), ports=None)

View File

@ -1,247 +0,0 @@
variables:
openstack-origin: &openstack-origin distro
series: focal
comment:
- 'machines section to decide order of deployment. database sooner = faster'
machines:
'0':
constraints: mem=3072M
'1':
constraints: mem=3072M
'2':
constraints: mem=3072M
'3':
'4':
'5':
'6':
'7':
'8':
'9':
'10':
constraints: mem=4096M cores=4
'11':
'12':
'13':
'14':
'15':
'16':
'17':
'18':
'19':
applications:
nova-cloud-controller-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
keystone-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
glance-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
neutron-api-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
placement-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
mysql-innodb-cluster:
charm: cs:~openstack-charmers-next/mysql-innodb-cluster
num_units: 3
options:
source: *openstack-origin
to:
- '0'
- '1'
- '2'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 6
storage:
osd-devices: '10G'
options:
source: *openstack-origin
to:
- '11'
- '12'
- '13'
- '14'
- '15'
- '16'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
source: *openstack-origin
monitor-count: '3'
to:
- '17'
- '18'
- '19'
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: *openstack-origin
to:
- '3'
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: *openstack-origin
network-manager: Neutron
debug: true
to:
- '4'
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
manage-neutron-plugin-legacy-mode: true
openstack-origin: *openstack-origin
flat-network-providers: physnet1
neutron-security-groups: true
to:
- '5'
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '6'
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
openstack-origin: *openstack-origin
bridge-mappings: physnet1:br-ex
to:
- '7'
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '8'
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
placement:
charm: cs:~openstack-charmers-next/placement
num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '9'
nova-compute:
charm: ../../../nova-compute
num_units: 1
storage:
ephemeral-device: '40G'
options:
openstack-origin: *openstack-origin
config-flags: auto_assign_floating_ip=False
enable-live-migration: false
aa-profile-mode: enforce
debug: true
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
libvirt-image-backend: rbd
to:
- '10'
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'nova-compute:ceph'
- 'ceph-mon:client'
- - 'nova-compute:image-service'
- 'glance:image-service'
- - 'nova-compute:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:shared-db'
- 'nova-cloud-controller-mysql-router:shared-db'
- - 'nova-cloud-controller-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'nova-cloud-controller:identity-service'
- 'keystone:identity-service'
- - 'nova-cloud-controller:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:cloud-compute'
- 'nova-compute:cloud-compute'
- - 'nova-cloud-controller:image-service'
- 'glance:image-service'
- - 'keystone:shared-db'
- 'keystone-mysql-router:shared-db'
- - 'keystone-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:identity-service'
- 'keystone:identity-service'
- - 'glance:shared-db'
- 'glance-mysql-router:shared-db'
- - 'glance-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'glance:amqp'
- 'rabbitmq-server:amqp'
- - 'neutron-gateway:amqp'
- 'rabbitmq-server:amqp'
- - 'nova-cloud-controller:quantum-network-service'
- 'neutron-gateway:quantum-network-service'
- - 'neutron-api:shared-db'
- 'neutron-api-mysql-router:shared-db'
- - 'neutron-api-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'neutron-api:amqp'
- 'rabbitmq-server:amqp'
- - 'neutron-api:neutron-api'
- 'nova-cloud-controller:neutron-api'
- - 'neutron-api:identity-service'
- 'keystone:identity-service'
- - 'nova-compute:neutron-plugin'
- 'neutron-openvswitch:neutron-plugin'
- - 'rabbitmq-server:amqp'
- 'neutron-openvswitch:amqp'
- - 'placement:shared-db'
- 'placement-mysql-router:shared-db'
- - 'placement-mysql-router:db-router'
- 'mysql-innodb-cluster:db-router'
- - 'placement:identity-service'
- 'keystone:identity-service'
- - 'placement:placement'
- 'nova-cloud-controller:placement'

View File

@ -21,6 +21,15 @@ machines:
'9':
'10':
constraints: mem=4096M cores=4
'11':
'12':
'13':
'14':
'15':
'16':
'17':
'18':
'19':
applications:
@ -45,6 +54,32 @@ applications:
- '1'
- '2'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 6
storage:
osd-devices: '10G'
options:
source: *openstack-origin
to:
- '11'
- '12'
- '13'
- '14'
- '15'
- '16'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
source: *openstack-origin
monitor-count: '3'
to:
- '17'
- '18'
- '19'
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
@ -121,10 +156,20 @@ applications:
enable-live-migration: false
aa-profile-mode: enforce
debug: true
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
libvirt-image-backend: rbd
to:
- '10'
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'nova-compute:ceph'
- 'ceph-mon:client'
- - 'nova-compute:image-service'
- 'glance:image-service'

View File

@ -21,6 +21,15 @@ machines:
'9':
'10':
constraints: mem=4096M cores=4
'11':
'12':
'13':
'14':
'15':
'16':
'17':
'18':
'19':
applications:
@ -45,6 +54,32 @@ applications:
- '1'
- '2'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 6
storage:
osd-devices: '10G'
options:
source: *openstack-origin
to:
- '11'
- '12'
- '13'
- '14'
- '15'
- '16'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
source: *openstack-origin
monitor-count: '3'
to:
- '17'
- '18'
- '19'
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
@ -121,10 +156,20 @@ applications:
enable-live-migration: false
aa-profile-mode: enforce
debug: true
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
libvirt-image-backend: rbd
to:
- '10'
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'nova-compute:ceph'
- 'ceph-mon:client'
- - 'nova-compute:image-service'
- 'glance:image-service'

View File

@ -21,6 +21,15 @@ machines:
'9':
'10':
constraints: mem=4096M cores=4
'11':
'12':
'13':
'14':
'15':
'16':
'17':
'18':
'19':
applications:
@ -45,6 +54,32 @@ applications:
- '1'
- '2'
ceph-osd:
charm: cs:~openstack-charmers-next/ceph-osd
num_units: 6
storage:
osd-devices: '10G'
options:
source: *openstack-origin
to:
- '11'
- '12'
- '13'
- '14'
- '15'
- '16'
ceph-mon:
charm: cs:~openstack-charmers-next/ceph-mon
num_units: 3
options:
source: *openstack-origin
monitor-count: '3'
to:
- '17'
- '18'
- '19'
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
@ -121,10 +156,20 @@ applications:
enable-live-migration: false
aa-profile-mode: enforce
debug: true
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2
libvirt-image-backend: rbd
to:
- '10'
relations:
- - 'ceph-osd:mon'
- 'ceph-mon:osd'
- - 'nova-compute:ceph'
- 'ceph-mon:client'
- - 'nova-compute:image-service'
- 'glance:image-service'

View File

@ -4,9 +4,8 @@ smoke_bundles:
- bionic-train
gate_bundles:
- erasure-coded: focal-ussuri-ec
- focal-victoria
- focal-ussuri
- ceph: focal-ussuri
- ceph: focal-victoria
- bionic-ussuri
- bionic-train
- bionic-stein
@ -19,10 +18,10 @@ gate_bundles:
- trusty-mitaka
dev_bundles:
- groovy-victoria
- ceph: groovy-victoria
configure:
- erasure-coded:
- ceph:
- zaza.openstack.charm_tests.glance.setup.add_cirros_image
- zaza.openstack.charm_tests.glance.setup.add_lts_image
- zaza.openstack.charm_tests.keystone.setup.add_demo_user
@ -37,12 +36,13 @@ configure:
- zaza.openstack.charm_tests.nova.setup.manage_ssh_key
tests:
- erasure-coded:
- ceph:
- zaza.openstack.charm_tests.nova.tests.CirrosGuestCreateTest
- zaza.openstack.charm_tests.nova.tests.LTSGuestCreateTest
- zaza.openstack.charm_tests.nova.tests.NovaCompute
- zaza.openstack.charm_tests.nova.tests.SecurityTests
- zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes
- zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation
- zaza.openstack.charm_tests.nova.tests.CirrosGuestCreateTest
- zaza.openstack.charm_tests.nova.tests.LTSGuestCreateTest
- zaza.openstack.charm_tests.nova.tests.NovaCompute

View File

@ -496,6 +496,10 @@ class NovaComputeRelationsTests(CharmTestCase):
create_libvirt_secret.assert_called_once_with(
secret_file='/etc/ceph/secret.xml', key=key,
secret_uuid=hooks.CEPH_SECRET_UUID)
# confirm exception is caught
_handle_ceph_request.side_effect = ValueError
hooks.ceph_changed()
self.log.assert_called_once()
@patch.object(hooks, 'get_ceph_request')
@patch.object(hooks, 'get_request_states')
@ -644,13 +648,15 @@ class NovaComputeRelationsTests(CharmTestCase):
self.assertIsNone(unit)
self.assertIsNone(rid)
@patch.object(hooks.ch_context, 'CephBlueStoreCompressionContext')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_request_access_to_group')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_create_pool')
@patch('uuid.uuid1')
def test_get_ceph_request(self, uuid1, mock_create_pool,
mock_request_access):
mock_request_access,
mock_bluestore_compression):
self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
self.test_config.set('rbd-pool', 'nova')
self.test_config.set('ceph-osd-replication-count', 3)
@ -662,13 +668,15 @@ class NovaComputeRelationsTests(CharmTestCase):
mock_request_access.assert_not_called()
self.assertEqual(expected, result)
@patch.object(hooks.ch_context, 'CephBlueStoreCompressionContext')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_request_access_to_group')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_create_pool')
'.add_op_create_replicated_pool')
@patch('uuid.uuid1')
def test_get_ceph_request_rbd(self, uuid1, mock_create_pool,
mock_request_access):
mock_request_access,
mock_bluestore_compression):
self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
self.test_config.set('rbd-pool', 'nova')
self.test_config.set('ceph-osd-replication-count', 3)
@ -682,7 +690,18 @@ class NovaComputeRelationsTests(CharmTestCase):
group='vms', app_name='rbd')
mock_request_access.assert_not_called()
self.assertEqual(expected, result)
# confirm operation with bluestore compression
mock_create_pool.reset_mock()
mock_bluestore_compression().get_kwargs.return_value = {
'compression_mode': 'fake',
}
hooks.get_ceph_request()
mock_create_pool.assert_called_once_with(name='nova', replica_count=3,
weight=28, group='vms',
app_name='rbd',
compression_mode='fake')
@patch.object(hooks.ch_context, 'CephBlueStoreCompressionContext')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_create_erasure_pool')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
@ -695,7 +714,8 @@ class NovaComputeRelationsTests(CharmTestCase):
def test_get_ceph_request_rbd_ec(self, uuid1, mock_create_pool,
mock_request_access,
mock_create_erasure_profile,
mock_create_erasure_pool):
mock_create_erasure_pool,
mock_bluestore_compression):
self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
self.test_config.set('rbd-pool', 'nova')
self.test_config.set('ceph-osd-replication-count', 3)
@ -738,14 +758,31 @@ class NovaComputeRelationsTests(CharmTestCase):
)
mock_request_access.assert_not_called()
self.assertEqual(expected, result)
# confirm operation with bluestore compression
mock_create_erasure_pool.reset_mock()
mock_bluestore_compression().get_kwargs.return_value = {
'compression_mode': 'fake',
}
hooks.get_ceph_request()
mock_create_erasure_pool.assert_called_with(
name='nova',
erasure_profile='nova-profile',
weight=27.72,
group='vms',
app_name='rbd',
allow_ec_overwrites=True,
compression_mode='fake',
)
@patch.object(hooks.ch_context, 'CephBlueStoreCompressionContext')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_request_access_to_group')
@patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
'.add_op_create_pool')
'.add_op_create_replicated_pool')
@patch('uuid.uuid1')
def test_get_ceph_request_perms(self, uuid1, mock_create_pool,
mock_request_access):
mock_request_access,
mock_bluestore_compression):
self.assert_libvirt_rbd_imagebackend_allowed.return_value = True
self.test_config.set('rbd-pool', 'nova')
self.test_config.set('ceph-osd-replication-count', 3)
@ -770,6 +807,16 @@ class NovaComputeRelationsTests(CharmTestCase):
permission='rwx'),
])
self.assertEqual(expected, result)
# confirm operation with bluestore compression
mock_create_pool.reset_mock()
mock_bluestore_compression().get_kwargs.return_value = {
'compression_mode': 'fake',
}
hooks.get_ceph_request()
mock_create_pool.assert_called_once_with(name='nova', replica_count=3,
weight=28, group='vms',
app_name='rbd',
compression_mode='fake')
@patch.object(hooks, 'service_restart_handler')
@patch.object(hooks, 'CONFIGS')

View File

@ -899,7 +899,9 @@ class NovaComputeUtilsTests(CharmTestCase):
utils.assess_status_func('test-config')
# ports=None whilst port checks are disabled.
make_assess_status_func.assert_called_once_with(
'test-config', test_interfaces, services=['s1'], ports=None)
'test-config', test_interfaces,
charm_func=utils.check_optional_config_and_relations,
services=['s1'], ports=None)
def test_pause_unit_helper(self):
with patch.object(utils, '_pause_resume_helper') as prh: