Sync libraries & common files prior to freeze

* charm-helpers sync for classic charms
* charms.ceph sync for ceph charms
* rebuild for reactive charms
* sync tox.ini files as needed
* sync requirements.txt files to the standard

Change-Id: I04b35ca8ba26c04d30a9d017b56cc700365c66a5
This commit is contained in:
Alex Kavanagh 2020-09-26 18:27:01 +01:00
parent 6ba782f61d
commit 07212a211b
6 changed files with 1110 additions and 434 deletions

View File

@ -29,6 +29,8 @@ from subprocess import check_call, CalledProcessError
import six import six
import charmhelpers.contrib.storage.linux.ceph as ch_ceph
from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
_config_ini as config_ini _config_ini as config_ini
) )
@ -56,6 +58,7 @@ from charmhelpers.core.hookenv import (
status_set, status_set,
network_get_primary_address, network_get_primary_address,
WARNING, WARNING,
service_name,
) )
from charmhelpers.core.sysctl import create as sysctl_create from charmhelpers.core.sysctl import create as sysctl_create
@ -808,6 +811,12 @@ class CephContext(OSContextGenerator):
ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
if config('pool-type') and config('pool-type') == 'erasure-coded':
base_pool_name = config('rbd-pool') or config('rbd-pool-name')
if not base_pool_name:
base_pool_name = service_name()
ctxt['rbd_default_data_pool'] = base_pool_name
if not os.path.isdir('/etc/ceph'): if not os.path.isdir('/etc/ceph'):
os.mkdir('/etc/ceph') os.mkdir('/etc/ceph')
@ -3175,3 +3184,90 @@ class SRIOVContext(OSContextGenerator):
:rtype: Dict[str,int] :rtype: Dict[str,int]
""" """
return self._map return self._map
class CephBlueStoreCompressionContext(OSContextGenerator):
    """Ceph BlueStore compression options."""

    # Tuple with Tuples that map configuration option name to CephBrokerRq op
    # property name
    options = (
        ('bluestore-compression-algorithm',
         'compression-algorithm'),
        ('bluestore-compression-mode',
         'compression-mode'),
        ('bluestore-compression-required-ratio',
         'compression-required-ratio'),
        ('bluestore-compression-min-blob-size',
         'compression-min-blob-size'),
        ('bluestore-compression-min-blob-size-hdd',
         'compression-min-blob-size-hdd'),
        ('bluestore-compression-min-blob-size-ssd',
         'compression-min-blob-size-ssd'),
        ('bluestore-compression-max-blob-size',
         'compression-max-blob-size'),
        ('bluestore-compression-max-blob-size-hdd',
         'compression-max-blob-size-hdd'),
        ('bluestore-compression-max-blob-size-ssd',
         'compression-max-blob-size-ssd'),
    )

    def __init__(self):
        """Initialize context by loading values from charm config.

        We keep two maps, one suitable for use with CephBrokerRq's and one
        suitable for template generation.
        """
        charm_config = config()
        # Read each compression option once; remember both key spellings.
        triples = [
            (cfg_key, op_key, charm_config.get(cfg_key))
            for cfg_key, op_key in self.options
        ]
        # CephBrokerRq op map (broker property name -> configured value)
        self.op = {op_key: value for _cfg, op_key, value in triples}
        # Context exposed for template generation (underscored keys)
        self.ctxt = {
            cfg_key.replace('-', '_'): value
            for cfg_key, _op, value in triples
        }

    def __call__(self):
        """Get context.

        :returns: Context
        :rtype: Dict[str,any]
        """
        return self.ctxt

    def get_op(self):
        """Get values for use in CephBrokerRq op.

        :returns: Context values with CephBrokerRq op property name as key.
        :rtype: Dict[str,any]
        """
        return self.op

    def get_kwargs(self):
        """Get values for use as keyword arguments.

        :returns: Context values with key suitable for use as kwargs to
                  CephBrokerRq add_op_create_*_pool methods.
        :rtype: Dict[str,any]
        """
        return {
            key.replace('-', '_'): value
            for key, value in self.op.items()
        }

    def validate(self):
        """Validate options.

        :raises: AssertionError
        """
        # We slip in a dummy name on class instantiation to allow validation
        # of the other options. It will not affect further use.
        op = dict(self.op)
        op.setdefault('name', 'dummy-name')
        ch_ceph.BasePool('dummy-service', op=op).validate()

View File

@ -22,3 +22,7 @@ rbd default features = {{ rbd_features }}
{{ key }} = {{ value }} {{ key }} = {{ value }}
{% endfor -%} {% endfor -%}
{%- endif %} {%- endif %}
{% if rbd_default_data_pool -%}
rbd default data pool = {{ rbd_default_data_pool }}
{% endif %}

View File

@ -0,0 +1,28 @@
{# section header omitted as options can belong to multiple sections #}
{# Each option is rendered only when the corresponding context value is set;
   the whitespace-control markers ("-%}") trim the blank lines that unset
   options would otherwise leave behind. #}
{% if bluestore_compression_algorithm -%}
bluestore compression algorithm = {{ bluestore_compression_algorithm }}
{% endif -%}
{% if bluestore_compression_mode -%}
bluestore compression mode = {{ bluestore_compression_mode }}
{% endif -%}
{% if bluestore_compression_required_ratio -%}
bluestore compression required ratio = {{ bluestore_compression_required_ratio }}
{% endif -%}
{% if bluestore_compression_min_blob_size -%}
bluestore compression min blob size = {{ bluestore_compression_min_blob_size }}
{% endif -%}
{% if bluestore_compression_min_blob_size_hdd -%}
bluestore compression min blob size hdd = {{ bluestore_compression_min_blob_size_hdd }}
{% endif -%}
{% if bluestore_compression_min_blob_size_ssd -%}
bluestore compression min blob size ssd = {{ bluestore_compression_min_blob_size_ssd }}
{% endif -%}
{% if bluestore_compression_max_blob_size -%}
bluestore compression max blob size = {{ bluestore_compression_max_blob_size }}
{% endif -%}
{% if bluestore_compression_max_blob_size_hdd -%}
bluestore compression max blob size hdd = {{ bluestore_compression_max_blob_size_hdd }}
{% endif -%}
{% if bluestore_compression_max_blob_size_ssd -%}
bluestore compression max blob size ssd = {{ bluestore_compression_max_blob_size_ssd }}
{% endif -%}

File diff suppressed because it is too large Load Diff

View File

@ -113,8 +113,8 @@ def create_private_key(user, priv_key_path, key_type='rsa'):
check_call(cmd) check_call(cmd)
else: else:
log('SSH key already exists at %s.' % priv_key_path) log('SSH key already exists at %s.' % priv_key_path)
check_call(['chown', user, priv_key_path]) os.chown(priv_key_path, pwd.getpwnam(user).pw_uid, -1)
check_call(['chmod', '0600', priv_key_path]) os.chmod(priv_key_path, 0o600)
def create_public_key(user, priv_key_path, pub_key_path): def create_public_key(user, priv_key_path, pub_key_path):
@ -124,7 +124,7 @@ def create_public_key(user, priv_key_path, pub_key_path):
p = check_output(cmd).strip() p = check_output(cmd).strip()
with open(pub_key_path, 'wb') as out: with open(pub_key_path, 'wb') as out:
out.write(p) out.write(p)
check_call(['chown', user, pub_key_path]) os.chown(pub_key_path, pwd.getpwnam(user).pw_uid, -1)
def get_keypair(user): def get_keypair(user):
@ -157,6 +157,7 @@ def write_authorized_keys(user, keys):
with open(auth_keys, 'w') as out: with open(auth_keys, 'w') as out:
for k in keys: for k in keys:
out.write('%s\n' % k) out.write('%s\n' % k)
os.chown(auth_keys, pwd.getpwnam(user).pw_uid, -1)
def write_known_hosts(user, hosts): def write_known_hosts(user, hosts):
@ -172,6 +173,7 @@ def write_known_hosts(user, hosts):
with open(known_hosts, 'w') as out: with open(known_hosts, 'w') as out:
for host in khosts: for host in khosts:
out.write('%s\n' % host) out.write('%s\n' % host)
os.chown(known_hosts, pwd.getpwnam(user).pw_uid, -1)
def ensure_user(user, group=None): def ensure_user(user, group=None):

View File

@ -15,6 +15,5 @@ flake8>=2.2.4
stestr>=2.2.0 stestr>=2.2.0
coverage>=4.5.2 coverage>=4.5.2
pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking) pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
juju!=2.8.3 # this version causes spurious JujuAPIError's
git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0' git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0'
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack