Sync libraries & common files prior to freeze

* charm-helpers sync for classic charms
* charms.ceph sync for ceph charms
* rebuild for reactive charms
* sync tox.ini files as needed
* sync requirements.txt files to the standard set

Change-Id: I28bc6125b817c2f1608157509c27d54dab9541a5
Author: Alex Kavanagh
Date:   2020-09-26 18:27:02 +01:00
Parent: c6661bc9dc
Commit: 494908cee7

12 changed files with 1138 additions and 455 deletions

======================================================================

@@ -56,5 +56,6 @@ def openstack_upgrade():
                                     CONFIGS)):
         config_changed()
 
+
 if __name__ == '__main__':
     openstack_upgrade()

======================================================================

@@ -29,6 +29,8 @@ from subprocess import check_call, CalledProcessError
 
 import six
 
+import charmhelpers.contrib.storage.linux.ceph as ch_ceph
+
 from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
     _config_ini as config_ini
 )
@@ -56,6 +58,7 @@ from charmhelpers.core.hookenv import (
     status_set,
     network_get_primary_address,
     WARNING,
+    service_name,
 )
 
 from charmhelpers.core.sysctl import create as sysctl_create
@@ -808,6 +811,12 @@ class CephContext(OSContextGenerator):
         ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
 
+        if config('pool-type') and config('pool-type') == 'erasure-coded':
+            base_pool_name = config('rbd-pool') or config('rbd-pool-name')
+            if not base_pool_name:
+                base_pool_name = service_name()
+            ctxt['rbd_default_data_pool'] = base_pool_name
+
         if not os.path.isdir('/etc/ceph'):
             os.mkdir('/etc/ceph')
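
For illustration, the pool-name precedence in the added block reduces to a
standalone sketch (the config dict and service name below are hypothetical;
in the real code config() and service_name() come from charmhelpers):

    # Explicit 'rbd-pool' wins, then 'rbd-pool-name', then the application
    # (service) name is used as the base pool name.
    def resolve_base_pool_name(cfg, service):
        return cfg.get('rbd-pool') or cfg.get('rbd-pool-name') or service

    assert resolve_base_pool_name({'rbd-pool': 'fast'}, 'swift-storage') == 'fast'
    assert resolve_base_pool_name({}, 'swift-storage') == 'swift-storage'
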
@@ -3175,3 +3184,90 @@ class SRIOVContext(OSContextGenerator):
         :rtype: Dict[str,int]
         """
         return self._map
+
+
+class CephBlueStoreCompressionContext(OSContextGenerator):
+    """Ceph BlueStore compression options."""
+
+    # Tuple with Tuples that map configuration option name to CephBrokerRq op
+    # property name
+    options = (
+        ('bluestore-compression-algorithm',
+         'compression-algorithm'),
+        ('bluestore-compression-mode',
+         'compression-mode'),
+        ('bluestore-compression-required-ratio',
+         'compression-required-ratio'),
+        ('bluestore-compression-min-blob-size',
+         'compression-min-blob-size'),
+        ('bluestore-compression-min-blob-size-hdd',
+         'compression-min-blob-size-hdd'),
+        ('bluestore-compression-min-blob-size-ssd',
+         'compression-min-blob-size-ssd'),
+        ('bluestore-compression-max-blob-size',
+         'compression-max-blob-size'),
+        ('bluestore-compression-max-blob-size-hdd',
+         'compression-max-blob-size-hdd'),
+        ('bluestore-compression-max-blob-size-ssd',
+         'compression-max-blob-size-ssd'),
+    )
+
+    def __init__(self):
+        """Initialize context by loading values from charm config.
+
+        We keep two maps, one suitable for use with CephBrokerRq's and one
+        suitable for template generation.
+        """
+        charm_config = config()
+
+        # CephBrokerRq op map
+        self.op = {}
+        # Context exposed for template generation
+        self.ctxt = {}
+        for config_key, op_key in self.options:
+            value = charm_config.get(config_key)
+            self.ctxt.update({config_key.replace('-', '_'): value})
+            self.op.update({op_key: value})
+
+    def __call__(self):
+        """Get context.
+
+        :returns: Context
+        :rtype: Dict[str,any]
+        """
+        return self.ctxt
+
+    def get_op(self):
+        """Get values for use in CephBrokerRq op.
+
+        :returns: Context values with CephBrokerRq op property name as key.
+        :rtype: Dict[str,any]
+        """
+        return self.op
+
+    def get_kwargs(self):
+        """Get values for use as keyword arguments.
+
+        :returns: Context values with key suitable for use as kwargs to
+                  CephBrokerRq add_op_create_*_pool methods.
+        :rtype: Dict[str,any]
+        """
+        return {
+            k.replace('-', '_'): v
+            for k, v in self.op.items()
+        }
+
+    def validate(self):
+        """Validate options.
+
+        :raises: AssertionError
+        """
+        # We slip in a dummy name on class instantiation to allow validation
+        # of the other options. It will not affect further use.
+        #
+        # NOTE: once we retire Python 3.5 we can fold this into an in-line
+        # dictionary comprehension in the call to the initializer.
+        dummy_op = {'name': 'dummy-name'}
+        dummy_op.update(self.op)
+        pool = ch_ceph.BasePool('dummy-service', op=dummy_op)
+        pool.validate()
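
A note on get_kwargs() above: it only rewrites the CephBrokerRq op property
names into keyword-argument form. The transformation in isolation (the
values here are made up):

    # Standalone illustration of the key rewrite performed by get_kwargs():
    # 'compression-mode' style op keys become compression_mode style kwargs
    # for the CephBrokerRq add_op_create_*_pool helpers.
    op = {'compression-algorithm': 'zstd', 'compression-mode': 'aggressive'}
    kwargs = {k.replace('-', '_'): v for k, v in op.items()}
    assert kwargs == {'compression_algorithm': 'zstd',
                      'compression_mode': 'aggressive'}
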

======================================================================

@@ -22,3 +22,7 @@ rbd default features = {{ rbd_features }}
 {{ key }} = {{ value }}
 {% endfor -%}
 {%- endif %}
+{% if rbd_default_data_pool -%}
+rbd default data pool = {{ rbd_default_data_pool }}
+{% endif %}

======================================================================

@@ -0,0 +1,28 @@
+{# section header omitted as options can belong to multiple sections #}
+{% if bluestore_compression_algorithm -%}
+bluestore compression algorithm = {{ bluestore_compression_algorithm }}
+{% endif -%}
+{% if bluestore_compression_mode -%}
+bluestore compression mode = {{ bluestore_compression_mode }}
+{% endif -%}
+{% if bluestore_compression_required_ratio -%}
+bluestore compression required ratio = {{ bluestore_compression_required_ratio }}
+{% endif -%}
+{% if bluestore_compression_min_blob_size -%}
+bluestore compression min blob size = {{ bluestore_compression_min_blob_size }}
+{% endif -%}
+{% if bluestore_compression_min_blob_size_hdd -%}
+bluestore compression min blob size hdd = {{ bluestore_compression_min_blob_size_hdd }}
+{% endif -%}
+{% if bluestore_compression_min_blob_size_ssd -%}
+bluestore compression min blob size ssd = {{ bluestore_compression_min_blob_size_ssd }}
+{% endif -%}
+{% if bluestore_compression_max_blob_size -%}
+bluestore compression max blob size = {{ bluestore_compression_max_blob_size }}
+{% endif -%}
+{% if bluestore_compression_max_blob_size_hdd -%}
+bluestore compression max blob size hdd = {{ bluestore_compression_max_blob_size_hdd }}
+{% endif -%}
+{% if bluestore_compression_max_blob_size_ssd -%}
+bluestore compression max blob size ssd = {{ bluestore_compression_max_blob_size_ssd }}
+{% endif -%}
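
The fragment can be exercised directly with jinja2 to confirm that unset
options emit nothing (the context values below are made up; the real context
comes from CephBlueStoreCompressionContext):

    from jinja2 import Template

    fragment = (
        "{% if bluestore_compression_algorithm -%}\n"
        "bluestore compression algorithm = "
        "{{ bluestore_compression_algorithm }}\n"
        "{% endif -%}\n"
        "{% if bluestore_compression_mode -%}\n"
        "bluestore compression mode = {{ bluestore_compression_mode }}\n"
        "{% endif -%}\n"
    )
    # Only the configured option renders; the unset one is skipped entirely.
    print(Template(fragment).render(bluestore_compression_algorithm='zstd'))
    # -> bluestore compression algorithm = zstd
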

[File diff suppressed because it is too large]

======================================================================

@@ -368,7 +368,7 @@ def get_mount_point(device):
 
 def find_block_devices(include_mounted=False):
     found = []
-    incl = ['sd[a-z]', 'vd[a-z]', 'cciss\/c[0-9]d[0-9]']
+    incl = ['sd[a-z]', 'vd[a-z]', r'cciss\/c[0-9]d[0-9]']
 
     with open('/proc/partitions') as proc:
         partitions = [p.split() for p in proc.readlines()[2:]]
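
The raw-string prefix matters because '\/' is not a valid Python string
escape: the plain literal triggers a DeprecationWarning on Python 3.6+
(a SyntaxWarning on newer releases), while inside the regex '\/' simply
matches '/'. A small demonstration:

    import re
    import warnings

    # Compiling source that contains the invalid escape '\/' warns.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        compile(r"incl = 'cciss\/c[0-9]d[0-9]'", '<example>', 'exec')
    print([w.category.__name__ for w in caught])

    # The raw-string pattern is warning-free and matches the same devices.
    assert re.match(r'cciss\/c[0-9]d[0-9]', 'cciss/c0d0')
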

======================================================================

@@ -7,6 +7,7 @@
 # requirements. They are intertwined. Also, Zaza itself should specify
 # all of its own requirements and if it doesn't, fix it there.
 #
+setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
 pbr>=1.8.0,<1.9.0
 simplejson>=2.2.0
 netifaces>=0.10.4

======================================================================

@@ -7,10 +7,11 @@
 # requirements. They are intertwined. Also, Zaza itself should specify
 # all of its own requirements and if it doesn't, fix it there.
 #
+setuptools<50.0.0 # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
 charm-tools>=2.4.4
 requests>=2.18.4
 mock>=1.2
-flake8>=2.2.4,<=2.4.1
+flake8>=2.2.4
 stestr>=2.2.0
 coverage>=4.5.2
 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)

======================================================================

@@ -116,5 +116,5 @@ commands =
     functest-run-suite --keep-model --bundle {posargs}
 
 [flake8]
-ignore = E402,E226
+ignore = E402,E226,W503,W504
 exclude = */charmhelpers
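
W503 and W504 are the complementary "line break before binary operator" and
"line break after binary operator" checks; any wrapped expression violates
one of the two, so ignoring both (as above) permits either wrapping style:

    # Break after the operator: flagged as W504 if only W503 is ignored.
    total_after = (1 +
                   2)
    # Break before the operator: flagged as W503 if only W504 is ignored.
    total_before = (1
                    + 2)
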

======================================================================

@@ -25,6 +25,7 @@ def _add_path(path):
     if path not in sys.path:
         sys.path.insert(1, path)
 
 
+_add_path(_actions)
 _add_path(_hooks)
 _add_path(_lib)
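
The hunk adds the charm's actions directory to the import path alongside
hooks and lib. A hedged reconstruction of the usual boilerplate around
_add_path (directory names assumed from the standard charm layout):

    import os
    import sys

    _path = os.path.dirname(os.path.realpath(__file__))
    _actions = os.path.join(_path, 'actions')
    _hooks = os.path.join(_path, 'hooks')
    _lib = os.path.join(_path, 'lib')


    def _add_path(path):
        # Insert after sys.path[0] so the script's own directory keeps
        # import priority.
        if path not in sys.path:
            sys.path.insert(1, path)


    _add_path(_actions)
    _add_path(_hooks)
    _add_path(_lib)
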

======================================================================

@@ -171,7 +171,10 @@ class SwiftStorageRelationsTests(CharmTestCase):
     @patch.object(hooks, 'add_ufw_gre_rule', lambda *args: None)
     @patch.object(hooks, 'ensure_devs_tracked')
-    def test_upgrade_charm(self, mock_ensure_devs_tracked):
+    @patch.object(hooks, 'get_os_codename_install_source')
+    def test_upgrade_charm(self, mock_get_os_codename_install_source,
+                           mock_ensure_devs_tracked):
+        mock_get_os_codename_install_source.return_value = 'stein'
         hooks.upgrade_charm()
         self.apt_install.assert_called_with(
             ['gdisk', 'lvm2', 'swift', 'swift-account',
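
The new mock arrives first in the test's signature because stacked
@patch.object decorators apply bottom-up: the decorator nearest the function
supplies the first argument. A standalone illustration:

    from unittest.mock import patch


    class Hooks:
        first = 'a'
        second = 'b'


    @patch.object(Hooks, 'second')
    @patch.object(Hooks, 'first')
    def check(mock_first, mock_second):
        # The innermost (lowest) decorator provides the first mock argument.
        assert mock_first is Hooks.first
        assert mock_second is Hooks.second


    check()
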

======================================================================

@@ -110,7 +110,6 @@ TARGET SOURCE FSTYPE OPTIONS
 """
 
-
 class SwiftStorageUtilsTests(CharmTestCase):
 
     def setUp(self):
@@ -157,7 +156,7 @@ class SwiftStorageUtilsTests(CharmTestCase):
                                  '-O', '/etc/swift/%s.ring.gz' % s])
                 wgets.append(_c)
             self.assertEqual(wgets, self.check_call.call_args_list)
-        except:
+        except Exception:
             shutil.rmtree(swift_utils.SWIFT_CONF_DIR)
 
     def test_determine_block_device_no_config(self):
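
Replacing the bare except: with except Exception: keeps the cleanup path for
ordinary errors while letting KeyboardInterrupt and SystemExit, which derive
only from BaseException, propagate. A compact illustration:

    def run_with_cleanup(fn):
        try:
            return fn()
        except Exception:
            # Ordinary failures land here; SystemExit would propagate,
            # whereas a bare 'except:' would have swallowed it too.
            return 'cleaned up'

    assert run_with_cleanup(lambda: 1 / 0) == 'cleaned up'
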
@@ -367,11 +366,12 @@ class SwiftStorageUtilsTests(CharmTestCase):
             call('/srv/node/crypt-7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe',
                  group='swift', owner='swift')
         ])
-        self.assertEqual(self.test_kv.get('prepared-devices'),
-                         ['/dev/mapper/crypt-7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe'])
+        self.assertEqual(
+            self.test_kv.get('prepared-devices'),
+            ['/dev/mapper/crypt-7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe'])
         mock_vaultlocker.write_vaultlocker_conf.assert_called_with(
-                'test_context',
-                priority=90
+            'test_context',
+            priority=90
         )
 
     @patch.object(swift_utils, "uuid")
@@ -414,7 +414,7 @@ class SwiftStorageUtilsTests(CharmTestCase):
     def test_find_block_devices_real_world(self):
         self.is_block_device.return_value = True
-        side_effect = lambda x: x in ["/dev/sdb", "/dev/sdb1"]  # flake8: noqa
+        side_effect = lambda x: x in ["/dev/sdb", "/dev/sdb1"]  # noqa
         self.is_device_mounted.side_effect = side_effect
         with patch_open() as (_open, _file):
             _file.read.return_value = REAL_WORLD_PARTITIONS
@@ -527,12 +527,13 @@ class SwiftStorageUtilsTests(CharmTestCase):
                      'bind_host_context',
                      'worker_context',
                      'vl_context']),
-            call('/etc/swift/container-server/container-server-replicator.conf',
-                 [
-                     'swift_context',
-                     'bind_host_context',
-                     'worker_context',
-                     'vl_context']),
+            call('/etc/swift/container-server/'
+                 'container-server-replicator.conf',
+                 [
+                     'swift_context',
+                     'bind_host_context',
+                     'worker_context',
+                     'vl_context']),
             call(
                 '/etc/swift/object-server/object-server-replicator.conf',
                 [
@@ -543,7 +544,6 @@ class SwiftStorageUtilsTests(CharmTestCase):
         ]
         self.assertEqual(sorted(ex), sorted(configs.register.call_args_list))
 
-
     @patch.object(swift_utils, 'remove_old_packages')
     def test_do_upgrade_queens(self, mock_remove_old_packages):
         self.is_paused.return_value = False
@@ -663,6 +663,7 @@ class SwiftStorageUtilsTests(CharmTestCase):
                            mock_Fstab):
         FstabEntry = namedtuple('FstabEntry', ['mountpoint', 'device'])
+
         class MockFstab(object):
 
             def __init__(self):
@@ -720,8 +721,8 @@ class SwiftStorageUtilsTests(CharmTestCase):
         port = '80'
         self.ufw.grant_access = MagicMock()
         swift_utils.grant_access(addr, port)
-        self.ufw.grant_access.assert_called_with(addr, port=port, index=1, proto='tcp')
+        self.ufw.grant_access.assert_called_with(
+            addr, port=port, index=1, proto='tcp')
 
     def test_revoke_access(self):
         addr = '10.1.1.1'
@@ -736,17 +737,17 @@ class SwiftStorageUtilsTests(CharmTestCase):
     def test_setup_ufw(self, mock_grant_access, mock_rsync, mock_get_host_ip):
         peer_addr_1 = '10.1.1.1'
         peer_addr_2 = '10.1.1.2'
-        client_addrs = ['10.3.3.1', '10.3.3.2','10.3.3.3', 'ubuntu.com']
+        client_addrs = ['10.3.3.1', '10.3.3.2', '10.3.3.3', 'ubuntu.com']
         ports = [6660, 6661, 6662]
         self.test_config.set('object-server-port', ports[0])
         self.test_config.set('container-server-port', ports[1])
         self.test_config.set('account-server-port', ports[2])
         RelatedUnits = namedtuple('RelatedUnits', 'rid, unit')
         self.iter_units_for_relation_name.return_value = [
-                RelatedUnits(rid='rid:1', unit='unit/1'),
-                RelatedUnits(rid='rid:1', unit='unit/2'),
-                RelatedUnits(rid='rid:1', unit='unit/3'),
-                RelatedUnits(rid='rid:1', unit='unit/4')]
+            RelatedUnits(rid='rid:1', unit='unit/1'),
+            RelatedUnits(rid='rid:1', unit='unit/2'),
+            RelatedUnits(rid='rid:1', unit='unit/3'),
+            RelatedUnits(rid='rid:1', unit='unit/4')]
         self.ingress_address.side_effect = client_addrs
         context_call = MagicMock()
         context_call.return_value = {'allowed_hosts': '{} {}'