diff --git a/hooks/charmhelpers/contrib/network/ovs/ovn.py b/hooks/charmhelpers/contrib/network/ovs/ovn.py
index 7a9de1e7..2075f11a 100644
--- a/hooks/charmhelpers/contrib/network/ovs/ovn.py
+++ b/hooks/charmhelpers/contrib/network/ovs/ovn.py
@@ -140,7 +140,7 @@ class OVNClusterStatus(object):
         return self.leader == 'self'
 
 
-def cluster_status(target, schema=None, use_ovs_appctl=False):
+def cluster_status(target, schema=None, use_ovs_appctl=False, rundir=None):
     """Retrieve status information from clustered OVSDB.
 
     :param target: Usually one of 'ovsdb-server', 'ovnnb_db', 'ovnsb_db', can
@@ -151,6 +151,8 @@ def cluster_status(target, schema=None, use_ovs_appctl=False):
     :param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03,
                            set this to True to use ``ovs-appctl`` instead.
     :type use_ovs_appctl: bool
+    :param rundir: Override path to sockets
+    :type rundir: Optional[str]
     :returns: cluster status data object
     :rtype: OVNClusterStatus
     :raises: subprocess.CalledProcessError, KeyError, RuntimeError
@@ -164,8 +166,9 @@ def cluster_status(target, schema=None, use_ovs_appctl=False):
 
     status = {}
     k = ''
-    for line in ovn_appctl(target, 'cluster/status',
-                           schema or schema_map[target],
+    for line in ovn_appctl(target,
+                           ('cluster/status', schema or schema_map[target]),
+                           rundir=rundir,
                            use_ovs_appctl=use_ovs_appctl).splitlines():
         if k and line.startswith(' '):
             # there is no key which means this is a instance of a multi-line/
@@ -222,7 +225,7 @@ def is_northd_active():
     :rtype: bool
     """
     try:
-        for line in ovn_appctl('ovn-northd', 'status').splitlines():
+        for line in ovn_appctl('ovn-northd', ('status',)).splitlines():
             if line.startswith('Status:') and 'active' in line:
                 return True
     except subprocess.CalledProcessError:
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index eb31b782..95a0d82a 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -92,6 +92,7 @@ DEFAULT_PGS_PER_OSD_TARGET = 100
 DEFAULT_POOL_WEIGHT = 10.0
 LEGACY_PG_COUNT = 200
 DEFAULT_MINIMUM_PGS = 2
+AUTOSCALER_DEFAULT_PGS = 32
 
 
 class OsdPostUpgradeError(Exception):
@@ -399,16 +400,28 @@ class ReplicatedPool(Pool):
 
     def create(self):
         if not pool_exists(self.service, self.name):
+            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
             # Create it
-            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
-                   self.name, str(self.pg_num)]
+            if nautilus_or_later:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    '--pg-num-min={}'.format(
+                        min(AUTOSCALER_DEFAULT_PGS, self.pg_num)
+                    ),
+                    self.name, str(self.pg_num)
+                ]
+            else:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    self.name, str(self.pg_num)
+                ]
+
             try:
                 check_call(cmd)
                 # Set the pool replica size
                 update_pool(client=self.service,
                             pool=self.name,
                             settings={'size': str(self.replicas)})
-                nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
                 if nautilus_or_later:
                     # Ensure we set the expected pool ratio
                     update_pool(client=self.service,
@@ -466,10 +479,24 @@ class ErasurePool(Pool):
         k = int(erasure_profile['k'])
         m = int(erasure_profile['m'])
         pgs = self.get_pgs(k + m, self.percent_data)
+        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
         # Create it
-        cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
-               self.name, str(pgs), str(pgs),
-               'erasure', self.erasure_code_profile]
+        if nautilus_or_later:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                '--pg-num-min={}'.format(
+                    min(AUTOSCALER_DEFAULT_PGS, pgs)
+                ),
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
+        else:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
+
         try:
             check_call(cmd)
             try:
@@ -478,7 +505,6 @@
                 set_app_name_for_pool(client=self.service,
                                       pool=self.name,
                                       name=self.app_name)
             except CalledProcessError:
                 log('Could not set app name for pool {}'.format(self.name, level=WARNING))
-            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
             if nautilus_or_later:
                 # Ensure we set the expected pool ratio
                 update_pool(client=self.service,
diff --git a/test-requirements.txt b/test-requirements.txt
index 01df23f7..7d9c2587 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,10 +7,7 @@
 # requirements. They are intertwined. Also, Zaza itself should specify
 # all of its own requirements and if it doesn't, fix it there.
 #
-# NOTE(fnordahl): Revert to using charm-tools from PyPI as soon as we get a
-# release with commit 2bba009 in it.
-# charm-tools>=2.4.4
-git+https://github.com/juju/charm-tools.git#egg=charm-tools
+charm-tools>=2.4.4
 requests>=2.18.4
 mock>=1.2
 flake8>=2.2.4,<=2.4.1
diff --git a/tox.ini b/tox.ini
index 20dbbfc5..b835733a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -41,6 +41,11 @@ basepython = python3.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 
+[testenv:py38]
+basepython = python3.8
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
 [testenv:py3]
 basepython = python3
 deps = -r{toxinidir}/requirements.txt