Sync helpers for 20.05

Change-Id: I99adb66fa9ff58a97f7cdf42ffecb44d8e4dd2f6

parent d9be6cc63a
commit e5166a2c03
Makefile (2 lines changed)

@@ -1,5 +1,5 @@
 #!/usr/bin/make
-PYTHON := /usr/bin/env python
+PYTHON := /usr/bin/env python3
 
 lint:
 	@tox -e pep8
@@ -140,7 +140,7 @@ class OVNClusterStatus(object):
         return self.leader == 'self'
 
 
-def cluster_status(target, schema=None, use_ovs_appctl=False):
+def cluster_status(target, schema=None, use_ovs_appctl=False, rundir=None):
     """Retrieve status information from clustered OVSDB.
 
     :param target: Usually one of 'ovsdb-server', 'ovnnb_db', 'ovnsb_db', can
@@ -151,6 +151,8 @@ def cluster_status(target, schema=None, use_ovs_appctl=False):
     :param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03,
                            set this to True to use ``ovs-appctl`` instead.
     :type use_ovs_appctl: bool
+    :param rundir: Override path to sockets
+    :type rundir: Optional[str]
     :returns: cluster status data object
     :rtype: OVNClusterStatus
     :raises: subprocess.CalledProcessError, KeyError, RuntimeError
@@ -164,8 +166,9 @@ def cluster_status(target, schema=None, use_ovs_appctl=False):
 
     status = {}
     k = ''
-    for line in ovn_appctl(target, 'cluster/status',
-                           schema or schema_map[target],
+    for line in ovn_appctl(target,
+                           ('cluster/status', schema or schema_map[target]),
+                           rundir=rundir,
                            use_ovs_appctl=use_ovs_appctl).splitlines():
         if k and line.startswith(' '):
             # there is no key which means this is a instance of a multi-line/
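For context, a minimal usage sketch (not part of the commit) of the new ``rundir`` argument; the import path and the snap socket directory are assumptions for illustration:

    # Hedged sketch, assuming charmhelpers' usual module layout.
    from charmhelpers.contrib.network.ovs.ovn import cluster_status

    # Point cluster_status at a non-default socket directory, e.g. for a
    # snap-confined OVN (path illustrative only).
    status = cluster_status('ovnnb_db',
                            rundir='/var/snap/ovn-central/common/run/ovn')
    print(status.leader)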
@@ -222,7 +225,7 @@ def is_northd_active():
     :rtype: bool
     """
     try:
-        for line in ovn_appctl('ovn-northd', 'status').splitlines():
+        for line in ovn_appctl('ovn-northd', ('status',)).splitlines():
             if line.startswith('Status:') and 'active' in line:
                 return True
     except subprocess.CalledProcessError:
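A short usage sketch (not part of the commit); ``is_northd_active`` keeps its public signature, only the internal ``ovn_appctl`` call switches to the tuple form. Import path assumed:

    from charmhelpers.contrib.network.ovs.ovn import is_northd_active

    # True only on the unit running the active ovn-northd instance;
    # errors from the appctl call are caught rather than propagated.
    if is_northd_active():
        print('this unit runs the active ovn-northd')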
@@ -92,6 +92,7 @@ DEFAULT_PGS_PER_OSD_TARGET = 100
 DEFAULT_POOL_WEIGHT = 10.0
 LEGACY_PG_COUNT = 200
 DEFAULT_MINIMUM_PGS = 2
+AUTOSCALER_DEFAULT_PGS = 32
 
 
 class OsdPostUpgradeError(Exception):
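The new constant caps the ``--pg-num-min`` hint handed to Ceph's PG autoscaler in the pool-creation changes below. A quick sketch of the clamp:

    AUTOSCALER_DEFAULT_PGS = 32

    # The floor hint never exceeds the pool's own requested PG count.
    assert min(AUTOSCALER_DEFAULT_PGS, 8) == 8     # small pool keeps 8
    assert min(AUTOSCALER_DEFAULT_PGS, 128) == 32  # large pool floors at 32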
@@ -399,16 +400,28 @@ class ReplicatedPool(Pool):
 
     def create(self):
         if not pool_exists(self.service, self.name):
+            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
             # Create it
-            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
-                   self.name, str(self.pg_num)]
+            if nautilus_or_later:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    '--pg-num-min={}'.format(
+                        min(AUTOSCALER_DEFAULT_PGS, self.pg_num)
+                    ),
+                    self.name, str(self.pg_num)
+                ]
+            else:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    self.name, str(self.pg_num)
+                ]
 
             try:
                 check_call(cmd)
                 # Set the pool replica size
                 update_pool(client=self.service,
                             pool=self.name,
                             settings={'size': str(self.replicas)})
-                nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
                 if nautilus_or_later:
                     # Ensure we set the expected pool ratio
                     update_pool(client=self.service,
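Worked example (not from the commit) of the command the Nautilus branch assembles for a replicated pool; the service id, pool name and pg_num are illustrative:

    # Hedged sketch mirroring the nautilus_or_later branch above.
    pg_num = 64
    cmd = [
        'ceph', '--id', 'admin', 'osd', 'pool', 'create',
        '--pg-num-min={}'.format(min(32, pg_num)),
        'mypool', str(pg_num),
    ]
    print(' '.join(cmd))
    # ceph --id admin osd pool create --pg-num-min=32 mypool 64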
@@ -466,10 +479,24 @@ class ErasurePool(Pool):
         k = int(erasure_profile['k'])
         m = int(erasure_profile['m'])
         pgs = self.get_pgs(k + m, self.percent_data)
+        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
         # Create it
-        cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
-               self.name, str(pgs), str(pgs),
-               'erasure', self.erasure_code_profile]
+        if nautilus_or_later:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                '--pg-num-min={}'.format(
+                    min(AUTOSCALER_DEFAULT_PGS, pgs)
+                ),
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
+        else:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
 
         try:
             check_call(cmd)
             try:
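The same pattern applies to erasure-coded pools; note the PG count is passed twice (pg_num and pgp_num) followed by the profile. Values illustrative:

    # Hedged sketch mirroring the erasure-coded branch above.
    pgs = 16
    cmd = [
        'ceph', '--id', 'admin', 'osd', 'pool', 'create',
        '--pg-num-min={}'.format(min(32, pgs)),
        'ec-pool', str(pgs), str(pgs),
        'erasure', 'default',
    ]
    print(' '.join(cmd))
    # ceph --id admin osd pool create --pg-num-min=16 ec-pool 16 16 erasure default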
@@ -478,7 +505,6 @@ class ErasurePool(Pool):
                             name=self.app_name)
             except CalledProcessError:
                 log('Could not set app name for pool {}'.format(self.name, level=WARNING))
-            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
             if nautilus_or_later:
                 # Ensure we set the expected pool ratio
                 update_pool(client=self.service,
@@ -1,6 +1,12 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos. See the 'global' dir contents for available
+# choices of *requirements.txt files for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
+#
+# TODO: Distill the func test requirements from the lint/unit test
+# requirements. They are intertwined. Also, Zaza itself should specify
+# all of its own requirements and if it doesn't, fix it there.
+#
 pbr>=1.8.0,<1.9.0
 simplejson>=2.2.0
 netifaces>=0.10.4
tox.ini (5 lines changed)

@@ -41,6 +41,11 @@ basepython = python3.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
 
+[testenv:py38]
+basepython = python3.8
+deps = -r{toxinidir}/requirements.txt
+       -r{toxinidir}/test-requirements.txt
+
 [testenv:py3]
 basepython = python3
 deps = -r{toxinidir}/requirements.txt
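With the new stanza in place, ``tox -e py38`` runs the unit tests under Python 3.8, alongside the existing py37 and version-agnostic py3 environments.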