Sync libraries & common files prior to freeze

* charm-helpers sync for classic charms
* charms.ceph sync for ceph charms
* rebuild for reactive charms
* sync tox.ini files as needed
* sync requirements.txt files to the standard

Change-Id: I512ef8bbb52c08e782c66450b435a40a76b8a532
commit 5e80697a56 (parent 12682da2fc)
Author: Alex Kavanagh
Date: 2020-09-26 18:27:01 +01:00

8 changed files with 56 additions and 20 deletions
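
Note: the "sync" steps listed in the commit message amount to re-vendoring the shared library sources into each charm tree ahead of the release freeze. The sketch below only illustrates the idea; the real workflow goes through the charm-helpers/charms.ceph sync tooling and each charm's own sync configuration, and the paths shown are made up.

    # Illustrative only: a library sync boils down to replacing the charm's
    # vendored copy with a fresh checkout of the upstream library.
    # Real charms drive this through their sync scripts/config, not this snippet.
    import pathlib
    import shutil

    def vendor_library(upstream_src, charm_dest):
        """Replace the vendored copy at charm_dest with the tree at upstream_src."""
        dest = pathlib.Path(charm_dest)
        if dest.exists():
            shutil.rmtree(dest)                 # drop the stale vendored copy
        shutil.copytree(upstream_src, dest)     # copy in the fresh upstream code

    # e.g. vendor_library("../charms.ceph/charms_ceph", "lib/charms_ceph")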

View File

@@ -16,12 +16,12 @@ def check_ceph_status(args):
         with open(args.status_file, "r") as f:
             lines = f.readlines()
         status_data = dict(
-            l.strip().split(' ', 1) for l in lines if len(l) > 1
+            line.strip().split(' ', 1) for line in lines if len(line) > 1
         )
     else:
         lines = subprocess.check_output(["ceph", "status"]).split('\n')
         status_data = dict(
-            l.strip().split(' ', 1) for l in lines if len(l) > 1
+            line.strip().split(' ', 1) for line in lines if len(line) > 1
         )
 
     if ('health' not in status_data or
@@ -33,7 +33,7 @@ def check_ceph_status(args):
         msg = 'CRITICAL: ceph health status: "{}"'.format(
             status_data['health'])
         raise nagios_plugin.CriticalError(msg)
-    osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in",
+    osds = re.search(r"^.*: (\d+) osds: (\d+) up, (\d+) in",
                      status_data['osdmap'])
     if osds.group(1) > osds.group(2):  # not all OSDs are "up"
         msg = 'CRITICAL: Some OSDs are not up. Total: {}, up: {}'.format(
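
Note: both hunks above are lint-driven, behaviour-preserving cleanups: `l` becomes `line` (newer flake8 treats `l` as an ambiguous name) and the pattern gains a raw-string prefix so `\d` is no longer an invalid string escape. A quick standalone check of the pattern against a representative osdmap value (the sample string is illustrative):

    import re

    # Value stored under the 'osdmap' key by the parsing above (sample only).
    osdmap = "e42: 3 osds: 2 up, 3 in"

    match = re.search(r"^.*: (\d+) osds: (\d+) up, (\d+) in", osdmap)
    total, up, in_ = match.groups()
    print(total, up, in_)   # -> 3 2 3: only two of three OSDs are up, so the
                            #    "not all OSDs are up" branch would fire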

View File

@@ -89,7 +89,7 @@ def get_version():
     package = "ceph"
     try:
         pkg = cache[package]
-    except:
+    except Exception:
         # the package is unknown to the current apt cache.
         e = 'Could not determine version of package with no installation ' \
             'candidate: %s' % package
@@ -104,7 +104,7 @@ def get_version():
     # x.y match only for 20XX.X
     # and ignore patch level for other packages
-    match = re.match('^(\d+)\.(\d+)', vers)
+    match = re.match(r'^(\d+)\.(\d+)', vers)
     if match:
         vers = match.group(0)
@@ -274,6 +274,7 @@ def generate_monitor_secret():
     return "{}==".format(res.split('=')[1].strip())
 
+
 # OSD caps taken from ceph-create-keys
 _osd_bootstrap_caps = {
     'mon': [
@@ -311,7 +312,7 @@ def get_osd_bootstrap_key():
         # Attempt to get/create a key using the OSD bootstrap profile first
         key = get_named_key('bootstrap-osd',
                             _osd_bootstrap_caps_profile)
-    except:
+    except Exception:
         # If that fails try with the older style permissions
         key = get_named_key('bootstrap-osd',
                             _osd_bootstrap_caps)
@@ -335,6 +336,7 @@ def import_radosgw_key(key):
         ]
         subprocess.check_call(cmd)
 
+
 # OSD caps taken from ceph-create-keys
 _radosgw_caps = {
     'mon': ['allow rw'],
@@ -516,7 +518,7 @@ def bootstrap_monitor_cluster(secret):
             service_restart('ceph-mon')
         else:
             service_restart('ceph-mon-all')
-    except:
+    except Exception:
         raise
     finally:
         os.unlink(keyring)
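
Note: the recurring change in this file is `except:` becoming `except Exception:`. A bare `except` also traps `SystemExit` and `KeyboardInterrupt`, which should normally propagate; `except Exception` keeps the fallback behaviour for ordinary errors only. A minimal sketch of the difference:

    # With "except Exception" a Ctrl-C (KeyboardInterrupt) still propagates,
    # whereas a bare "except" would swallow it and silently use the fallback.
    def first_that_works(primary, fallback):
        try:
            return primary()
        except Exception:        # ordinary failures only
            return fallback()

    print(first_that_works(lambda: 1 / 0, lambda: "fallback"))  # -> fallback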

View File

@@ -750,6 +750,7 @@ def handle_create_cephfs(request, service):
     """
     cephfs_name = request.get('mds_name')
     data_pool = request.get('data_pool')
+    extra_pools = request.get('extra_pools', [])
     metadata_pool = request.get('metadata_pool')
     # Check if the user params were provided
     if not cephfs_name or not data_pool or not metadata_pool:
@@ -758,14 +759,12 @@ def handle_create_cephfs(request, service):
         return {'exit-code': 1, 'stderr': msg}
 
     # Sanity check that the required pools exist
-    if not pool_exists(service=service, name=data_pool):
-        msg = "CephFS data pool does not exist. Cannot create CephFS"
-        log(msg, level=ERROR)
-        return {'exit-code': 1, 'stderr': msg}
-    if not pool_exists(service=service, name=metadata_pool):
-        msg = "CephFS metadata pool does not exist. Cannot create CephFS"
-        log(msg, level=ERROR)
-        return {'exit-code': 1, 'stderr': msg}
+    for pool_name in [data_pool, metadata_pool] + extra_pools:
+        if not pool_exists(service=service, name=pool_name):
+            msg = "CephFS pool {} does not exist. Cannot create CephFS".format(
+                pool_name)
+            log(msg, level=ERROR)
+            return {'exit-code': 1, 'stderr': msg}
 
     if get_cephfs(service=service):
         # CephFS new has already been called
@@ -786,6 +785,14 @@ def handle_create_cephfs(request, service):
         else:
             log(err.output, level=ERROR)
             return {'exit-code': 1, 'stderr': err.output}
+    for pool_name in extra_pools:
+        cmd = ["ceph", '--id', service, "fs", "add_data_pool", cephfs_name,
+               pool_name]
+        try:
+            check_output(cmd)
+        except CalledProcessError as err:
+            log(err.output, level=ERROR)
+            return {'exit-code': 1, 'stderr': err.output}
 
 
 def handle_rgw_region_set(request, service):
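
Note: this sync brings in support for an optional `extra_pools` list on the create-CephFS broker request. Every listed pool must already exist (the new `pool_exists()` loop aborts the request otherwise), and after the filesystem is created each extra pool is attached with `ceph fs add_data_pool`. A hedged sketch of the request shape, with purely illustrative pool names and the field names taken from the hunk:

    # Illustrative broker request handled by handle_create_cephfs() above.
    request = {
        'mds_name': 'myfs',                # name of the new CephFS
        'data_pool': 'myfs-data',          # default data pool (must exist)
        'metadata_pool': 'myfs-metadata',  # metadata pool (must exist)
        'extra_pools': ['myfs-data-ec'],   # each attached via "fs add_data_pool"
    }

Checking the extra pools up front keeps the failure mode the same as before: a missing pool aborts the request before `ceph fs new` runs.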

View File

@@ -41,6 +41,7 @@ from charmhelpers.core.host import (
     service_stop,
     CompareHostReleases,
     write_file,
+    is_container,
 )
 from charmhelpers.core.hookenv import (
     cached,
@@ -54,8 +55,12 @@ from charmhelpers.core.hookenv import (
     storage_list,
 )
 from charmhelpers.fetch import (
+    add_source,
     apt_cache,
-    add_source, apt_install, apt_update
+    apt_install,
+    apt_purge,
+    apt_update,
+    filter_missing_packages
 )
 from charmhelpers.contrib.storage.linux.ceph import (
     get_mon_map,
@@ -85,6 +90,9 @@ PACKAGES = ['ceph', 'gdisk',
             'radosgw', 'xfsprogs',
             'lvm2', 'parted', 'smartmontools']
 
+REMOVE_PACKAGES = []
+CHRONY_PACKAGE = 'chrony'
+
 CEPH_KEY_MANAGER = 'ceph'
 VAULT_KEY_MANAGER = 'vault'
 KEY_MANAGERS = [
@@ -623,7 +631,7 @@ def _get_child_dirs(path):
         OSError if an error occurs reading the directory listing
     """
     if not os.path.exists(path):
-        raise ValueError('Specfied path "%s" does not exist' % path)
+        raise ValueError('Specified path "%s" does not exist' % path)
 
     if not os.path.isdir(path):
         raise ValueError('Specified path "%s" is not a directory' % path)
@@ -2209,6 +2217,9 @@ def upgrade_monitor(new_version, kick_function=None):
         else:
             service_stop('ceph-mon-all')
         apt_install(packages=determine_packages(), fatal=True)
+        rm_packages = determine_packages_to_remove()
+        if rm_packages:
+            apt_purge(packages=rm_packages, fatal=True)
         kick_function()
 
         owner = ceph_user()
@@ -3252,6 +3263,19 @@ def determine_packages():
     return packages
 
 
+def determine_packages_to_remove():
+    """Determines packages for removal
+
+    :returns: list of packages to be removed
+    """
+    rm_packages = REMOVE_PACKAGES.copy()
+    if is_container():
+        install_list = filter_missing_packages(CHRONY_PACKAGE)
+        if not install_list:
+            rm_packages.append(CHRONY_PACKAGE)
+    return rm_packages
+
+
 def bootstrap_manager():
     hostname = socket.gethostname()
     path = '/var/lib/ceph/mgr/ceph-{}'.format(hostname)
@@ -3307,7 +3331,7 @@ def apply_osd_settings(settings):
     present. Settings stop being applied on encountering an error.
 
     :param settings: dict. Dictionary of settings to apply.
-    :returns: bool. True if commands ran succesfully.
+    :returns: bool. True if commands ran successfully.
     :raises: OSDConfigSetError
     """
     current_settings = {}
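
Note: the new `determine_packages_to_remove()` gives the charm a way to purge chrony on container deployments (where the unit normally relies on the host's clock), and `upgrade_monitor()` now calls it right after installing packages. One detail worth spelling out is the `.copy()` on `REMOVE_PACKAGES`: it keeps the module-level default from being mutated between calls. A simplified standalone version of that pattern (not the library's exact logic):

    REMOVE_PACKAGES = []        # module-level default, shared by every call

    def packages_to_remove(in_container):
        rm_packages = REMOVE_PACKAGES.copy()   # never mutate the shared default
        if in_container:
            rm_packages.append('chrony')       # containers lean on the host clock
        return rm_packages

    assert packages_to_remove(True) == ['chrony']
    assert REMOVE_PACKAGES == []               # the default is left untouched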

View File

@@ -7,6 +7,7 @@
 # requirements. They are intertwined. Also, Zaza itself should specify
 # all of its own requirements and if it doesn't, fix it there.
 #
+setuptools<50.0.0  # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
 pbr>=1.8.0,<1.9.0
 simplejson>=2.2.0
 netifaces>=0.10.4
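
Note: the `setuptools<50.0.0` cap pins below the setuptools release that, at the time, broke a number of existing pip/virtualenv setups (the linked upstream commit is the change in question). When a tox run misbehaves, it can help to confirm which setuptools the virtualenv actually resolved, for example:

    # Quick check of which setuptools a test virtualenv resolved,
    # handy when a cap such as "setuptools<50.0.0" is in effect.
    import pkg_resources

    print(pkg_resources.get_distribution("setuptools").version)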

View File

@@ -7,10 +7,11 @@
 # requirements. They are intertwined. Also, Zaza itself should specify
 # all of its own requirements and if it doesn't, fix it there.
 #
+setuptools<50.0.0  # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
 charm-tools>=2.4.4
 requests>=2.18.4
 mock>=1.2
-flake8>=2.2.4,<=2.4.1
+flake8>=2.2.4
 stestr>=2.2.0
 coverage>=4.5.2
 pyudev  # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
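
Note: dropping the `<=2.4.1` cap on flake8 is what drives most of the source changes in this commit. Newer pycodestyle releases add checks the old pin never ran, including E305 ("expected 2 blank lines after class or function definition"), which is the most likely reason for the hunks elsewhere in this commit that add only blank lines before module-level statements. For example:

    def helper():
        return 1


    # Two blank lines above: with only one, newer pycodestyle reports
    # E305 "expected 2 blank lines after class or function definition".
    MODULE_LEVEL_CONSTANT = helper()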

View File

@@ -116,5 +116,5 @@ commands =
     functest-run-suite --keep-model --bundle {posargs}
 
 [flake8]
-ignore = E402,E226
+ignore = E402,E226,W503,W504
 exclude = */charmhelpers
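
Note: W503 ("line break before binary operator") and W504 ("line break after binary operator") are complementary checks: wrapping an expression around a binary operator always trips one or the other, so projects routinely ignore both and keep a single house style. Adding them to the ignore list keeps the newer flake8 quiet without reformatting wrapped conditionals. Each of the following trips exactly one of the two:

    # W503 style: the operator starts the continuation line.
    total = (1
             + 2)

    # W504 style: the operator ends the first line.
    total = (1 +
             2)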

View File

@@ -13,6 +13,7 @@ def _add_path(path):
     if path not in sys.path:
         sys.path.insert(1, path)
 
+
 _add_path(_actions)
 _add_path(_hooks)
 _add_path(_charmhelpers)
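
Note: this test shim prepends the charm's own directories so the unit tests import the charm's actions, hooks and vendored helpers rather than anything installed system-wide. `sys.path.insert(1, ...)` leaves slot 0 (the interpreter's script-directory entry) alone while still taking precedence over site-packages. A standalone illustration with a made-up path:

    import sys

    def _add_path(path):
        if path not in sys.path:
            sys.path.insert(1, path)   # after sys.path[0], ahead of site-packages

    _add_path('/tmp/example-charm/hooks')
    print(sys.path[1])                 # -> /tmp/example-charm/hooks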