Unpin flake8, fix lint

We need to unpin flake8 to allow linting on Python 3.8 systems.

Sync charms.ceph.

Depends-On: Idf2fea27b19cec47ffed9891b518ac7b5b75e405
Change-Id: I91a2133b6d7dc7e59d62dd80779cdc6e77206a20
Author: Frode Nordahl
Date:   2020-03-12 10:55:47 +01:00
Parent: 9fb21f1840
Commit: 969e6e0cae

6 changed files with 25 additions and 19 deletions


@@ -134,7 +134,7 @@ def ensure_host_resolvable_v6(hostname):
     with open(tmp_hosts, 'a+') as fd:
         lines = fd.readlines()
         for line in lines:
-            key = "^%s\s+" % (host_addr)
+            key = r"^%s\s+" % (host_addr)
             if re.search(key, line):
                 break
         else:
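
A note on the raw-string fixes in this commit: newer flake8 releases flag unrecognised escape sequences in string literals (W605), which Python itself has deprecated. A minimal sketch (the address value is illustrative, not from the charm) showing that the raw-string form is byte-for-byte identical today while making the backslash explicit:

    import re

    host_addr = "fe80::1"            # illustrative value only
    old = "^%s\s+" % (host_addr)     # W605: '\s' is not a valid string escape
    new = r"^%s\s+" % (host_addr)    # raw string: backslash reaches re verbatim
    assert old == new                # same bytes today; the non-raw form warns on 3.6+
    print(re.search(new, "fe80::1  myhost") is not None)  # True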


@@ -177,7 +177,7 @@ def check_optional_relations(configs):
     if relation_ids('ha'):
         try:
             get_hacluster_config()
-        except:
+        except Exception:
             return ('blocked',
                     'hacluster missing configuration: '
                     'vip, vip_iface, vip_cidr')
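
For context on the bare-except fixes: flake8's E722 flags `except:` because it also swallows the interpreter-control exceptions. A minimal sketch (not charm code) of the distinction:

    # Exception deliberately excludes interpreter-control exceptions, so
    # "except Exception:" still lets Ctrl-C and sys.exit() propagate, while
    # a bare "except:" (equivalent to "except BaseException:") traps them too.
    print(issubclass(SystemExit, Exception))         # False
    print(issubclass(KeyboardInterrupt, Exception))  # False
    print(issubclass(SystemExit, BaseException))     # True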


@@ -160,9 +160,10 @@ def handle_create_erasure_profile(request, service):
     # "host" | "rack" or it defaults to "host"  # Any valid Ceph bucket
     failure_domain = request.get('failure-domain')
     name = request.get('name')
-    k = request.get('k')
-    m = request.get('m')
-    l = request.get('l')
+    # Binary Distribution Matrix (BDM) parameters
+    bdm_k = request.get('k')
+    bdm_m = request.get('m')
+    bdm_l = request.get('l')
 
     if failure_domain not in CEPH_BUCKET_TYPES:
         msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES)
@@ -171,7 +172,8 @@ def handle_create_erasure_profile(request, service):
 
     create_erasure_profile(service=service, erasure_plugin_name=erasure_type,
                            profile_name=name, failure_domain=failure_domain,
-                           data_chunks=k, coding_chunks=m, locality=l)
+                           data_chunks=bdm_k, coding_chunks=bdm_m,
+                           locality=bdm_l)
 
 
 def handle_add_permissions_to_key(request, service):
@@ -556,7 +558,7 @@ def handle_set_pool_value(request, service):
     # Get the validation method
     validator_params = POOL_KEYS[params['key']]
-    if len(validator_params) is 1:
+    if len(validator_params) == 1:
         # Validate that what the user passed is actually legal per Ceph's rules
         validator(params['value'], validator_params[0])
     else:
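
Two lint rules drive the changes in this file: E741 rejects `l` as a name (easily confused with `1`), hence the `bdm_*` renames, and F632 rejects `is` against literals, since `is` tests object identity rather than equality. A short sketch of the F632 pitfall, using a hypothetical stand-in for the validator params:

    params = ['size']        # hypothetical stand-in for validator_params
    print(len(params) == 1)  # True: compares values, always correct
    # "is" only seems to work because CPython caches small ints (-5..256):
    n = int("1000000")       # built at runtime to defeat constant folding
    m = int("1000000")
    print(n == m)            # True
    print(n is m)            # False in CPython: two distinct objects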


@@ -637,7 +637,7 @@ def _get_osd_num_from_dirname(dirname):
     :raises ValueError: if the osd number cannot be parsed from the provided
                         directory name.
     """
-    match = re.search('ceph-(?P<osd_id>\d+)', dirname)
+    match = re.search(r'ceph-(?P<osd_id>\d+)', dirname)
     if not match:
         raise ValueError("dirname not in correct format: {}".format(dirname))
@@ -706,7 +706,7 @@ def get_version():
     package = "ceph"
     try:
         pkg = cache[package]
-    except:
+    except KeyError:
         # the package is unknown to the current apt cache.
         e = 'Could not determine version of package with no installation ' \
             'candidate: %s' % package
@@ -721,7 +721,7 @@ def get_version():
     # x.y match only for 20XX.X
     # and ignore patch level for other packages
-    match = re.match('^(\d+)\.(\d+)', vers)
+    match = re.match(r'^(\d+)\.(\d+)', vers)
     if match:
         vers = match.group(0)
@@ -956,11 +956,11 @@ def start_osds(devices):
     rescan_osd_devices()
     if (cmp_pkgrevno('ceph', '0.56.6') >= 0 and
             cmp_pkgrevno('ceph', '14.2.0') < 0):
-            # Use ceph-disk activate for directory based OSD's
-            for dev_or_path in devices:
-                if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
-                    subprocess.check_call(
-                        ['ceph-disk', 'activate', dev_or_path])
+        # Use ceph-disk activate for directory based OSD's
+        for dev_or_path in devices:
+            if os.path.exists(dev_or_path) and os.path.isdir(dev_or_path):
+                subprocess.check_call(
+                    ['ceph-disk', 'activate', dev_or_path])
 
 
 def udevadm_settle():
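
The hunk above is a whitespace-only change; it appears to address pycodestyle's E117 (over-indented), a check too new for the previously pinned flake8. A minimal illustration with a hypothetical function:

    def activate(paths):
        if paths:
                print("over-indented")   # E117 under current pycodestyle
        if paths:
            print("one level deeper")    # clean

    activate(['/srv/osd'])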
@@ -978,6 +978,7 @@ def rescan_osd_devices():
     udevadm_settle()
 
+
 _client_admin_keyring = '/etc/ceph/ceph.client.admin.keyring'
@@ -1002,6 +1003,7 @@ def generate_monitor_secret():
     return "{}==".format(res.split('=')[1].strip())
 
+
 # OSD caps taken from ceph-create-keys
 _osd_bootstrap_caps = {
     'mon': [
@@ -1039,7 +1041,7 @@ def get_osd_bootstrap_key():
         # Attempt to get/create a key using the OSD bootstrap profile first
         key = get_named_key('bootstrap-osd',
                             _osd_bootstrap_caps_profile)
-    except:
+    except Exception:
         # If that fails try with the older style permissions
         key = get_named_key('bootstrap-osd',
                             _osd_bootstrap_caps)
@@ -1063,6 +1065,7 @@ def import_radosgw_key(key):
     ]
     subprocess.check_call(cmd)
 
+
 # OSD caps taken from ceph-create-keys
 _radosgw_caps = {
     'mon': ['allow rw'],
@@ -1299,7 +1302,7 @@ def bootstrap_monitor_cluster(secret):
                                 path,
                                 done,
                                 init_marker)
-        except:
+        except Exception:
             raise
         finally:
             os.unlink(keyring)
@@ -2789,6 +2792,7 @@ def dirs_need_ownership_update(service):
     # All child directories had the expected ownership
     return False
 
+
 # A dict of valid ceph upgrade paths. Mapping is old -> new
 UPGRADE_PATHS = collections.OrderedDict([
     ('firefly', 'hammer'),
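
Several hunks in this file add only a blank line between a function body and the module-level code that follows; that pattern matches pycodestyle's E305, which requires two blank lines there. A minimal sketch with hypothetical names:

    def helper():
        return True


    CONSTANT = 'ok'  # two blank lines above module-level code satisfy E305
    print(helper(), CONSTANT)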


@@ -10,7 +10,7 @@
 charm-tools>=2.4.4
 requests>=2.18.4
 mock>=1.2
-flake8>=2.2.4,<=2.4.1
+flake8>=2.2.4
 stestr>=2.2.0
 coverage>=4.5.2
 pyudev  # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)


@@ -111,5 +111,5 @@ commands =
     functest-run-suite --keep-model --bundle {posargs}
 
 [flake8]
-ignore = E402,E226
+ignore = E402,E226,W504
 exclude = */charmhelpers
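
W503 and W504 are complementary checks: one fires on a line break before a binary operator, the other on a break after, so a codebase ignores whichever style it does not use. This tree keeps trailing operators, hence W504 joins the ignore list. Illustration:

    # Trailing-operator style kept here; flake8 raises W504, now ignored:
    total = (1 +
             2)
    # The alternative leading-operator style raises W503 instead:
    total = (1
             + 2)
    print(total)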