Sync libraries & common files prior to freeze
* charm-helpers sync for classic charms
* charms.ceph sync for ceph charms
* rebuild for reactive charms
* sync tox.ini files as needed
* sync requirements.txt files to standard

Change-Id: I452af2fa5a833c435b913571fd059282bbe0a1a6
parent c65cc751b2
commit e869f021cb
@@ -125,7 +125,7 @@ def diskusage(args):
     except CalledProcessError as e:
         action_set({'output': e.output})
         action_fail('Failed to run swift-recon -d')
-    except:
+    except Exception:
         raise
 
 
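Note: the bare `except:` above also traps SystemExit and KeyboardInterrupt,
which derive from BaseException rather than Exception, so `except Exception:`
keeps those control-flow exceptions out of the handler. A minimal
illustration (not from the charm code):

    try:
        raise KeyboardInterrupt  # e.g. the operator presses Ctrl-C
    except Exception:
        print('not reached: KeyboardInterrupt is not an Exception')
    except BaseException:
        print('a bare "except:" would have matched here')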
@@ -92,5 +92,6 @@ def add_user():
         'add-user.message': message,
     })
 
+
 if __name__ == '__main__':
     add_user()
@@ -55,5 +55,6 @@ def openstack_upgrade():
                     CONFIGS)):
         config_changed()
 
+
 if __name__ == '__main__':
     openstack_upgrade()
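Note: the two hunks above only add a second blank line before module-level
code. Recent pycodestyle releases (pulled in once flake8 is unpinned in
test-requirements.txt below) enforce E305, "expected 2 blank lines after
class or function definition", which this layout now satisfies:

    def add_user():
        pass


    if __name__ == '__main__':  # two blank lines above keep E305 happy
        add_user()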
@@ -29,6 +29,8 @@ from subprocess import check_call, CalledProcessError
 
 import six
 
+import charmhelpers.contrib.storage.linux.ceph as ch_ceph
+
 from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
     _config_ini as config_ini
 )
@@ -56,6 +58,7 @@ from charmhelpers.core.hookenv import (
     status_set,
     network_get_primary_address,
     WARNING,
+    service_name,
 )
 
 from charmhelpers.core.sysctl import create as sysctl_create
@@ -808,6 +811,12 @@ class CephContext(OSContextGenerator):
 
         ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
 
+        if config('pool-type') and config('pool-type') == 'erasure-coded':
+            base_pool_name = config('rbd-pool') or config('rbd-pool-name')
+            if not base_pool_name:
+                base_pool_name = service_name()
+            ctxt['rbd_default_data_pool'] = base_pool_name
+
         if not os.path.isdir('/etc/ceph'):
             os.mkdir('/etc/ceph')
 
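Note: RBD images cannot keep their metadata (omap objects) in an
erasure-coded pool, so an EC-backed deployment keeps metadata in a
replicated pool and points `rbd default data pool` at the EC data pool.
The name selection above falls back from explicit config to the application
name; a standalone sketch ('glance' stands in for whatever service_name()
would return):

    def pick_base_pool(rbd_pool=None, rbd_pool_name=None, app_name='glance'):
        # mirrors: config('rbd-pool') or config('rbd-pool-name') or service_name()
        return rbd_pool or rbd_pool_name or app_name

    assert pick_base_pool() == 'glance'
    assert pick_base_pool(rbd_pool='images-data') == 'images-data'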
@@ -3175,3 +3184,90 @@ class SRIOVContext(OSContextGenerator):
         :rtype: Dict[str,int]
         """
         return self._map
+
+
+class CephBlueStoreCompressionContext(OSContextGenerator):
+    """Ceph BlueStore compression options."""
+
+    # Tuple with Tuples that map configuration option name to CephBrokerRq op
+    # property name
+    options = (
+        ('bluestore-compression-algorithm',
+            'compression-algorithm'),
+        ('bluestore-compression-mode',
+            'compression-mode'),
+        ('bluestore-compression-required-ratio',
+            'compression-required-ratio'),
+        ('bluestore-compression-min-blob-size',
+            'compression-min-blob-size'),
+        ('bluestore-compression-min-blob-size-hdd',
+            'compression-min-blob-size-hdd'),
+        ('bluestore-compression-min-blob-size-ssd',
+            'compression-min-blob-size-ssd'),
+        ('bluestore-compression-max-blob-size',
+            'compression-max-blob-size'),
+        ('bluestore-compression-max-blob-size-hdd',
+            'compression-max-blob-size-hdd'),
+        ('bluestore-compression-max-blob-size-ssd',
+            'compression-max-blob-size-ssd'),
+    )
+
+    def __init__(self):
+        """Initialize context by loading values from charm config.
+
+        We keep two maps, one suitable for use with CephBrokerRq's and one
+        suitable for template generation.
+        """
+        charm_config = config()
+
+        # CephBrokerRq op map
+        self.op = {}
+        # Context exposed for template generation
+        self.ctxt = {}
+        for config_key, op_key in self.options:
+            value = charm_config.get(config_key)
+            self.ctxt.update({config_key.replace('-', '_'): value})
+            self.op.update({op_key: value})
+
+    def __call__(self):
+        """Get context.
+
+        :returns: Context
+        :rtype: Dict[str,any]
+        """
+        return self.ctxt
+
+    def get_op(self):
+        """Get values for use in CephBrokerRq op.
+
+        :returns: Context values with CephBrokerRq op property name as key.
+        :rtype: Dict[str,any]
+        """
+        return self.op
+
+    def get_kwargs(self):
+        """Get values for use as keyword arguments.
+
+        :returns: Context values with key suitable for use as kwargs to
+                  CephBrokerRq add_op_create_*_pool methods.
+        :rtype: Dict[str,any]
+        """
+        return {
+            k.replace('-', '_'): v
+            for k, v in self.op.items()
+        }
+
+    def validate(self):
+        """Validate options.
+
+        :raises: AssertionError
+        """
+        # We slip in a dummy name on class instantiation to allow validation
+        # of the other options. It will not affect further use.
+        #
+        # NOTE: once we retire Python 3.5 we can fold this into an in-line
+        # dictionary comprehension in the call to the initializer.
+        dummy_op = {'name': 'dummy-name'}
+        dummy_op.update(self.op)
+        pool = ch_ceph.BasePool('dummy-service', op=dummy_op)
+        pool.validate()
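Note: a hypothetical consumer sketch (wiring assumed, not part of this
diff) showing how the class, the `ch_ceph` import, and the `service_name`
import added above fit together; the docstrings ground get_kwargs() as
input to the CephBrokerRq add_op_create_*_pool methods:

    compression = CephBlueStoreCompressionContext()
    try:
        # BasePool.validate() signals bad option values via AssertionError
        compression.validate()
    except AssertionError as e:
        status_set('blocked',
                   'invalid BlueStore compression options: {}'.format(e))
    else:
        rq = ch_ceph.CephBrokerRq()
        rq.add_op_create_replicated_pool(name=service_name(),
                                         **compression.get_kwargs())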
@@ -22,3 +22,7 @@ rbd default features = {{ rbd_features }}
 {{ key }} = {{ value }}
 {% endfor -%}
 {%- endif %}
+
+{% if rbd_default_data_pool -%}
+rbd default data pool = {{ rbd_default_data_pool }}
+{% endif %}
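Note: a quick standalone check (plain Jinja2, not the charm's template
loader) of what the new fragment renders to:

    from jinja2 import Template

    fragment = ("{% if rbd_default_data_pool -%}\n"
                "rbd default data pool = {{ rbd_default_data_pool }}\n"
                "{% endif %}")
    print(Template(fragment).render(rbd_default_data_pool='glance'))
    # -> rbd default data pool = glance
    # with the context key unset, the fragment renders as empty output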
@@ -0,0 +1,28 @@
+{# section header omitted as options can belong to multiple sections #}
+{% if bluestore_compression_algorithm -%}
+bluestore compression algorithm = {{ bluestore_compression_algorithm }}
+{% endif -%}
+{% if bluestore_compression_mode -%}
+bluestore compression mode = {{ bluestore_compression_mode }}
+{% endif -%}
+{% if bluestore_compression_required_ratio -%}
+bluestore compression required ratio = {{ bluestore_compression_required_ratio }}
+{% endif -%}
+{% if bluestore_compression_min_blob_size -%}
+bluestore compression min blob size = {{ bluestore_compression_min_blob_size }}
+{% endif -%}
+{% if bluestore_compression_min_blob_size_hdd -%}
+bluestore compression min blob size hdd = {{ bluestore_compression_min_blob_size_hdd }}
+{% endif -%}
+{% if bluestore_compression_min_blob_size_ssd -%}
+bluestore compression min blob size ssd = {{ bluestore_compression_min_blob_size_ssd }}
+{% endif -%}
+{% if bluestore_compression_max_blob_size -%}
+bluestore compression max blob size = {{ bluestore_compression_max_blob_size }}
+{% endif -%}
+{% if bluestore_compression_max_blob_size_hdd -%}
+bluestore compression max blob size hdd = {{ bluestore_compression_max_blob_size_hdd }}
+{% endif -%}
+{% if bluestore_compression_max_blob_size_ssd -%}
+bluestore compression max blob size ssd = {{ bluestore_compression_max_blob_size_ssd }}
+{% endif -%}
[File diff suppressed because it is too large]
@@ -286,7 +286,7 @@ def get_read_affinity():
     """
     if config('read-affinity'):
         read_affinity = config('read-affinity')
-        pattern = re.compile("^r\d+z?(\d+)?=\d+(,\s?r\d+z?(\d+)?=\d+)*$")
+        pattern = re.compile(r"^r\d+z?(\d+)?=\d+(,\s?r\d+z?(\d+)?=\d+)*$")
         if not pattern.match(read_affinity):
             msg = "'read-affinity' config option is malformed"
             status_set('blocked', msg)
@@ -308,7 +308,7 @@ def get_write_affinity():
     """
     if config('write-affinity'):
         write_affinity = config('write-affinity')
-        pattern = re.compile("^r\d+(,\s?r\d+)*$")
+        pattern = re.compile(r"^r\d+(,\s?r\d+)*$")
         if not pattern.match(write_affinity):
             msg = "'write-affinity' config option is malformed"
             status_set('blocked', msg)
@@ -330,7 +330,7 @@ def get_write_affinity_node_count():
     """
     if config('write-affinity-node-count'):
         write_affinity_node_count = config('write-affinity-node-count')
-        pattern = re.compile("^\d+(\s\*\sreplicas)?$")
+        pattern = re.compile(r"^\d+(\s\*\sreplicas)?$")
         if not pattern.match(write_affinity_node_count):
             msg = "'write-affinity-node-count' config option is malformed"
             status_set('blocked', msg)
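Note: the three hunks above are the same fix. Since Python 3.6,
unrecognised escape sequences such as `\d` in ordinary string literals
raise a DeprecationWarning (slated to become an error), so regex patterns
belong in raw strings:

    import re

    re.compile("^\d+$")   # works, but warns: invalid escape sequence '\d'
    re.compile(r"^\d+$")  # raw string passes '\d' to the regex engine verbatim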
@@ -1257,7 +1257,7 @@ def fully_synced():
             not_synced.append(ringfile)
 
     if not_synced:
-        log("Not yet synced: {}".format(', '.join(not_synced), level=INFO))
+        log("Not yet synced: {}".format(', '.join(not_synced)), level=INFO)
         return False
 
     return True
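Note: the fix above moves a misplaced parenthesis; `level=INFO` was being
passed to str.format(), which silently ignores unused keyword arguments,
so log() always ran at its default level:

    "Not yet synced: {}".format('a.ring', level='INFO')
    # -> 'Not yet synced: a.ring'  (the level kwarg is swallowed, no error)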
@@ -1550,6 +1550,7 @@ def set_role(role_name, has_role=True):
         # Unset the key to show it does not have the role.
         leader_set({role_name: None})
 
+
 is_ring_consumer = functools.partial(
     is_role,
     role_name=RING_CONSUMER_ROLE)
@@ -7,6 +7,7 @@
 # requirements. They are intertwined. Also, Zaza itself should specify
 # all of its own requirements and if it doesn't, fix it there.
 #
+setuptools<50.0.0  # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
 pbr>=1.8.0,<1.9.0
 simplejson>=2.2.0
 netifaces>=0.10.4
@@ -7,10 +7,11 @@
 # requirements. They are intertwined. Also, Zaza itself should specify
 # all of its own requirements and if it doesn't, fix it there.
 #
+setuptools<50.0.0  # https://github.com/pypa/setuptools/commit/04e3df22df840c6bb244e9b27bc56750c44b7c85
 charm-tools>=2.4.4
 requests>=2.18.4
 mock>=1.2
-flake8>=2.2.4,<=2.4.1
+flake8>=2.2.4
 stestr>=2.2.0
 coverage>=4.5.2
 pyudev  # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
tox.ini (2 lines changed)
@@ -116,5 +116,5 @@ commands =
     functest-run-suite --keep-model --bundle {posargs}
 
 [flake8]
-ignore = E402,E226
+ignore = E402,E226,W503,W504
 exclude = */charmhelpers
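Note: setting `ignore =` in flake8 config replaces the default ignore list,
which already contains W503 and W504, so listing only E402,E226 had
re-enabled both warnings. They flag opposite line-wrapping styles and
cannot both be satisfied:

    first_value, second_value = 1, 2
    total = (first_value
             + second_value)   # W503: line break before binary operator
    total = (first_value +
             second_value)     # W504: line break after binary operator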
@@ -785,6 +785,6 @@ class SwiftUtilsTestCase(unittest.TestCase):
                                 '/%s.%s' % (s, ext)])
                 wgets.append(_c)
             self.assertEqual(wgets, self.check_call.call_args_list)
-        except:
+        except Exception:
             shutil.rmtree(swift_utils.SWIFT_CONF_DIR)
             swift_utils.SWIFT_CONF_DIR = _SWIFT_CONF_DIR