Sync charm-helpers

Change-Id: I26a45ed4aaf449c9ec208a7358808cf987577acf
Author: Chris MacNaughton, 2019-07-12 15:05:59 +02:00 (committed by Liam Young)
Parent: 365ff6cec4
Commit: 23e26a4cf2
10 changed files with 77 additions and 19 deletions


@@ -217,19 +217,35 @@ def full_restart():
service('force-reload-kmod', 'openvswitch-switch')
def enable_ipfix(bridge, target):
'''Enable IPfix on bridge to target.
def enable_ipfix(bridge, target,
cache_active_timeout=60,
cache_max_flows=128,
sampling=64):
'''Enable IPFIX on bridge to target.
:param bridge: Bridge to monitor
:param target: IPfix remote endpoint
:param target: IPFIX remote endpoint
:param cache_active_timeout: The maximum period in seconds for
which an IPFIX flow record is cached
and aggregated before being sent
:param cache_max_flows: The maximum number of IPFIX flow records
that can be cached at a time
:param sampling: The rate at which packets should be sampled and
sent to each target collector
'''
cmd = ['ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
'--id=@i', 'create', 'IPFIX', 'targets="{}"'.format(target)]
cmd = [
'ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
'--id=@i', 'create', 'IPFIX',
'targets="{}"'.format(target),
'sampling={}'.format(sampling),
'cache_active_timeout={}'.format(cache_active_timeout),
'cache_max_flows={}'.format(cache_max_flows),
]
log('Enabling IPfix on {}.'.format(bridge))
subprocess.check_call(cmd)
def disable_ipfix(bridge):
'''Diable IPfix on target bridge.
'''Diable IPFIX on target bridge.
:param bridge: Bridge to modify
'''
cmd = ['ovs-vsctl', 'clear', 'Bridge', bridge, 'ipfix']
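The widened signature can be exercised as below; a minimal sketch where the bridge name, collector endpoint, and tuning values are illustrative and the import path assumes the usual charm-helpers layout.

    from charmhelpers.contrib.network.ovs import enable_ipfix, disable_ipfix

    # Export IPFIX records from br-int to an illustrative collector,
    # overriding the new tuning defaults.
    enable_ipfix('br-int', '10.10.0.5:4739',
                 cache_active_timeout=30,
                 cache_max_flows=256,
                 sampling=32)

    # Stop exporting when the records are no longer needed.
    disable_ipfix('br-int')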


@@ -323,6 +323,23 @@ class OpenStackAmuletDeployment(AmuletDeployment):
else:
return releases[self.series]
def get_percona_service_entry(self, memory_constraint=None):
"""Return a amulet service entry for percona cluster.
:param memory_constraint: Override the default memory constraint
in the service entry.
:type memory_constraint: str
:returns: Amulet service entry.
:rtype: dict
"""
memory_constraint = memory_constraint or '3072M'
svc_entry = {
'name': 'percona-cluster',
'constraints': {'mem': memory_constraint}}
if self._get_openstack_release() <= self.trusty_mitaka:
svc_entry['location'] = 'cs:trusty/percona-cluster'
return svc_entry
def get_ceph_expected_pools(self, radosgw=False):
"""Return a list of expected ceph pools in a ceph + cinder + glance
test scenario, based on OpenStack release and whether ceph radosgw
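The new helper is meant to replace hard-coded percona-cluster entries in amulet test setups (the basic_deployment hunk further down shows such a swap). A trimmed sketch of a call site; the surrounding services and the memory override are illustrative.

    other_services = [
        {'name': 'keystone'},
        {'name': 'glance'},
        # Chooses the charm store location per release; the constraint
        # override below is optional.
        self.get_percona_service_entry(memory_constraint='4096M'),
    ]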


@@ -126,7 +126,11 @@ def _config_ini(path):
:returns: Configuration contained in path
:rtype: Dict
"""
conf = configparser.ConfigParser()
# When strict is enabled, duplicate options are not allowed in the
# parsed INI; however, Oslo allows duplicate values. This change
# causes us to ignore the duplicate values which is acceptable as
# long as we don't validate any multi-value options
conf = configparser.ConfigParser(strict=False)
conf.read(path)
return dict(conf)
@@ -204,7 +208,7 @@ def validate_file_ownership(config):
"Invalid ownership configuration: {}".format(key))
owner = options.get('owner', config.get('owner', 'root'))
group = options.get('group', config.get('group', 'root'))
optional = options.get('optional', config.get('optional', 'False'))
optional = options.get('optional', config.get('optional', False))
if '*' in file_name:
for file in glob.glob(file_name):
if file not in files.keys():
@@ -226,7 +230,7 @@ def validate_file_permissions(config):
raise RuntimeError(
"Invalid ownership configuration: {}".format(key))
mode = options.get('mode', config.get('permissions', '600'))
optional = options.get('optional', config.get('optional', 'False'))
optional = options.get('optional', config.get('optional', False))
if '*' in file_name:
for file in glob.glob(file_name):
if file not in files.keys():
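The strict=False behaviour can be reproduced with configparser alone; a standalone sketch with a made-up duplicated option. The companion change to the 'optional' default matters because the old string 'False' is truthy, whereas the boolean False is not.

    import configparser

    ini = "[DEFAULT]\nverbose = True\nverbose = False\n"

    lenient = configparser.ConfigParser(strict=False)
    lenient.read_string(ini)               # keeps the last value seen
    print(lenient['DEFAULT']['verbose'])   # -> False

    configparser.ConfigParser().read_string(ini)  # raises DuplicateOptionError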


@@ -106,9 +106,11 @@ class CertRequest(object):
sans = sorted(list(set(entry['addresses'])))
request[entry['cn']] = {'sans': sans}
if self.json_encode:
return {'cert_requests': json.dumps(request, sort_keys=True)}
req = {'cert_requests': json.dumps(request, sort_keys=True)}
else:
return {'cert_requests': request}
req = {'cert_requests': request}
req['unit_name'] = local_unit().replace('/', '_')
return req
def get_certificate_request(json_encode=True):
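With unit_name included, the receiving end of the certificates relation can tell which unit raised a request. A rough sketch of the published shape; the CN, address, and unit name are illustrative, and get_certificate_request is assumed to return the assembled dict.

    request = get_certificate_request(json_encode=False)
    # Roughly:
    # {'cert_requests': {'host.example.com': {'sans': ['10.20.0.4']}},
    #  'unit_name': 'keystone_0'}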


@@ -258,7 +258,7 @@ class SharedDBContext(OSContextGenerator):
'database_password': rdata.get(password_setting),
'database_type': 'mysql+pymysql'
}
if CompareOpenStackReleases(rel) < 'stein':
if CompareOpenStackReleases(rel) < 'queens':
ctxt['database_type'] = 'mysql'
if self.context_complete(ctxt):
db_ssl(rdata, ctxt, self.ssl_dir)
@@ -443,8 +443,10 @@ class IdentityServiceContext(OSContextGenerator):
'api_version': api_version})
if float(api_version) > 2:
ctxt.update({'admin_domain_name':
rdata.get('service_domain')})
ctxt.update({
'admin_domain_name': rdata.get('service_domain'),
'service_project_id': rdata.get('service_tenant_id'),
'service_domain_id': rdata.get('service_domain_id')})
# we keep all veriables in ctxt for compatibility and
# add nested dictionary for keystone_authtoken generic
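The release gate is easiest to read through CompareOpenStackReleases; a minimal sketch of the comparison with the boundary moved from stein to queens. The release name is illustrative.

    from charmhelpers.contrib.openstack.utils import CompareOpenStackReleases

    rel = 'rocky'  # illustrative value read from the relation
    if CompareOpenStackReleases(rel) < 'queens':
        database_type = 'mysql'          # pre-queens keeps the legacy driver
    else:
        database_type = 'mysql+pymysql'  # queens and later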


@@ -1482,6 +1482,21 @@ def send_request_if_needed(request, relation='ceph'):
relation_set(relation_id=rid, broker_req=request.request)
def has_broker_rsp(rid=None, unit=None):
"""Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data.
:param rid: The relation to check (default of None means current relation)
:type rid: Union[str, None]
:param unit: The remote unit to check (default of None means current unit)
:type unit: Union[str, None]
:returns: True if broker key exists and is set to something 'truthy'
:rtype: bool
"""
rdata = relation_get(rid=rid, unit=unit) or {}
broker_rsp = rdata.get(get_broker_rsp_key())
return True if broker_rsp else False
def is_broker_action_done(action, rid=None, unit=None):
"""Check whether broker action has completed yet.


@@ -110,17 +110,19 @@ def is_device_mounted(device):
return bool(re.search(r'MOUNTPOINT=".+"', out))
def mkfs_xfs(device, force=False):
def mkfs_xfs(device, force=False, inode_size=1024):
"""Format device with XFS filesystem.
By default this should fail if the device already has a filesystem on it.
:param device: Full path to device to format
:ptype device: tr
:param force: Force operation
:ptype: force: boolean"""
:ptype: force: boolean
:param inode_size: XFS inode size in bytes
:ptype inode_size: int"""
cmd = ['mkfs.xfs']
if force:
cmd.append("-f")
cmd += ['-i', 'size=1024', device]
cmd += ['-i', "size={}".format(inode_size), device]
check_call(cmd)
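A sketch of the new inode_size knob; the device path is illustrative and the import path assumes the usual charm-helpers layout.

    from charmhelpers.contrib.storage.linux.utils import mkfs_xfs

    # Equivalent to running: mkfs.xfs -f -i size=2048 /dev/vdb
    mkfs_xfs('/dev/vdb', force=True, inode_size=2048)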


@@ -72,7 +72,7 @@ class NeutronOVSBasicDeployment(OpenStackAmuletDeployment):
{'name': 'keystone'},
{'name': 'glance'},
{'name': 'neutron-api'},
{'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
self.get_percona_service_entry(),
]
super(NeutronOVSBasicDeployment, self)._add_services(this_service,
other_services)


@@ -100,7 +100,7 @@ basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-rocky --no-destroy
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-stein --no-destroy
[testenv:func27-dfs]
# Charm Functional Test