Sync in security guide fixes and fix local
neutron.conf in Ocata has a multi-valued string config option which ConfigParser treats as invalid INI. This change resolves that and allows it to parse.

Closes-Bug: #1833368
Change-Id: I838ca6e7bf505d316e0dd703a0a50a8bc2e16e53
parent a82b210c47
commit 1ea1398f45
@@ -58,7 +58,7 @@ def main():
             'validate-uses-tls-for-glance',
         ],
     }
-    conf = configparser.ConfigParser()
+    conf = configparser.ConfigParser(strict=False)
     conf.read("/etc/neutron/neutron.conf")
     config['neutron_config'] = dict(conf)
     return audits.action_parse_results(audits.run(config))
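Background on the strict=False change above (illustrative snippet, not part of the commit; provider values invented): Oslo-style multi-valued options are written as the same key repeated, which a strict ConfigParser rejects with DuplicateOptionError. A minimal sketch, assuming a neutron.conf-style [service_providers] section:

    import configparser

    # An Oslo MultiStrOpt repeats the same key to build a list of values.
    SAMPLE = (
        "[service_providers]\n"
        "service_provider = LOADBALANCERV2:Haproxy:haproxy_driver:default\n"
        "service_provider = L3_ROUTER_NAT:router:router_driver:default\n"
    )

    strict = configparser.ConfigParser()
    try:
        strict.read_string(SAMPLE)
    except configparser.DuplicateOptionError as err:
        print("strict parser rejects it:", err)

    # strict=False accepts the file; only the last duplicate survives,
    # which is acceptable as long as no multi-valued option is audited.
    lenient = configparser.ConfigParser(strict=False)
    lenient.read_string(SAMPLE)
    print(lenient["service_providers"]["service_provider"])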
@@ -217,19 +217,35 @@ def full_restart():
     service('force-reload-kmod', 'openvswitch-switch')
 
 
-def enable_ipfix(bridge, target):
-    '''Enable IPfix on bridge to target.
+def enable_ipfix(bridge, target,
+                 cache_active_timeout=60,
+                 cache_max_flows=128,
+                 sampling=64):
+    '''Enable IPFIX on bridge to target.
     :param bridge: Bridge to monitor
-    :param target: IPfix remote endpoint
+    :param target: IPFIX remote endpoint
+    :param cache_active_timeout: The maximum period in seconds for
+                                 which an IPFIX flow record is cached
+                                 and aggregated before being sent
+    :param cache_max_flows: The maximum number of IPFIX flow records
+                            that can be cached at a time
+    :param sampling: The rate at which packets should be sampled and
+                     sent to each target collector
     '''
-    cmd = ['ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
-           '--id=@i', 'create', 'IPFIX', 'targets="{}"'.format(target)]
+    cmd = [
+        'ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
+        '--id=@i', 'create', 'IPFIX',
+        'targets="{}"'.format(target),
+        'sampling={}'.format(sampling),
+        'cache_active_timeout={}'.format(cache_active_timeout),
+        'cache_max_flows={}'.format(cache_max_flows),
+    ]
     log('Enabling IPfix on {}.'.format(bridge))
     subprocess.check_call(cmd)
 
 
 def disable_ipfix(bridge):
-    '''Diable IPfix on target bridge.
+    '''Diable IPFIX on target bridge.
     :param bridge: Bridge to modify
     '''
     cmd = ['ovs-vsctl', 'clear', 'Bridge', bridge, 'ipfix']
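Illustrative use of the extended helper (bridge name and collector endpoint invented; not part of the commit):

    # Tune the IPFIX cache and sampling rate per call; omitted arguments
    # keep the new keyword defaults (60, 128, 64).
    enable_ipfix('br-int', '10.5.0.10:4739',
                 cache_active_timeout=30,
                 cache_max_flows=256,
                 sampling=32)

    # Roughly the equivalent shell command:
    # ovs-vsctl set Bridge br-int ipfix=@i -- \
    #     --id=@i create IPFIX targets="10.5.0.10:4739" \
    #     sampling=32 cache_active_timeout=30 cache_max_flows=256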
@@ -294,8 +294,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('bionic', None): self.bionic_queens,
             ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
             ('bionic', 'cloud:bionic-stein'): self.bionic_stein,
+            ('bionic', 'cloud:bionic-train'): self.bionic_train,
             ('cosmic', None): self.cosmic_rocky,
             ('disco', None): self.disco_stein,
+            ('eoan', None): self.eoan_train,
         }
         return releases[(self.series, self.openstack)]
 
@@ -313,6 +315,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('bionic', 'queens'),
             ('cosmic', 'rocky'),
             ('disco', 'stein'),
+            ('eoan', 'train'),
             ])
         if self.openstack:
             os_origin = self.openstack.split(':')[1]
@@ -54,11 +54,15 @@ NOVA_CLIENT_VERSION = "2"
 
 OPENSTACK_RELEASES_PAIRS = [
     'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
-    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
-    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
-    'xenial_pike', 'artful_pike', 'xenial_queens',
-    'bionic_queens', 'bionic_rocky', 'cosmic_rocky',
-    'bionic_stein', 'disco_stein']
+    'trusty_mitaka', 'xenial_mitaka',
+    'xenial_newton', 'yakkety_newton',
+    'xenial_ocata', 'zesty_ocata',
+    'xenial_pike', 'artful_pike',
+    'xenial_queens', 'bionic_queens',
+    'bionic_rocky', 'cosmic_rocky',
+    'bionic_stein', 'disco_stein',
+    'bionic_train', 'eoan_train',
+]
 
 
 class OpenStackAmuletUtils(AmuletUtils):
@@ -126,7 +126,11 @@ def _config_ini(path):
     :returns: Configuration contained in path
     :rtype: Dict
     """
-    conf = configparser.ConfigParser()
+    # When strict is enabled, duplicate options are not allowed in the
+    # parsed INI; however, Oslo allows duplicate values. This change
+    # causes us to ignore the duplicate values which is acceptable as
+    # long as we don't validate any multi-value options
+    conf = configparser.ConfigParser(strict=False)
     conf.read(path)
     return dict(conf)
 
@@ -204,7 +208,7 @@ def validate_file_ownership(config):
                 "Invalid ownership configuration: {}".format(key))
         owner = options.get('owner', config.get('owner', 'root'))
         group = options.get('group', config.get('group', 'root'))
-        optional = options.get('optional', config.get('optional', 'False'))
+        optional = options.get('optional', config.get('optional', False))
         if '*' in file_name:
             for file in glob.glob(file_name):
                 if file not in files.keys():
@@ -226,7 +230,7 @@ def validate_file_permissions(config):
             raise RuntimeError(
                 "Invalid ownership configuration: {}".format(key))
         mode = options.get('mode', config.get('permissions', '600'))
-        optional = options.get('optional', config.get('optional', 'False'))
+        optional = options.get('optional', config.get('optional', False))
         if '*' in file_name:
             for file in glob.glob(file_name):
                 if file not in files.keys():
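The switch from the string 'False' to the boolean False in both hunks matters because any non-empty string is truthy in Python, so a downstream check like `if optional:` treated every file as optional under the old default. A minimal sketch (not from the commit):

    # bool of a non-empty string is always True, even for 'False':
    assert bool('False') is True
    # the boolean default behaves as intended:
    assert bool(False) is False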
@@ -106,9 +106,11 @@ class CertRequest(object):
             sans = sorted(list(set(entry['addresses'])))
             request[entry['cn']] = {'sans': sans}
         if self.json_encode:
-            return {'cert_requests': json.dumps(request, sort_keys=True)}
+            req = {'cert_requests': json.dumps(request, sort_keys=True)}
         else:
-            return {'cert_requests': request}
+            req = {'cert_requests': request}
+        req['unit_name'] = local_unit().replace('/', '_')
+        return req
 
 
 def get_certificate_request(json_encode=True):
@@ -220,6 +222,8 @@ def process_certificates(service_name, relation_id, unit,
     :type user: str
     :param group: (Optional) Group of certificate files. Defaults to 'root'
     :type group: str
+    :returns: True if certificates processed for local unit or False
+    :rtype: bool
     """
     data = relation_get(rid=relation_id, unit=unit)
     ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
@@ -235,6 +239,8 @@ def process_certificates(service_name, relation_id, unit,
         create_ip_cert_links(
             ssl_dir,
             custom_hostname_link=custom_hostname_link)
+        return True
+    return False
 
 
 def get_requests_for_local_unit(relation_name=None):
@@ -117,6 +117,7 @@ except ImportError:
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 ADDRESS_TYPES = ['admin', 'internal', 'public']
 HAPROXY_RUN_DIR = '/var/run/haproxy/'
+DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"
 
 
 def ensure_packages(packages):
@@ -351,10 +352,70 @@ class IdentityServiceContext(OSContextGenerator):
             return cachedir
         return None
 
+    def _get_pkg_name(self, python_name='keystonemiddleware'):
+        """Get corresponding distro installed package for python
+        package name.
+
+        :param python_name: nameof the python package
+        :type: string
+        """
+        pkg_names = map(lambda x: x + python_name, ('python3-', 'python-'))
+
+        for pkg in pkg_names:
+            if not filter_installed_packages((pkg,)):
+                return pkg
+
+        return None
+
+    def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel):
+        """Build Jinja2 context for full rendering of [keystone_authtoken]
+        section with variable names included. Re-constructed from former
+        template 'section-keystone-auth-mitaka'.
+
+        :param ctxt: Jinja2 context returned from self.__call__()
+        :type: dict
+        :param keystonemiddleware_os_rel: OpenStack release name of
+                                          keystonemiddleware package installed
+        """
+        c = collections.OrderedDict((('auth_type', 'password'),))
+
+        # 'www_authenticate_uri' replaced 'auth_uri' since Stein,
+        # see keystonemiddleware upstream sources for more info
+        if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
+            c.update((
+                ('www_authenticate_uri', "{}://{}:{}/v3".format(
+                    ctxt.get('service_protocol', ''),
+                    ctxt.get('service_host', ''),
+                    ctxt.get('service_port', ''))),))
+        else:
+            c.update((
+                ('auth_uri', "{}://{}:{}/v3".format(
+                    ctxt.get('service_protocol', ''),
+                    ctxt.get('service_host', ''),
+                    ctxt.get('service_port', ''))),))
+
+        c.update((
+            ('auth_url', "{}://{}:{}/v3".format(
+                ctxt.get('auth_protocol', ''),
+                ctxt.get('auth_host', ''),
+                ctxt.get('auth_port', ''))),
+            ('project_domain_name', ctxt.get('admin_domain_name', '')),
+            ('user_domain_name', ctxt.get('admin_domain_name', '')),
+            ('project_name', ctxt.get('admin_tenant_name', '')),
+            ('username', ctxt.get('admin_user', '')),
+            ('password', ctxt.get('admin_password', '')),
+            ('signing_dir', ctxt.get('signing_dir', '')),))
+
+        return c
+
     def __call__(self):
         log('Generating template context for ' + self.rel_name, level=DEBUG)
         ctxt = {}
 
+        keystonemiddleware_os_release = None
+        if self._get_pkg_name():
+            keystonemiddleware_os_release = os_release(self._get_pkg_name())
+
         cachedir = self._setup_pki_cache()
         if cachedir:
             ctxt['signing_dir'] = cachedir
@@ -385,6 +446,14 @@ class IdentityServiceContext(OSContextGenerator):
                     ctxt.update({'admin_domain_name':
                                  rdata.get('service_domain')})
 
+                # we keep all veriables in ctxt for compatibility and
+                # add nested dictionary for keystone_authtoken generic
+                # templating
+                if keystonemiddleware_os_release:
+                    ctxt['keystone_authtoken'] = \
+                        self._get_keystone_authtoken_ctxt(
+                            ctxt, keystonemiddleware_os_release)
+
                 if self.context_complete(ctxt):
                     # NOTE(jamespage) this is required for >= icehouse
                     # so a missing value just indicates keystone needs
@@ -452,6 +521,86 @@ class IdentityCredentialsContext(IdentityServiceContext):
         return {}
 
 
+class NovaVendorMetadataContext(OSContextGenerator):
+    """Context used for configuring nova vendor metadata on nova.conf file."""
+
+    def __init__(self, os_release_pkg, interfaces=None):
+        """Initialize the NovaVendorMetadataContext object.
+
+        :param os_release_pkg: the package name to extract the OpenStack
+                               release codename from.
+        :type os_release_pkg: str
+        :param interfaces: list of string values to be used as the Context's
+                           relation interfaces.
+        :type interfaces: List[str]
+        """
+        self.os_release_pkg = os_release_pkg
+        if interfaces is not None:
+            self.interfaces = interfaces
+
+    def __call__(self):
+        cmp_os_release = CompareOpenStackReleases(
+            os_release(self.os_release_pkg))
+        ctxt = {'vendor_data': False}
+
+        vdata_providers = []
+        vdata = config('vendor-data')
+        vdata_url = config('vendor-data-url')
+
+        if vdata:
+            try:
+                # validate the JSON. If invalid, we do not set anything here
+                json.loads(vdata)
+            except (TypeError, ValueError) as e:
+                log('Error decoding vendor-data. {}'.format(e), level=ERROR)
+            else:
+                ctxt['vendor_data'] = True
+                # Mitaka does not support DynamicJSON
+                # so vendordata_providers is not needed
+                if cmp_os_release > 'mitaka':
+                    vdata_providers.append('StaticJSON')
+
+        if vdata_url:
+            if cmp_os_release > 'mitaka':
+                ctxt['vendor_data_url'] = vdata_url
+                vdata_providers.append('DynamicJSON')
+            else:
+                log('Dynamic vendor data unsupported'
+                    ' for {}.'.format(cmp_os_release), level=ERROR)
+        if vdata_providers:
+            ctxt['vendordata_providers'] = ','.join(vdata_providers)
+
+        return ctxt
+
+
+class NovaVendorMetadataJSONContext(OSContextGenerator):
+    """Context used for writing nova vendor metadata json file."""
+
+    def __init__(self, os_release_pkg):
+        """Initialize the NovaVendorMetadataJSONContext object.
+
+        :param os_release_pkg: the package name to extract the OpenStack
+                               release codename from.
+        :type os_release_pkg: str
+        """
+        self.os_release_pkg = os_release_pkg
+
+    def __call__(self):
+        ctxt = {'vendor_data_json': '{}'}
+
+        vdata = config('vendor-data')
+        if vdata:
+            try:
+                # validate the JSON. If invalid, we return empty.
+                json.loads(vdata)
+            except (TypeError, ValueError) as e:
+                log('Error decoding vendor-data. {}'.format(e), level=ERROR)
+            else:
+                ctxt['vendor_data_json'] = vdata
+
+        return ctxt
+
+
 class AMQPContext(OSContextGenerator):
 
     def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None,
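Purely illustrative (values invented, not part of the commit): with both vendor-data and vendor-data-url set on a release newer than Mitaka, the new NovaVendorMetadataContext above would return roughly:

    {
        'vendor_data': True,
        'vendor_data_url': 'http://169.254.169.254/vendor.json',
        'vendordata_providers': 'StaticJSON,DynamicJSON',
    }

On Mitaka and older, only 'vendor_data' is set and the dynamic URL is logged as unsupported.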
@@ -569,6 +718,19 @@ class AMQPContext(OSContextGenerator):
             ctxt['oslo_messaging_flags'] = config_flags_parser(
                 oslo_messaging_flags)
 
+        oslo_messaging_driver = conf.get(
+            'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER)
+        if oslo_messaging_driver:
+            ctxt['oslo_messaging_driver'] = oslo_messaging_driver
+
+        notification_format = conf.get('notification-format', None)
+        if notification_format:
+            ctxt['notification_format'] = notification_format
+
+        send_notifications_to_logs = conf.get('send-notifications-to-logs', None)
+        if send_notifications_to_logs:
+            ctxt['send_notifications_to_logs'] = send_notifications_to_logs
+
         if not self.complete:
             return {}
 
@@ -620,6 +782,25 @@ class CephContext(OSContextGenerator):
             ensure_packages(['ceph-common'])
         return ctxt
 
+    def context_complete(self, ctxt):
+        """Overridden here to ensure the context is actually complete.
+
+        We set `key` and `auth` to None here, by default, to ensure
+        that the context will always evaluate to incomplete until the
+        Ceph relation has actually sent these details; otherwise,
+        there is a potential race condition between the relation
+        appearing and the first unit actually setting this data on the
+        relation.
+
+        :param ctxt: The current context members
+        :type ctxt: Dict[str, ANY]
+        :returns: True if the context is complete
+        :rtype: bool
+        """
+        if 'auth' not in ctxt or 'key' not in ctxt:
+            return False
+        return super(CephContext, self).context_complete(ctxt)
+
 
 class HAProxyContext(OSContextGenerator):
     """Provides half a context for the haproxy template, which describes
@@ -1110,7 +1291,9 @@ class NeutronPortContext(OSContextGenerator):
 
         hwaddr_to_nic = {}
         hwaddr_to_ip = {}
-        for nic in list_nics():
+        extant_nics = list_nics()
+
+        for nic in extant_nics:
             # Ignore virtual interfaces (bond masters will be identified from
             # their slaves)
             if not is_phy_iface(nic):
@@ -1141,10 +1324,11 @@ class NeutronPortContext(OSContextGenerator):
                 # Entry is a MAC address for a valid interface that doesn't
                 # have an IP address assigned yet.
                 resolved.append(hwaddr_to_nic[entry])
-            else:
-                # If the passed entry is not a MAC address, assume it's a valid
-                # interface, and that the user put it there on purpose (we can
-                # trust it to be the real external network).
+            elif entry in extant_nics:
+                # If the passed entry is not a MAC address and the interface
+                # exists, assume it's a valid interface, and that the user put
+                # it there on purpose (we can trust it to be the real external
+                # network).
                 resolved.append(entry)
 
         # Ensure no duplicates
@@ -1526,6 +1710,18 @@ class NeutronAPIContext(OSContextGenerator):
                 'rel_key': 'enable-nsg-logging',
                 'default': False,
             },
+            'enable_nfg_logging': {
+                'rel_key': 'enable-nfg-logging',
+                'default': False,
+            },
+            'global_physnet_mtu': {
+                'rel_key': 'global-physnet-mtu',
+                'default': 1500,
+            },
+            'physical_network_mtus': {
+                'rel_key': 'physical-network-mtus',
+                'default': None,
+            },
         }
         ctxt = self.get_neutron_options({})
         for rid in relation_ids('neutron-plugin-api'):
@@ -1587,13 +1783,13 @@ class DataPortContext(NeutronPortContext):
     def __call__(self):
         ports = config('data-port')
         if ports:
-            # Map of {port/mac:bridge}
+            # Map of {bridge:port/mac}
             portmap = parse_data_port_mappings(ports)
             ports = portmap.keys()
             # Resolve provided ports or mac addresses and filter out those
             # already attached to a bridge.
             resolved = self.resolve_ports(ports)
-            # FIXME: is this necessary?
+            # Rebuild port index using resolved and filtered ports.
             normalized = {get_nic_hwaddr(port): port for port in resolved
                           if port not in ports}
             normalized.update({port: port for port in resolved
@@ -217,6 +217,11 @@ def neutron_plugins():
         plugins['nsx']['config'] = '/etc/neutron/nsx.ini'
         plugins['vsp']['driver'] = (
             'nuage_neutron.plugins.nuage.plugin.NuagePlugin')
+        if CompareOpenStackReleases(release) >= 'newton':
+            plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
+            plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+            plugins['vsp']['server_packages'] = ['neutron-server',
+                                                 'neutron-plugin-ml2']
     return plugins
 
 
@@ -0,0 +1,9 @@
+{% if auth_host -%}
+[keystone_authtoken]
+{% for option_name, option_value in keystone_authtoken.items() -%}
+{{ option_name }} = {{ option_value }}
+{% endfor -%}
+{% if use_memcache == true %}
+memcached_servers = {{ memcache_url }}
+{% endif -%}
+{% endif -%}
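For reference, a hypothetical rendering of this new template with a context built by _get_keystone_authtoken_ctxt above (hosts, ports and credentials invented):

    [keystone_authtoken]
    auth_type = password
    www_authenticate_uri = http://10.0.0.10:5000/v3
    auth_url = http://10.0.0.11:35357/v3
    project_domain_name = service_domain
    user_domain_name = service_domain
    project_name = services
    username = neutron
    password = secret
    signing_dir = /var/cache/neutron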
@@ -1,11 +1,15 @@
 {% if transport_url -%}
 [oslo_messaging_notifications]
-driver = messagingv2
+driver = {{ oslo_messaging_driver }}
 transport_url = {{ transport_url }}
+{% if send_notifications_to_logs %}
+driver = log
+{% endif %}
 {% if notification_topics -%}
 topics = {{ notification_topics }}
 {% endif -%}
 {% if notification_format -%}
 [notifications]
 notification_format = {{ notification_format }}
 {% endif -%}
 {% endif -%}
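Again illustrative only (transport URL and format invented), the template now renders along these lines, with the driver falling back to DEFAULT_OSLO_MESSAGING_DRIVER ('messagingv2') unless the charm sets oslo-messaging-driver:

    [oslo_messaging_notifications]
    driver = messagingv2
    transport_url = rabbit://notifier:secret@10.0.0.20:5672/openstack

    [notifications]
    notification_format = unversioned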
@@ -0,0 +1 @@
+{{ vendor_data_json }}
@@ -120,6 +120,7 @@ OPENSTACK_RELEASES = (
     'queens',
     'rocky',
     'stein',
+    'train',
 )
 
 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
@@ -139,6 +140,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('bionic', 'queens'),
     ('cosmic', 'rocky'),
     ('disco', 'stein'),
+    ('eoan', 'train'),
 ])
 
 
@@ -159,6 +161,7 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2018.1', 'queens'),
     ('2018.2', 'rocky'),
     ('2019.1', 'stein'),
+    ('2019.2', 'train'),
 ])
 
 # The ugly duckling - must list releases oldest to newest
@@ -194,7 +197,9 @@ SWIFT_CODENAMES = OrderedDict([
     ('rocky',
         ['2.18.0', '2.19.0']),
     ('stein',
-        ['2.20.0']),
+        ['2.20.0', '2.21.0']),
+    ('train',
+        ['2.22.0']),
 ])
 
 # >= Liberty version->codename mapping
@@ -208,6 +213,7 @@ PACKAGE_CODENAMES = {
         ('17', 'queens'),
         ('18', 'rocky'),
         ('19', 'stein'),
+        ('20', 'train'),
     ]),
     'neutron-common': OrderedDict([
         ('7', 'liberty'),
@@ -218,6 +224,7 @@ PACKAGE_CODENAMES = {
         ('12', 'queens'),
         ('13', 'rocky'),
         ('14', 'stein'),
+        ('15', 'train'),
     ]),
     'cinder-common': OrderedDict([
        ('7', 'liberty'),
@@ -228,6 +235,7 @@ PACKAGE_CODENAMES = {
         ('12', 'queens'),
         ('13', 'rocky'),
         ('14', 'stein'),
+        ('15', 'train'),
     ]),
     'keystone': OrderedDict([
         ('8', 'liberty'),
@@ -238,6 +246,7 @@ PACKAGE_CODENAMES = {
         ('13', 'queens'),
         ('14', 'rocky'),
         ('15', 'stein'),
+        ('16', 'train'),
     ]),
     'horizon-common': OrderedDict([
         ('8', 'liberty'),
@@ -248,6 +257,7 @@ PACKAGE_CODENAMES = {
         ('13', 'queens'),
         ('14', 'rocky'),
         ('15', 'stein'),
+        ('16', 'train'),
     ]),
     'ceilometer-common': OrderedDict([
         ('5', 'liberty'),
@@ -258,6 +268,7 @@ PACKAGE_CODENAMES = {
         ('10', 'queens'),
         ('11', 'rocky'),
         ('12', 'stein'),
+        ('13', 'train'),
     ]),
     'heat-common': OrderedDict([
         ('5', 'liberty'),
@@ -268,6 +279,7 @@ PACKAGE_CODENAMES = {
         ('10', 'queens'),
         ('11', 'rocky'),
         ('12', 'stein'),
+        ('13', 'train'),
     ]),
     'glance-common': OrderedDict([
         ('11', 'liberty'),
@@ -278,6 +290,7 @@ PACKAGE_CODENAMES = {
         ('16', 'queens'),
         ('17', 'rocky'),
         ('18', 'stein'),
+        ('19', 'train'),
     ]),
     'openstack-dashboard': OrderedDict([
         ('8', 'liberty'),
@@ -288,6 +301,7 @@ PACKAGE_CODENAMES = {
         ('13', 'queens'),
         ('14', 'rocky'),
         ('15', 'stein'),
+        ('16', 'train'),
     ]),
 }
 
 
@@ -1488,7 +1488,7 @@ def is_broker_action_done(action, rid=None, unit=None):
     @param action: name of action to be performed
     @returns True if action complete otherwise False
     """
-    rdata = relation_get(rid, unit) or {}
+    rdata = relation_get(rid=rid, unit=unit) or {}
     broker_rsp = rdata.get(get_broker_rsp_key())
     if not broker_rsp:
         return False
@@ -1510,7 +1510,7 @@ def mark_broker_action_done(action, rid=None, unit=None):
     @param action: name of action to be performed
     @returns None
     """
-    rdata = relation_get(rid, unit) or {}
+    rdata = relation_get(rid=rid, unit=unit) or {}
     broker_rsp = rdata.get(get_broker_rsp_key())
     if not broker_rsp:
         return
@@ -110,17 +110,19 @@ def is_device_mounted(device):
     return bool(re.search(r'MOUNTPOINT=".+"', out))
 
 
-def mkfs_xfs(device, force=False):
+def mkfs_xfs(device, force=False, inode_size=1024):
     """Format device with XFS filesystem.
 
     By default this should fail if the device already has a filesystem on it.
     :param device: Full path to device to format
     :ptype device: tr
     :param force: Force operation
-    :ptype: force: boolean"""
+    :ptype: force: boolean
+    :param inode_size: XFS inode size in bytes
+    :ptype inode_size: int"""
     cmd = ['mkfs.xfs']
     if force:
         cmd.append("-f")
 
-    cmd += ['-i', 'size=1024', device]
+    cmd += ['-i', "size={}".format(inode_size), device]
     check_call(cmd)
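Illustrative usage of the extended helper (device paths invented):

    # Default matches the previously hard-coded '-i size=1024':
    mkfs_xfs('/dev/sdb', force=True)      # mkfs.xfs -f -i size=1024 /dev/sdb
    # Callers can now request a different inode size:
    mkfs_xfs('/dev/sdc', inode_size=512)  # mkfs.xfs -i size=512 /dev/sdc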
@@ -173,6 +173,14 @@ CLOUD_ARCHIVE_POCKETS = {
     'stein/proposed': 'bionic-proposed/stein',
     'bionic-stein/proposed': 'bionic-proposed/stein',
     'bionic-proposed/stein': 'bionic-proposed/stein',
+    # Train
+    'train': 'bionic-updates/train',
+    'bionic-train': 'bionic-updates/train',
+    'bionic-train/updates': 'bionic-updates/train',
+    'bionic-updates/train': 'bionic-updates/train',
+    'train/proposed': 'bionic-proposed/train',
+    'bionic-train/proposed': 'bionic-proposed/train',
+    'bionic-proposed/train': 'bionic-proposed/train',
 }
 
 
@@ -522,14 +530,16 @@ def add_source(source, key=None, fail_invalid=False):
     for r, fn in six.iteritems(_mapping):
         m = re.match(r, source)
         if m:
-            # call the assoicated function with the captured groups
-            # raises SourceConfigError on error.
-            fn(*m.groups())
             if key:
+                # Import key before adding the source which depends on it,
+                # as refreshing packages could fail otherwise.
                 try:
                     import_key(key)
                 except GPGKeyError as e:
                     raise SourceConfigError(str(e))
+            # call the associated function with the captured groups
+            # raises SourceConfigError on error.
+            fn(*m.groups())
             break
     else:
         # nothing matched. log an error and maybe sys.exit
@@ -562,6 +562,7 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment):
         data = amulet.actions.get_action_output(action_id, full_output=True)
         assert data.get(u"status") == "failed", \
             "Security check is expected to not pass by default"
+        assert data.get(u"results") is not None
 
     def test_900_restart_on_config_change(self):
         """Verify that the specified services are restarted when the