Update rabbit driver config options
The Stein version of python-oslo.messaging (9.0.0+) has removed the following config options from the [oslo_messaging_rabbit] section: rabbit_host, rabbit_port, rabbit_hosts, rabbit_userid, rabbit_password, rabbit_virtual_host, rabbit_max_retries, and rabbit_durable_queues. Additionally, the transport_url directive has moved to the [DEFAULT] section. This change requires a sync from charm-helpers.

These options have been deprecated since Ocata, so the change is also applied to the pre-Stein templates in order to drop the deprecation warnings.

See release notes at:
https://docs.openstack.org/releasenotes/oslo.messaging/index.html

test_300_neutron_config is also removed in this change, as amulet tests no longer need to confirm config file settings.

Change-Id: I4b95c3ff4a37a09e7df5fb5cb6331dc3a46c0095
Closes-Bug: #1817672
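For illustration only (host, password, and vhost below are placeholders, not the charm's actual rendered output), the effect on a service config such as neutron.conf is roughly:

    # Before (oslo.messaging < 9.0.0, options deprecated since Ocata)
    [oslo_messaging_rabbit]
    rabbit_userid = neutron
    rabbit_password = SECRET
    rabbit_host = 10.0.0.10
    rabbit_virtual_host = openstack

    # After (oslo.messaging 9.0.0+ / Stein)
    [DEFAULT]
    transport_url = rabbit://neutron:SECRET@10.0.0.10:5672/openstack

    [oslo_messaging_rabbit]
    # only SSL/HA options remain here, e.g. rabbit_ha_queues, ssl, ssl_ca_file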
parent daf0371843
commit c73311350e

hooks/charmhelpers/contrib/openstack/audits/__init__.py (new file, 134 lines)
@@ -0,0 +1,134 @@
# Copyright 2019 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""OpenStack Security Audit code"""

import collections
from enum import Enum
import traceback

from charmhelpers.core.host import cmp_pkgrevno

import charmhelpers.core.hookenv as hookenv


class AuditType(Enum):
    OpenStackSecurityGuide = 1


_audits = {}

Audit = collections.namedtuple('Audit', 'func filters')


def audit(*args):
    """Decorator to register an audit.

    These are used to generate audits that can be run on a
    deployed system that matches the given configuration

    :param args: List of functions to filter tests against
    :type args: List[Callable(Config)]
    """
    def wrapper(f):
        test_name = f.__name__
        if _audits.get(test_name):
            raise RuntimeError(
                "Test name '{}' used more than once"
                .format(test_name))
        non_callables = [fn for fn in args if not callable(fn)]
        if non_callables:
            raise RuntimeError(
                "Configuration includes non-callable filters: {}"
                .format(non_callables))
        _audits[test_name] = Audit(func=f, filters=args)
        return f
    return wrapper


def is_audit_type(*args):
    """This audit is included in the specified kinds of audits."""
    def should_run(audit_options):
        if audit_options.get('audit_type') in args:
            return True
        else:
            return False
    return should_run


def since_package(pkg, pkg_version):
    """This audit should be run after the specified package version (incl)."""
    return lambda audit_options=None: cmp_pkgrevno(pkg, pkg_version) >= 0


def before_package(pkg, pkg_version):
    """This audit should be run before the specified package version (excl)."""
    return lambda audit_options=None: not since_package(pkg, pkg_version)()


def it_has_config(config_key):
    """This audit should be run based on specified config keys."""
    return lambda audit_options: audit_options.get(config_key) is not None


def run(audit_options):
    """Run the configured audits with the specified audit_options.

    :param audit_options: Configuration for the audit
    :type audit_options: Config
    """
    errors = {}
    results = {}
    for name, audit in sorted(_audits.items()):
        result_name = name.replace('_', '-')
        if all(p(audit_options) for p in audit.filters):
            try:
                audit.func(audit_options)
                print("{}: PASS".format(name))
                results[result_name] = {
                    'success': True,
                }
            except AssertionError as e:
                print("{}: FAIL ({})".format(name, e))
                results[result_name] = {
                    'success': False,
                    'message': e,
                }
            except Exception as e:
                print("{}: ERROR ({})".format(name, e))
                errors[name] = e
                results[result_name] = {
                    'success': False,
                    'message': e,
                }
    for name, error in errors.items():
        print("=" * 20)
        print("Error in {}: ".format(name))
        traceback.print_tb(error.__traceback__)
        print()
    return results


def action_parse_results(result):
    """Parse the result of `run` in the context of an action."""
    passed = True
    for test, result in result.items():
        if result['success']:
            hookenv.action_set({test: 'PASS'})
        else:
            hookenv.action_set({test: 'FAIL - {}'.format(result['message'])})
            passed = False
    if not passed:
        hookenv.action_fail("One or more tests failed")
    return 0 if passed else 1
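A rough usage sketch for the new audit framework (the audit below and its option values are hypothetical, not part of this sync): audits are registered with the decorator plus filter helpers, then executed with run(), and action_parse_results() reports each result through hookenv when called from a charm action:

    from charmhelpers.contrib.openstack.audits import (
        audit,
        is_audit_type,
        run,
        action_parse_results,
        AuditType,
    )

    @audit(is_audit_type(AuditType.OpenStackSecurityGuide))
    def config_file_is_set(audit_options):
        # AssertionError -> reported as FAIL, any other exception -> ERROR
        assert audit_options.get('config_file') is not None, \
            "No config file configured"

    audit_options = {
        'audit_type': AuditType.OpenStackSecurityGuide,
        'config_path': '/etc/neutron',   # hypothetical values
        'config_file': 'neutron.conf',
    }
    action_parse_results(run(audit_options))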
@@ -0,0 +1,303 @@
# Copyright 2019 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import configparser
import glob
import os.path
import subprocess

from charmhelpers.contrib.openstack.audits import (
    audit,
    AuditType,
    # filters
    is_audit_type,
    it_has_config,
)

from charmhelpers.core.hookenv import (
    cached,
)


FILE_ASSERTIONS = {
    'barbican': {
        # From security guide
        '/etc/barbican/barbican.conf': {'group': 'barbican', 'mode': '640'},
        '/etc/barbican/barbican-api-paste.ini':
            {'group': 'barbican', 'mode': '640'},
        '/etc/barbican/policy.json': {'group': 'barbican', 'mode': '640'},
    },
    'ceph-mon': {
        '/var/lib/charm/ceph-mon/ceph.conf':
            {'owner': 'root', 'group': 'root', 'mode': '644'},
        '/etc/ceph/ceph.client.admin.keyring':
            {'owner': 'ceph', 'group': 'ceph'},
        '/etc/ceph/rbdmap': {'mode': '644'},
        '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
        '/var/lib/ceph/bootstrap-*/ceph.keyring':
            {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}
    },
    'ceph-osd': {
        '/var/lib/charm/ceph-osd/ceph.conf':
            {'owner': 'ceph', 'group': 'ceph', 'mode': '644'},
        '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
        '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
        '/var/lib/ceph/bootstrap-*/ceph.keyring':
            {'owner': 'ceph', 'group': 'ceph', 'mode': '600'},
        '/var/lib/ceph/radosgw':
            {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
    },
    'cinder': {
        # From security guide
        '/etc/cinder/cinder.conf': {'group': 'cinder', 'mode': '640'},
        '/etc/cinder/api-paste.conf': {'group': 'cinder', 'mode': '640'},
        '/etc/cinder/rootwrap.conf': {'group': 'cinder', 'mode': '640'},
    },
    'glance': {
        # From security guide
        '/etc/glance/glance-api-paste.ini': {'group': 'glance', 'mode': '640'},
        '/etc/glance/glance-api.conf': {'group': 'glance', 'mode': '640'},
        '/etc/glance/glance-cache.conf': {'group': 'glance', 'mode': '640'},
        '/etc/glance/glance-manage.conf': {'group': 'glance', 'mode': '640'},
        '/etc/glance/glance-registry-paste.ini':
            {'group': 'glance', 'mode': '640'},
        '/etc/glance/glance-registry.conf': {'group': 'glance', 'mode': '640'},
        '/etc/glance/glance-scrubber.conf': {'group': 'glance', 'mode': '640'},
        '/etc/glance/glance-swift-store.conf':
            {'group': 'glance', 'mode': '640'},
        '/etc/glance/policy.json': {'group': 'glance', 'mode': '640'},
        '/etc/glance/schema-image.json': {'group': 'glance', 'mode': '640'},
        '/etc/glance/schema.json': {'group': 'glance', 'mode': '640'},
    },
    'keystone': {
        # From security guide
        '/etc/keystone/keystone.conf': {'group': 'keystone', 'mode': '640'},
        '/etc/keystone/keystone-paste.ini':
            {'group': 'keystone', 'mode': '640'},
        '/etc/keystone/policy.json': {'group': 'keystone', 'mode': '640'},
        '/etc/keystone/logging.conf': {'group': 'keystone', 'mode': '640'},
        '/etc/keystone/ssl/certs/signing_cert.pem':
            {'group': 'keystone', 'mode': '640'},
        '/etc/keystone/ssl/private/signing_key.pem':
            {'group': 'keystone', 'mode': '640'},
        '/etc/keystone/ssl/certs/ca.pem': {'group': 'keystone', 'mode': '640'},
    },
    'manilla': {
        # From security guide
        '/etc/manila/manila.conf': {'group': 'manilla', 'mode': '640'},
        '/etc/manila/api-paste.ini': {'group': 'manilla', 'mode': '640'},
        '/etc/manila/policy.json': {'group': 'manilla', 'mode': '640'},
        '/etc/manila/rootwrap.conf': {'group': 'manilla', 'mode': '640'},
    },
    'neutron-gateway': {
        '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
        '/etc/neutron/rootwrap.conf': {'mode': '640'},
        '/etc/neutron/rootwrap.d': {'mode': '755'},
        '/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
    },
    'neutron-api': {
        # From security guide
        '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
        '/etc/nova/api-paste.ini': {'group': 'neutron', 'mode': '640'},
        '/etc/neutron/rootwrap.conf': {'group': 'neutron', 'mode': '640'},
        # Additional validations
        '/etc/neutron/rootwrap.d': {'mode': '755'},
        '/etc/neutron/neutron_lbaas.conf': {'mode': '644'},
        '/etc/neutron/neutron_vpnaas.conf': {'mode': '644'},
        '/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
    },
    'nova-cloud-controller': {
        # From security guide
        '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
        '/etc/nova/nova.conf': {'group': 'nova', 'mode': '750'},
        '/etc/nova/*': {'group': 'nova', 'mode': '640'},
        # Additional validations
        '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
    },
    'nova-compute': {
        # From security guide
        '/etc/nova/nova.conf': {'group': 'nova', 'mode': '640'},
        '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
        '/etc/nova/rootwrap.conf': {'group': 'nova', 'mode': '640'},
        # Additional Validations
        '/etc/nova/nova-compute.conf': {'group': 'nova', 'mode': '640'},
        '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
        '/etc/nova/nm.conf': {'mode': '644'},
        '/etc/nova/*': {'group': 'nova', 'mode': '640'},
    },
    'openstack-dashboard': {
        # From security guide
        '/etc/openstack-dashboard/local_settings.py':
            {'group': 'horizon', 'mode': '640'},
    },
}

Ownership = collections.namedtuple('Ownership', 'owner group mode')


@cached
def _stat(file):
    """
    Get the Ownership information from a file.

    :param file: The path to a file to stat
    :type file: str
    :returns: owner, group, and mode of the specified file
    :rtype: Ownership
    :raises subprocess.CalledProcessError: If the underlying stat fails
    """
    out = subprocess.check_output(
        ['stat', '-c', '%U %G %a', file]).decode('utf-8')
    return Ownership(*out.strip().split(' '))


@cached
def _config_ini(path):
    """
    Parse an ini file

    :param path: The path to a file to parse
    :type file: str
    :returns: Configuration contained in path
    :rtype: Dict
    """
    conf = configparser.ConfigParser()
    conf.read(path)
    return dict(conf)


def _validate_file_ownership(owner, group, file_name):
    """
    Validate that a specified file is owned by `owner:group`.

    :param owner: Name of the owner
    :type owner: str
    :param group: Name of the group
    :type group: str
    :param file_name: Path to the file to verify
    :type file_name: str
    """
    try:
        ownership = _stat(file_name)
    except subprocess.CalledProcessError as e:
        print("Error reading file: {}".format(e))
        assert False, "Specified file does not exist: {}".format(file_name)
    assert owner == ownership.owner, \
        "{} has an incorrect owner: {} should be {}".format(
            file_name, ownership.owner, owner)
    assert group == ownership.group, \
        "{} has an incorrect group: {} should be {}".format(
            file_name, ownership.group, group)
    print("Validate ownership of {}: PASS".format(file_name))


def _validate_file_mode(mode, file_name):
    """
    Validate that a specified file has the specified permissions.

    :param mode: file mode that is desires
    :type owner: str
    :param file_name: Path to the file to verify
    :type file_name: str
    """
    try:
        ownership = _stat(file_name)
    except subprocess.CalledProcessError as e:
        print("Error reading file: {}".format(e))
        assert False, "Specified file does not exist: {}".format(file_name)
    assert mode == ownership.mode, \
        "{} has an incorrect mode: {} should be {}".format(
            file_name, ownership.mode, mode)
    print("Validate mode of {}: PASS".format(file_name))


@cached
def _config_section(config, section):
    """Read the configuration file and return a section."""
    path = os.path.join(config.get('config_path'), config.get('config_file'))
    conf = _config_ini(path)
    return conf.get(section)


@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
       it_has_config('files'))
def validate_file_ownership(config):
    """Verify that configuration files are owned by the correct user/group."""
    files = config.get('files', {})
    for file_name, options in files.items():
        for key in options.keys():
            if key not in ["owner", "group", "mode"]:
                raise RuntimeError(
                    "Invalid ownership configuration: {}".format(key))
        owner = options.get('owner', config.get('owner', 'root'))
        group = options.get('group', config.get('group', 'root'))
        if '*' in file_name:
            for file in glob.glob(file_name):
                if file not in files.keys():
                    if os.path.isfile(file):
                        _validate_file_ownership(owner, group, file)
        else:
            if os.path.isfile(file_name):
                _validate_file_ownership(owner, group, file_name)


@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
       it_has_config('files'))
def validate_file_permissions(config):
    """Verify that permissions on configuration files are secure enough."""
    files = config.get('files', {})
    for file_name, options in files.items():
        for key in options.keys():
            if key not in ["owner", "group", "mode"]:
                raise RuntimeError(
                    "Invalid ownership configuration: {}".format(key))
        mode = options.get('mode', config.get('permissions', '600'))
        if '*' in file_name:
            for file in glob.glob(file_name):
                if file not in files.keys():
                    if os.path.isfile(file):
                        _validate_file_mode(mode, file)
        else:
            if os.path.isfile(file_name):
                _validate_file_mode(mode, file_name)


@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def validate_uses_keystone(audit_options):
    """Validate that the service uses Keystone for authentication."""
    section = _config_section(audit_options, 'DEFAULT')
    assert section is not None, "Missing section 'DEFAULT'"
    assert section.get('auth_strategy') == "keystone", \
        "Application is not using Keystone"


@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def validate_uses_tls_for_keystone(audit_options):
    """Verify that TLS is used to communicate with Keystone."""
    section = _config_section(audit_options, 'keystone_authtoken')
    assert section is not None, "Missing section 'keystone_authtoken'"
    assert not section.get('insecure') and \
        "https://" in section.get("auth_uri"), \
        "TLS is not used for Keystone"


@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def validate_uses_tls_for_glance(audit_options):
    """Verify that TLS is used to communicate with Glance."""
    section = _config_section(audit_options, 'glance')
    assert section is not None, "Missing section 'glance'"
    assert not section.get('insecure') and \
        "https://" in section.get("api_servers"), \
        "TLS is not used for Glance"
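The audits in this module are driven by the audit_options passed to run(): validate_file_ownership and validate_file_permissions read the 'files' map (owner and group default to 'root', mode falls back to the 'permissions' option or '600', and names containing '*' are expanded with glob), while the Keystone/TLS checks read the ini file named by 'config_path' and 'config_file'. A minimal sketch of such options, reusing two of the neutron-gateway entries from FILE_ASSERTIONS above (paths on a real unit may differ):

    audit_options = {
        'audit_type': AuditType.OpenStackSecurityGuide,
        'files': {
            '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
            '/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
        },
        'config_path': '/etc/neutron',
        'config_file': 'neutron.conf',
    }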
@@ -29,6 +29,7 @@ from charmhelpers.fetch import (
     filter_installed_packages,
 )
 from charmhelpers.core.hookenv import (
+    NoNetworkBinding,
     config,
     is_relation_made,
     local_unit,
@@ -868,7 +869,7 @@ class ApacheSSLContext(OSContextGenerator):
                     addr = network_get_primary_address(
                         ADDRESS_MAP[net_type]['binding']
                     )
-                except NotImplementedError:
+                except (NotImplementedError, NoNetworkBinding):
                     addr = fallback

             endpoint = resolve_address(net_type)
@@ -13,6 +13,7 @@
 # limitations under the License.

 from charmhelpers.core.hookenv import (
+    NoNetworkBinding,
     config,
     unit_get,
     service_name,
@@ -175,7 +176,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
             # configuration is not in use
             try:
                 resolved_address = network_get_primary_address(binding)
-            except NotImplementedError:
+            except (NotImplementedError, NoNetworkBinding):
                 resolved_address = fallback_addr

     if resolved_address is None:
@@ -0,0 +1,10 @@
[oslo_messaging_rabbit]
{% if rabbitmq_ha_queues -%}
rabbit_ha_queues = True
{% endif -%}
{% if rabbit_ssl_port -%}
ssl = True
{% endif -%}
{% if rabbit_ssl_ca -%}
ssl_ca_file = {{ rabbit_ssl_ca }}
{% endif -%}
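For reference, with rabbitmq_ha_queues set and a CA supplied in the template context (illustrative values only), the include above renders roughly as:

    [oslo_messaging_rabbit]
    rabbit_ha_queues = True
    ssl_ca_file = /etc/rabbitmq/rabbit-client-ca.pem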
@ -59,6 +59,7 @@ from charmhelpers.core.host import (
|
|||||||
service_stop,
|
service_stop,
|
||||||
service_running,
|
service_running,
|
||||||
umount,
|
umount,
|
||||||
|
cmp_pkgrevno,
|
||||||
)
|
)
|
||||||
from charmhelpers.fetch import (
|
from charmhelpers.fetch import (
|
||||||
apt_install,
|
apt_install,
|
||||||
@ -178,7 +179,6 @@ class Pool(object):
|
|||||||
"""
|
"""
|
||||||
# read-only is easy, writeback is much harder
|
# read-only is easy, writeback is much harder
|
||||||
mode = get_cache_mode(self.service, cache_pool)
|
mode = get_cache_mode(self.service, cache_pool)
|
||||||
version = ceph_version()
|
|
||||||
if mode == 'readonly':
|
if mode == 'readonly':
|
||||||
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
|
||||||
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
|
||||||
@ -186,7 +186,7 @@ class Pool(object):
|
|||||||
elif mode == 'writeback':
|
elif mode == 'writeback':
|
||||||
pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
|
pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
|
||||||
'cache-mode', cache_pool, 'forward']
|
'cache-mode', cache_pool, 'forward']
|
||||||
if version >= '10.1':
|
if cmp_pkgrevno('ceph', '10.1') >= 0:
|
||||||
# Jewel added a mandatory flag
|
# Jewel added a mandatory flag
|
||||||
pool_forward_cmd.append('--yes-i-really-mean-it')
|
pool_forward_cmd.append('--yes-i-really-mean-it')
|
||||||
|
|
||||||
@ -196,7 +196,8 @@ class Pool(object):
|
|||||||
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
|
||||||
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
|
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
|
||||||
|
|
||||||
def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
|
def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
|
||||||
|
device_class=None):
|
||||||
"""Return the number of placement groups to use when creating the pool.
|
"""Return the number of placement groups to use when creating the pool.
|
||||||
|
|
||||||
Returns the number of placement groups which should be specified when
|
Returns the number of placement groups which should be specified when
|
||||||
@ -229,6 +230,9 @@ class Pool(object):
|
|||||||
increased. NOTE: the default is primarily to handle the scenario
|
increased. NOTE: the default is primarily to handle the scenario
|
||||||
where related charms requiring pools has not been upgraded to
|
where related charms requiring pools has not been upgraded to
|
||||||
include an update to indicate their relative usage of the pools.
|
include an update to indicate their relative usage of the pools.
|
||||||
|
:param device_class: str. class of storage to use for basis of pgs
|
||||||
|
calculation; ceph supports nvme, ssd and hdd by default based
|
||||||
|
on presence of devices of each type in the deployment.
|
||||||
:return: int. The number of pgs to use.
|
:return: int. The number of pgs to use.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@ -243,17 +247,20 @@ class Pool(object):
|
|||||||
|
|
||||||
# If the expected-osd-count is specified, then use the max between
|
# If the expected-osd-count is specified, then use the max between
|
||||||
# the expected-osd-count and the actual osd_count
|
# the expected-osd-count and the actual osd_count
|
||||||
osd_list = get_osds(self.service)
|
osd_list = get_osds(self.service, device_class)
|
||||||
expected = config('expected-osd-count') or 0
|
expected = config('expected-osd-count') or 0
|
||||||
|
|
||||||
if osd_list:
|
if osd_list:
|
||||||
|
if device_class:
|
||||||
|
osd_count = len(osd_list)
|
||||||
|
else:
|
||||||
osd_count = max(expected, len(osd_list))
|
osd_count = max(expected, len(osd_list))
|
||||||
|
|
||||||
# Log a message to provide some insight if the calculations claim
|
# Log a message to provide some insight if the calculations claim
|
||||||
# to be off because someone is setting the expected count and
|
# to be off because someone is setting the expected count and
|
||||||
# there are more OSDs in reality. Try to make a proper guess
|
# there are more OSDs in reality. Try to make a proper guess
|
||||||
# based upon the cluster itself.
|
# based upon the cluster itself.
|
||||||
if expected and osd_count != expected:
|
if not device_class and expected and osd_count != expected:
|
||||||
log("Found more OSDs than provided expected count. "
|
log("Found more OSDs than provided expected count. "
|
||||||
"Using the actual count instead", INFO)
|
"Using the actual count instead", INFO)
|
||||||
elif expected:
|
elif expected:
|
||||||
@ -626,7 +633,8 @@ def remove_erasure_profile(service, profile_name):
|
|||||||
def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
|
def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
|
||||||
failure_domain='host',
|
failure_domain='host',
|
||||||
data_chunks=2, coding_chunks=1,
|
data_chunks=2, coding_chunks=1,
|
||||||
locality=None, durability_estimator=None):
|
locality=None, durability_estimator=None,
|
||||||
|
device_class=None):
|
||||||
"""
|
"""
|
||||||
Create a new erasure code profile if one does not already exist for it. Updates
|
Create a new erasure code profile if one does not already exist for it. Updates
|
||||||
the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
|
the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
|
||||||
@ -640,10 +648,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
|
|||||||
:param coding_chunks: int
|
:param coding_chunks: int
|
||||||
:param locality: int
|
:param locality: int
|
||||||
:param durability_estimator: int
|
:param durability_estimator: int
|
||||||
|
:param device_class: six.string_types
|
||||||
:return: None. Can raise CalledProcessError
|
:return: None. Can raise CalledProcessError
|
||||||
"""
|
"""
|
||||||
version = ceph_version()
|
|
||||||
|
|
||||||
# Ensure this failure_domain is allowed by Ceph
|
# Ensure this failure_domain is allowed by Ceph
|
||||||
validator(failure_domain, six.string_types,
|
validator(failure_domain, six.string_types,
|
||||||
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
|
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
|
||||||
@ -654,12 +661,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
|
|||||||
if locality is not None and durability_estimator is not None:
|
if locality is not None and durability_estimator is not None:
|
||||||
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
|
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
|
||||||
|
|
||||||
|
luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
|
||||||
# failure_domain changed in luminous
|
# failure_domain changed in luminous
|
||||||
if version and version >= '12.0.0':
|
if luminous_or_later:
|
||||||
cmd.append('crush-failure-domain=' + failure_domain)
|
cmd.append('crush-failure-domain=' + failure_domain)
|
||||||
else:
|
else:
|
||||||
cmd.append('ruleset-failure-domain=' + failure_domain)
|
cmd.append('ruleset-failure-domain=' + failure_domain)
|
||||||
|
|
||||||
|
# device class new in luminous
|
||||||
|
if luminous_or_later and device_class:
|
||||||
|
cmd.append('crush-device-class={}'.format(device_class))
|
||||||
|
else:
|
||||||
|
log('Skipping device class configuration (ceph < 12.0.0)',
|
||||||
|
level=DEBUG)
|
||||||
|
|
||||||
# Add plugin specific information
|
# Add plugin specific information
|
||||||
if locality is not None:
|
if locality is not None:
|
||||||
# For local erasure codes
|
# For local erasure codes
|
||||||
@ -744,12 +759,20 @@ def pool_exists(service, name):
|
|||||||
return name in out.split()
|
return name in out.split()
|
||||||
|
|
||||||
|
|
||||||
def get_osds(service):
|
def get_osds(service, device_class=None):
|
||||||
"""Return a list of all Ceph Object Storage Daemons currently in the
|
"""Return a list of all Ceph Object Storage Daemons currently in the
|
||||||
cluster.
|
cluster (optionally filtered by storage device class).
|
||||||
|
|
||||||
|
:param device_class: Class of storage device for OSD's
|
||||||
|
:type device_class: str
|
||||||
"""
|
"""
|
||||||
version = ceph_version()
|
luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
|
||||||
if version and version >= '0.56':
|
if luminous_or_later and device_class:
|
||||||
|
out = check_output(['ceph', '--id', service,
|
||||||
|
'osd', 'crush', 'class',
|
||||||
|
'ls-osd', device_class,
|
||||||
|
'--format=json'])
|
||||||
|
else:
|
||||||
out = check_output(['ceph', '--id', service,
|
out = check_output(['ceph', '--id', service,
|
||||||
'osd', 'ls',
|
'osd', 'ls',
|
||||||
'--format=json'])
|
'--format=json'])
|
||||||
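A brief illustrative call (the 'admin' client name is a placeholder): with a device class the OSD list now comes from 'ceph osd crush class ls-osd' on Luminous or later, otherwise from 'ceph osd ls':

    ssd_osds = get_osds('admin', device_class='ssd')
    all_osds = get_osds('admin')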
@@ -757,8 +780,6 @@ def get_osds(service):
         out = out.decode('UTF-8')
     return json.loads(out)

-    return None
-

 def install():
     """Basic Ceph client installation."""
@@ -811,7 +832,7 @@ def set_app_name_for_pool(client, pool, name):

     :raises: CalledProcessError if ceph call fails
     """
-    if ceph_version() >= '12.0.0':
+    if cmp_pkgrevno('ceph', '12.0.0') >= 0:
         cmd = ['ceph', '--id', client, 'osd', 'pool',
                'application', 'enable', pool, name]
         check_call(cmd)
@@ -1091,22 +1112,6 @@ def ensure_ceph_keyring(service, user=None, group=None,
     return True


-def ceph_version():
-    """Retrieve the local version of ceph."""
-    if os.path.exists('/usr/bin/ceph'):
-        cmd = ['ceph', '-v']
-        output = check_output(cmd)
-        if six.PY3:
-            output = output.decode('UTF-8')
-        output = output.split()
-        if len(output) > 3:
-            return output[2]
-        else:
-            return None
-    else:
-        return None
-
-
 class CephBrokerRq(object):
     """Ceph broker request.

@@ -1147,7 +1152,8 @@ class CephBrokerRq(object):
                         'object-prefix-permissions': object_prefix_permissions})

     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
-                           weight=None, group=None, namespace=None):
+                           weight=None, group=None, namespace=None,
+                           app_name=None):
         """Adds an operation to create a pool.

         @param pg_num setting: optional setting. If not provided, this value
@@ -1155,6 +1161,11 @@ class CephBrokerRq(object):
         cluster at the time of creation. Note that, if provided, this value
         will be capped at the current available maximum.
         @param weight: the percentage of data the pool makes up
+        :param app_name: (Optional) Tag pool with application name. Note that
+                         there is certain protocols emerging upstream with
+                         regard to meaningful application names to use.
+                         Examples are ``rbd`` and ``rgw``.
+        :type app_name: str
         """
         if pg_num and weight:
             raise ValueError('pg_num and weight are mutually exclusive')
@@ -1162,7 +1173,7 @@ class CephBrokerRq(object):
         self.ops.append({'op': 'create-pool', 'name': name,
                          'replicas': replica_count, 'pg_num': pg_num,
                          'weight': weight, 'group': group,
-                         'group-namespace': namespace})
+                         'group-namespace': namespace, 'app-name': app_name})

     def set_ops(self, ops):
         """Set request ops to provided value.
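As a usage sketch on the consuming charm side (pool name and weight are placeholders), the new app_name argument is simply carried through as 'app-name' in the broker op:

    rq = CephBrokerRq()
    rq.add_op_create_pool(name='glance', replica_count=3,
                          weight=5, app_name='rbd')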
templates/ocata/neutron.conf (new file, 29 lines)
@@ -0,0 +1,29 @@
# mitaka
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
verbose = {{ verbose }}
debug = {{ debug }}
core_plugin = {{ core_plugin }}
{% if network_device_mtu -%}
network_device_mtu = {{ network_device_mtu }}
{% endif -%}
api_workers = {{ workers }}
rpc_response_timeout = {{ rpc_response_timeout }}
{% if transport_url %}
transport_url = {{ transport_url }}
{% endif %}

[agent]
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
report_interval = {{ report_interval }}
{% include "parts/agent" %}

{% include "section-oslo-messaging-rabbit" %}

{% include "section-oslo-notifications" %}

[oslo_concurrency]
lock_path = /var/lock/neutron
@@ -480,68 +480,6 @@ class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
             message = u.relation_error('nova-cc neutron-api', ret)
             amulet.raise_status(amulet.FAIL, msg=message)

-    def test_300_neutron_config(self):
-        """Verify the data in the neutron config file."""
-        u.log.debug('Checking neutron gateway config file data...')
-        unit = self.neutron_gateway_sentry
-        rmq_ng_rel = self.rmq_sentry.relation(
-            'amqp', 'neutron-gateway:amqp')
-
-        conf = '/etc/neutron/neutron.conf'
-        expected = {
-            'DEFAULT': {
-                'verbose': 'False',
-                'debug': 'False',
-                'core_plugin': 'ml2',
-                'control_exchange': 'neutron',
-                'notification_driver': 'messaging',
-            },
-            'agent': {
-                'root_helper': 'sudo /usr/bin/neutron-rootwrap '
-                               '/etc/neutron/rootwrap.conf'
-            }
-        }
-
-        if self._get_openstack_release() >= self.trusty_mitaka:
-            del expected['DEFAULT']['control_exchange']
-            del expected['DEFAULT']['notification_driver']
-            connection_uri = (
-                "rabbit://neutron:{}@{}:5672/"
-                "openstack".format(rmq_ng_rel['password'],
-                                   rmq_ng_rel['hostname'])
-            )
-            expected['oslo_messaging_notifications'] = {
-                'driver': 'messagingv2',
-                'transport_url': connection_uri
-            }
-
-        if self._get_openstack_release() >= self.trusty_kilo:
-            # Kilo or later
-            expected['oslo_messaging_rabbit'] = {
-                'rabbit_userid': 'neutron',
-                'rabbit_virtual_host': 'openstack',
-                'rabbit_password': rmq_ng_rel['password'],
-                'rabbit_host': rmq_ng_rel['hostname'],
-            }
-            expected['oslo_concurrency'] = {
-                'lock_path': '/var/lock/neutron'
-            }
-        else:
-            # Juno or earlier
-            expected['DEFAULT'].update({
-                'rabbit_userid': 'neutron',
-                'rabbit_virtual_host': 'openstack',
-                'rabbit_password': rmq_ng_rel['password'],
-                'rabbit_host': rmq_ng_rel['hostname'],
-                'lock_path': '/var/lock/neutron',
-            })
-
-        for section, pairs in expected.iteritems():
-            ret = u.validate_config_data(unit, conf, section, pairs)
-            if ret:
-                message = "neutron config error: {}".format(ret)
-                amulet.raise_status(amulet.FAIL, msg=message)
-
     def test_301_neutron_ml2_config(self):
         """Verify the data in the ml2 config file. This is only available
         since icehouse."""