Added new config 'project-domain-name'

This commit is contained in:
Junaid Ali 2016-06-21 22:40:31 +05:00
commit 9075da864b
11 changed files with 344 additions and 132 deletions

View File

@ -53,3 +53,7 @@ options:
type: string
default: Default
description: Keystone user domain name
project-domain-name:
type: string
default: Default
description: Keystone project domain name
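For context, a minimal sketch of how a charm context might consume the new option (the real consumer is the neutron_pg_context.py change further down; this snippet is illustrative only):

    from charmhelpers.core.hookenv import config

    # 'Default' unless the operator overrides project-domain-name
    project_domain_name = config('project-domain-name')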

View File

@ -214,7 +214,16 @@ def format_ipv6_addr(address):
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
fatal=True, exc_list=None):
"""Return the assigned IP address for a given interface, if any."""
"""Return the assigned IP address for a given interface, if any.
:param iface: network interface on which address(es) are expected to
be found.
:param inet_type: inet address family
:param inc_aliases: include alias interfaces in search
:param fatal: if True, raise exception if address not found
:param exc_list: list of addresses to ignore
:return: list of ip addresses
"""
# Extract nic if passed /dev/ethX
if '/' in iface:
iface = iface.split('/')[-1]
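A brief usage sketch for the documented signature (the interface name and address family are assumptions):

    # Returns e.g. ['10.0.0.5']; with fatal=True an exception is raised
    # if no address is assigned to the interface.
    addrs = get_iface_addr(iface='eth0', inet_type='AF_INET', fatal=False)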
@ -315,6 +324,14 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
We currently only support scope global IPv6 addresses i.e. non-temporary
addresses. If no global IPv6 address is found, return the first one found
in the ipv6 address list.
:param iface: network interface on which ipv6 address(es) are expected to
be found.
:param inc_aliases: include alias interfaces in search
:param fatal: if True, raise exception if address not found
:param exc_list: list of addresses to ignore
:param dynamic_only: only recognise dynamic addresses
:return: list of ipv6 addresses
"""
addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
inc_aliases=inc_aliases, fatal=fatal,
@ -336,7 +353,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
cmd = ['ip', 'addr', 'show', iface]
out = subprocess.check_output(cmd).decode('UTF-8')
if dynamic_only:
key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
else:
key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
@ -388,10 +405,10 @@ def is_ip(address):
Returns True if address is a valid IP address.
"""
try:
# Test to see if already an IPv4 address
socket.inet_aton(address)
# Test to see if already an IPv4/IPv6 address
address = netaddr.IPAddress(address)
return True
except socket.error:
except netaddr.AddrFormatError:
return False
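With the switch from socket.inet_aton to netaddr, is_ip now accepts IPv6 as well as IPv4; a quick sketch of the expected behaviour:

    import netaddr

    netaddr.IPAddress('192.168.1.1')   # OK - IPv4
    netaddr.IPAddress('2001:db8::1')   # OK - IPv6 (inet_aton would reject this)
    netaddr.IPAddress('not-an-ip')     # raises netaddr.AddrFormatError,
                                       # so is_ip() returns False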

View File

@ -23,7 +23,6 @@ from base64 import b64decode
from subprocess import check_call, CalledProcessError
import six
import yaml
from charmhelpers.fetch import (
apt_install,
@ -50,6 +49,7 @@ from charmhelpers.core.hookenv import (
from charmhelpers.core.sysctl import create as sysctl_create
from charmhelpers.core.strutils import bool_from_string
from charmhelpers.contrib.openstack.exceptions import OSContextError
from charmhelpers.core.host import (
get_bond_master,
@ -88,7 +88,10 @@ from charmhelpers.contrib.network.ip import (
is_address_in_network,
is_bridge_member,
)
from charmhelpers.contrib.openstack.utils import get_host_ip
from charmhelpers.contrib.openstack.utils import (
config_flags_parser,
get_host_ip,
)
from charmhelpers.core.unitdata import kv
try:
@ -101,10 +104,6 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
ADDRESS_TYPES = ['admin', 'internal', 'public']
class OSContextError(Exception):
pass
def ensure_packages(packages):
"""Install but do not upgrade required plugin packages."""
required = filter_installed_packages(packages)
@ -125,83 +124,6 @@ def context_complete(ctxt):
return True
def config_flags_parser(config_flags):
"""Parses config flags string into dict.
This parsing method supports a few different formats for the config
flag values to be parsed:
1. A string in the simple format of key=value pairs, with the possibility
of specifying multiple key value pairs within the same string. For
example, a string in the format of 'key1=value1, key2=value2' will
return a dict of:
{'key1': 'value1',
'key2': 'value2'}.
2. A string in the above format, but supporting a comma-delimited list
of values for the same key. For example, a string in the format of
'key1=value1, key2=value3,value4,value5' will return a dict of:
{'key1': 'value1',
'key2': 'value3,value4,value5'}
3. A string containing a colon character (:) prior to an equal
character (=) will be treated as yaml and parsed as such. This can be
used to specify more complex key value pairs. For example,
a string in the format of 'key1: subkey1=value1, subkey2=value2' will
return a dict of:
{'key1': 'subkey1=value1, subkey2=value2'}
The provided config_flags string may be a list of comma-separated values
which themselves may be comma-separated lists of values.
"""
# If we find a colon before an equals sign then treat it as yaml.
# Note: limit it to finding the colon first since this indicates assignment
# for inline yaml.
colon = config_flags.find(':')
equals = config_flags.find('=')
if colon > 0:
if colon < equals or equals < 0:
return yaml.safe_load(config_flags)
if config_flags.find('==') >= 0:
log("config_flags is not in expected format (key=value)", level=ERROR)
raise OSContextError
# strip the following from each value.
post_strippers = ' ,'
# we strip any leading/trailing '=' or ' ' from the string then
# split on '='.
split = config_flags.strip(' =').split('=')
limit = len(split)
flags = {}
for i in range(0, limit - 1):
current = split[i]
next = split[i + 1]
vindex = next.rfind(',')
if (i == limit - 2) or (vindex < 0):
value = next
else:
value = next[:vindex]
if i == 0:
key = current
else:
# if this is not the first entry, expect an embedded key.
index = current.rfind(',')
if index < 0:
log("Invalid config value(s) at index %s" % (i), level=ERROR)
raise OSContextError
key = current[index + 1:]
# Add to collection.
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
return flags
class OSContextGenerator(object):
"""Base class for all context generators."""
interfaces = []

View File

@ -0,0 +1,6 @@
class OSContextError(Exception):
"""Raised when an error occurs during context generation.
This exception is principally used in contrib.openstack.context.
"""
pass
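A minimal sketch of the intended import path now that the exception lives in its own module (the malformed flag string is illustrative):

    from charmhelpers.contrib.openstack.exceptions import OSContextError
    from charmhelpers.contrib.openstack.utils import config_flags_parser

    try:
        flags = config_flags_parser('badly == formed')  # '==' is rejected
    except OSContextError:
        flags = {}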

View File

@ -25,6 +25,7 @@ import sys
import re
import itertools
import functools
import shutil
import six
import tempfile
@ -46,6 +47,7 @@ from charmhelpers.core.hookenv import (
charm_dir,
DEBUG,
INFO,
ERROR,
related_units,
relation_ids,
relation_set,
@ -82,6 +84,7 @@ from charmhelpers.core.host import (
from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
from charmhelpers.contrib.openstack.exceptions import OSContextError
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@ -100,6 +103,8 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zebra', 'ocata'), # TODO: upload with real Z name
])
@ -114,6 +119,8 @@ OPENSTACK_CODENAMES = OrderedDict([
('2015.1', 'kilo'),
('2015.2', 'liberty'),
('2016.1', 'mitaka'),
('2016.2', 'newton'),
('2017.1', 'ocata'),
])
# The ugly duckling - must list releases oldest to newest
@ -138,46 +145,65 @@ SWIFT_CODENAMES = OrderedDict([
['2.3.0', '2.4.0', '2.5.0']),
('mitaka',
['2.5.0', '2.6.0', '2.7.0']),
('newton',
['2.8.0']),
])
# >= Liberty version->codename mapping
PACKAGE_CODENAMES = {
'nova-common': OrderedDict([
('12.0', 'liberty'),
('13.0', 'mitaka'),
('12', 'liberty'),
('13', 'mitaka'),
('14', 'newton'),
('15', 'ocata'),
]),
'neutron-common': OrderedDict([
('7.0', 'liberty'),
('8.0', 'mitaka'),
('7', 'liberty'),
('8', 'mitaka'),
('9', 'newton'),
('10', 'ocata'),
]),
'cinder-common': OrderedDict([
('7.0', 'liberty'),
('8.0', 'mitaka'),
('7', 'liberty'),
('8', 'mitaka'),
('9', 'newton'),
('10', 'ocata'),
]),
'keystone': OrderedDict([
('8.0', 'liberty'),
('8.1', 'liberty'),
('9.0', 'mitaka'),
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
]),
'horizon-common': OrderedDict([
('8.0', 'liberty'),
('9.0', 'mitaka'),
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
]),
'ceilometer-common': OrderedDict([
('5.0', 'liberty'),
('6.0', 'mitaka'),
('5', 'liberty'),
('6', 'mitaka'),
('7', 'newton'),
('8', 'ocata'),
]),
'heat-common': OrderedDict([
('5.0', 'liberty'),
('6.0', 'mitaka'),
('5', 'liberty'),
('6', 'mitaka'),
('7', 'newton'),
('8', 'ocata'),
]),
'glance-common': OrderedDict([
('11.0', 'liberty'),
('12.0', 'mitaka'),
('11', 'liberty'),
('12', 'mitaka'),
('13', 'newton'),
('14', 'ocata'),
]),
'openstack-dashboard': OrderedDict([
('8.0', 'liberty'),
('9.0', 'mitaka'),
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
]),
}
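The table now keys on major version only; a short sketch of how get_os_codename_package (shown further down) derives the lookup key:

    # e.g. a package version string yielding vers = '13.0.0'
    vers = '13.0.0'
    major_vers = vers.split('.')[0]                # '13'
    PACKAGE_CODENAMES['nova-common'][major_vers]   # 'mitaka'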
@ -253,6 +279,7 @@ def get_os_version_codename_swift(codename):
def get_swift_codename(version):
'''Determine OpenStack codename that corresponds to swift version.'''
codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
if len(codenames) > 1:
# If more than one release codename contains this version we determine
# the actual codename based on the highest available install source.
@ -264,6 +291,16 @@ def get_swift_codename(version):
return codename
elif len(codenames) == 1:
return codenames[0]
# NOTE: fallback - attempt to match with just major.minor version
match = re.match('^(\d+)\.(\d+)', version)
if match:
major_minor_version = match.group(0)
for codename, versions in six.iteritems(SWIFT_CODENAMES):
for release_version in versions:
if release_version.startswith(major_minor_version):
return codename
return None
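A quick sketch of the new major.minor fallback (the sample version is an assumption):

    import re

    version = '2.7.1'   # not listed verbatim in SWIFT_CODENAMES
    major_minor = re.match(r'^(\d+)\.(\d+)', version).group(0)   # '2.7'
    # '2.7' prefixes '2.7.0' under 'mitaka', so the fallback returns 'mitaka'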
@ -302,10 +339,13 @@ def get_os_codename_package(package, fatal=True):
if match:
vers = match.group(0)
# Generate a major version number for newer semantic
# versions of openstack projects
major_vers = vers.split('.')[0]
# >= Liberty independent project versions
if (package in PACKAGE_CODENAMES and
vers in PACKAGE_CODENAMES[package]):
return PACKAGE_CODENAMES[package][vers]
major_vers in PACKAGE_CODENAMES[package]):
return PACKAGE_CODENAMES[package][major_vers]
else:
# < Liberty co-ordinated project versions
try:
@ -465,6 +505,9 @@ def configure_installation_source(rel):
'mitaka': 'trusty-updates/mitaka',
'mitaka/updates': 'trusty-updates/mitaka',
'mitaka/proposed': 'trusty-proposed/mitaka',
'newton': 'xenial-updates/newton',
'newton/updates': 'xenial-updates/newton',
'newton/proposed': 'xenial-proposed/newton',
}
try:
@ -857,6 +900,47 @@ def git_yaml_value(projects_yaml, key):
return None
def git_generate_systemd_init_files(templates_dir):
"""
Generate systemd init files.
Generates and installs systemd init units and script files based on the
*.init.in files contained in the templates_dir directory.
This code is based on the openstack-pkg-tools package and its init
script generation, which is used by the OpenStack packages.
"""
for f in os.listdir(templates_dir):
if f.endswith(".init.in"):
init_in_file = f
init_file = f[:-8]
service_file = "{}.service".format(init_file)
init_in_source = os.path.join(templates_dir, init_in_file)
init_source = os.path.join(templates_dir, init_file)
service_source = os.path.join(templates_dir, service_file)
init_dest = os.path.join('/etc/init.d', init_file)
service_dest = os.path.join('/lib/systemd/system', service_file)
shutil.copyfile(init_in_source, init_source)
with open(init_source, 'a') as outfile:
template = '/usr/share/openstack-pkg-tools/init-script-template'
with open(template) as infile:
outfile.write('\n\n{}'.format(infile.read()))
cmd = ['pkgos-gen-systemd-unit', init_in_source]
subprocess.check_call(cmd)
if os.path.exists(init_dest):
os.remove(init_dest)
if os.path.exists(service_dest):
os.remove(service_dest)
shutil.move(init_source, init_dest)
shutil.move(service_source, service_dest)
os.chmod(init_dest, 0o755)
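For illustration, a hedged sketch of the expected inputs and outputs (the template name is an assumption):

    # Given templates/foo-api.init.in, the call below produces
    #   /etc/init.d/foo-api                   (template + init-script-template body)
    #   /lib/systemd/system/foo-api.service   (via pkgos-gen-systemd-unit)
    git_generate_systemd_init_files('templates')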
def os_workload_status(configs, required_interfaces, charm_func=None):
"""
Decorator to set workload status based on complete contexts
@ -1573,3 +1657,82 @@ def pausable_restart_on_change(restart_map, stopstart=False,
restart_functions)
return wrapped_f
return wrap
def config_flags_parser(config_flags):
"""Parses config flags string into dict.
This parsing method supports a few different formats for the config
flag values to be parsed:
1. A string in the simple format of key=value pairs, with the possibility
of specifying multiple key value pairs within the same string. For
example, a string in the format of 'key1=value1, key2=value2' will
return a dict of:
{'key1': 'value1',
'key2': 'value2'}.
2. A string in the above format, but supporting a comma-delimited list
of values for the same key. For example, a string in the format of
'key1=value1, key2=value3,value4,value5' will return a dict of:
{'key1': 'value1',
'key2': 'value3,value4,value5'}
3. A string containing a colon character (:) prior to an equal
character (=) will be treated as yaml and parsed as such. This can be
used to specify more complex key value pairs. For example,
a string in the format of 'key1: subkey1=value1, subkey2=value2' will
return a dict of:
{'key1': 'subkey1=value1, subkey2=value2'}
The provided config_flags string may be a list of comma-separated values
which themselves may be comma-separated lists of values.
"""
# If we find a colon before an equals sign then treat it as yaml.
# Note: limit it to finding the colon first since this indicates assignment
# for inline yaml.
colon = config_flags.find(':')
equals = config_flags.find('=')
if colon > 0:
if colon < equals or equals < 0:
return yaml.safe_load(config_flags)
if config_flags.find('==') >= 0:
juju_log("config_flags is not in expected format (key=value)",
level=ERROR)
raise OSContextError
# strip the following from each value.
post_strippers = ' ,'
# we strip any leading/trailing '=' or ' ' from the string then
# split on '='.
split = config_flags.strip(' =').split('=')
limit = len(split)
flags = {}
for i in range(0, limit - 1):
current = split[i]
next = split[i + 1]
vindex = next.rfind(',')
if (i == limit - 2) or (vindex < 0):
value = next
else:
value = next[:vindex]
if i == 0:
key = current
else:
# if this is not the first entry, expect an embedded key.
index = current.rfind(',')
if index < 0:
juju_log("Invalid config value(s) at index %s" % (i),
level=ERROR)
raise OSContextError
key = current[index + 1:]
# Add to collection.
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
return flags
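A usage sketch matching the docstring formats (the input strings are illustrative):

    config_flags_parser('key1=value1, key2=value2')
    # -> {'key1': 'value1', 'key2': 'value2'}

    config_flags_parser('key1=value1, key2=value3,value4,value5')
    # -> {'key1': 'value1', 'key2': 'value3,value4,value5'}

    config_flags_parser('key1: subkey1=value1, subkey2=value2')
    # -> {'key1': 'subkey1=value1, subkey2=value2'}   (parsed as YAML)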

View File

@ -40,6 +40,7 @@ from subprocess import (
CalledProcessError,
)
from charmhelpers.core.hookenv import (
config,
local_unit,
relation_get,
relation_ids,
@ -64,6 +65,7 @@ from charmhelpers.fetch import (
)
from charmhelpers.core.kernel import modprobe
from charmhelpers.contrib.openstack.utils import config_flags_parser
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'
@ -1204,3 +1206,42 @@ def send_request_if_needed(request, relation='ceph'):
for rid in relation_ids(relation):
log('Sending request {}'.format(request.request_id), level=DEBUG)
relation_set(relation_id=rid, broker_req=request.request)
class CephConfContext(object):
"""Ceph config (ceph.conf) context.
Supports user-provided Ceph configuration settings. Users can provide a
dictionary as the value for the config-flags charm option containing
Ceph configuration settings keyed by their section in ceph.conf.
"""
def __init__(self, permitted_sections=None):
self.permitted_sections = permitted_sections or []
def __call__(self):
conf = config('config-flags')
if not conf:
return {}
conf = config_flags_parser(conf)
if type(conf) != dict:
log("Provided config-flags is not a dictionary - ignoring",
level=WARNING)
return {}
permitted = self.permitted_sections
if permitted:
diff = set(conf.keys()).difference(set(permitted))
if diff:
log("Config-flags contains invalid keys '%s' - they will be "
"ignored" % (', '.join(diff)), level=WARNING)
ceph_conf = {}
for key in conf:
if permitted and key not in permitted:
log("Ignoring key '%s'" % key, level=WARNING)
continue
ceph_conf[key] = conf[key]
return ceph_conf
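A hedged usage sketch (the config-flags value is an assumption):

    # with the charm option config-flags set to
    #   "{global: {osd max object name len: 256}}"
    ctxt = CephConfContext(permitted_sections=['global'])()
    # -> {'global': {'osd max object name len': 256}}; any other top-level
    #    section would be dropped with a warning.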

View File

@ -128,11 +128,8 @@ def service(action, service_name):
return subprocess.call(cmd) == 0
def systemv_services_running():
output = subprocess.check_output(
['service', '--status-all'],
stderr=subprocess.STDOUT).decode('UTF-8')
return [row.split()[-1] for row in output.split('\n') if '[ + ]' in row]
_UPSTART_CONF = "/etc/init/{}.conf"
_INIT_D_CONF = "/etc/init.d/{}"
def service_running(service_name):
@ -140,22 +137,22 @@ def service_running(service_name):
if init_is_systemd():
return service('is-active', service_name)
else:
try:
output = subprocess.check_output(
['service', service_name, 'status'],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
# This works for upstart scripts where the 'service' command
# returns a consistent string to represent running 'start/running'
if ("start/running" in output or "is running" in output or
"up and running" in output):
return True
if os.path.exists(_UPSTART_CONF.format(service_name)):
try:
output = subprocess.check_output(
['status', service_name],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
# This works for upstart scripts where the 'service' command
# returns a consistent string to represent running 'start/running'
if "start/running" in output:
return True
elif os.path.exists(_INIT_D_CONF.format(service_name)):
# Check System V scripts init script return codes
if service_name in systemv_services_running():
return True
return False
return service('status', service_name)
return False
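A brief usage sketch (the service name is illustrative):

    from charmhelpers.core.host import service_running

    # systemd hosts go through `systemctl is-active`, upstart hosts through
    # `status <name>`, and SysV-only hosts through `service --status-all`.
    running = service_running('apache2')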
def service_available(service_name):
@ -179,7 +176,7 @@ def init_is_systemd():
def adduser(username, password=None, shell='/bin/bash', system_user=False,
primary_group=None, secondary_groups=None):
primary_group=None, secondary_groups=None, uid=None):
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.
@ -190,15 +187,21 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False,
:param bool system_user: Whether to create a login or system user
:param str primary_group: Primary group for user; defaults to username
:param list secondary_groups: Optional list of additional groups
:param int uid: UID for user being created
:returns: The password database entry struct, as returned by `pwd.getpwnam`
"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
if uid:
user_info = pwd.getpwuid(int(uid))
log('user with uid {0} already exists!'.format(uid))
except KeyError:
log('creating user {0}'.format(username))
cmd = ['useradd']
if uid:
cmd.extend(['--uid', str(uid)])
if system_user or password is None:
cmd.append('--system')
else:
@ -233,14 +236,58 @@ def user_exists(username):
return user_exists
def add_group(group_name, system_group=False):
"""Add a group to the system"""
def uid_exists(uid):
"""Check if a uid exists"""
try:
pwd.getpwuid(uid)
uid_exists = True
except KeyError:
uid_exists = False
return uid_exists
def group_exists(groupname):
"""Check if a group exists"""
try:
grp.getgrnam(groupname)
group_exists = True
except KeyError:
group_exists = False
return group_exists
def gid_exists(gid):
"""Check if a gid exists"""
try:
grp.getgrgid(gid)
gid_exists = True
except KeyError:
gid_exists = False
return gid_exists
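A hedged sketch combining these helpers with the uid-aware adduser above and the gid-aware add_group shown just below (the name and id are assumptions):

    from charmhelpers.core.host import adduser, add_group, uid_exists, gid_exists

    if not gid_exists(64045):
        add_group('ceph', system_group=True, gid=64045)
    if not uid_exists(64045):
        adduser('ceph', system_user=True, uid=64045, primary_group='ceph')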
def add_group(group_name, system_group=False, gid=None):
"""Add a group to the system
Will log but otherwise succeed if the group already exists.
:param str group_name: group to create
:param bool system_group: Create system group
:param int gid: GID for group being created
:returns: The group database entry struct, as returned by `grp.getgrnam`
"""
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
if gid:
group_info = grp.getgrgid(gid)
log('group with gid {0} already exists!'.format(gid))
except KeyError:
log('creating group {0}'.format(group_name))
cmd = ['addgroup']
if gid:
cmd.extend(['--gid', str(gid)])
if system_group:
cmd.append('--system')
else:

View File

@ -106,6 +106,14 @@ CLOUD_ARCHIVE_POCKETS = {
'mitaka/proposed': 'trusty-proposed/mitaka',
'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
# Newton
'newton': 'xenial-updates/newton',
'xenial-newton': 'xenial-updates/newton',
'xenial-newton/updates': 'xenial-updates/newton',
'xenial-updates/newton': 'xenial-updates/newton',
'newton/proposed': 'xenial-proposed/newton',
'xenial-newton/proposed': 'xenial-proposed/newton',
'xenial-proposed/newton': 'xenial-proposed/newton',
}
# The order of this list is very important. Handlers should be listed in from

View File

@ -119,6 +119,7 @@ class NeutronPGPluginContext(context.NeutronContext):
pg_ctxt['metadata_mode'] = 'tunnel'
pg_ctxt['connector_type'] = config('connector-type')
pg_ctxt['user_domain_name'] = config('user-domain-name')
pg_ctxt['project_domain_name'] = config('project-domain-name')
if enable_metadata:
plumgrid_edge_ctxt = _edge_context()
pg_ctxt['nova_metadata_proxy_secret'] = \

View File

@ -60,4 +60,5 @@ admin_tenant_name = {{ admin_tenant_name }}
auth_uri = {{ service_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0/
identity_version = v2.0
user_domain_name = {{ user_domain_name }}
project_domain_name = {{ project_domain_name }}
{% endif -%}
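To illustrate the template change, a small sketch rendering the two domain lines with Jinja2 (the context values are assumptions):

    from jinja2 import Template

    snippet = ('user_domain_name = {{ user_domain_name }}\n'
               'project_domain_name = {{ project_domain_name }}\n')
    print(Template(snippet).render(user_domain_name='Default',
                                   project_domain_name='Default'))
    # user_domain_name = Default
    # project_domain_name = Default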

View File

@ -61,6 +61,7 @@ class NeutronPGContextTest(CharmTestCase):
'switch-password': 'plumgrid',
'connector-type': 'service',
'user-domain-name': 'Default',
'project-domain-name': 'Default'
}
def mock_config(key=None):
@ -92,6 +93,7 @@ class NeutronPGContextTest(CharmTestCase):
'metadata_mode': 'tunnel',
'connector_type': 'service',
'user_domain_name': 'Default',
'project_domain_name': 'Default',
'nova_metadata_proxy_secret': 'plumgrid',
'pg_metadata_ip': '169.254.169.254',
'pg_metadata_subnet': '169.254.169.254/30',