From 6c6db6ef5fc59d6d6bd44c9367b471e138194968 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Sat, 12 Jul 2014 06:01:20 +0000
Subject: [PATCH 001/125] API server reassert endpoints when taking over from nova-cc

---
 hooks/neutron_api_hooks.py | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py
index d40da3ec..23513ef3 100755
--- a/hooks/neutron_api_hooks.py
+++ b/hooks/neutron_api_hooks.py
@@ -1,6 +1,7 @@
 #!/usr/bin/python
 
 import sys
+import uuid
 from subprocess import check_call
 
 from charmhelpers.core.hookenv import (
@@ -172,9 +173,13 @@ def relation_broken():
 
 
 @hooks.hook('identity-service-relation-joined')
-def identity_joined(rid=None):
+def identity_joined(rid=None, relation_trigger=False):
+    # Use relation trigger to reassert endpoints with identity server
     base_url = canonical_url(CONFIGS)
-    relation_set(relation_id=rid, **determine_endpoints(base_url))
+    rel_settings = determine_endpoints(base_url)
+    if relation_trigger:
+        rel_settings['relation_trigger'] = str(uuid.uuid4())
+    relation_set(relation_id=rid, **rel_settings)
 
 
 @hooks.hook('identity-service-relation-changed')
@@ -205,7 +210,7 @@ def neutron_api_relation_joined(rid=None):
     # Nova-cc may have grabbed the quantum endpoint so kick identity-service
     # relation to register that its here
     for r_id in relation_ids('identity-service'):
-        identity_joined(rid=r_id)
+        identity_joined(rid=r_id, relation_trigger=True)
 
 
 @hooks.hook('neutron-api-relation-changed')

From a743bdc0eb8ab5c17fad84277691cc5804e794cf Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Mon, 14 Jul 2014 14:59:33 +0000
Subject: [PATCH 002/125] Don't use the nova_url from a non-api nova-cc in cell configuration

---
 hooks/neutron_api_context.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py
index 9180c2a2..0cc82739 100644
--- a/hooks/neutron_api_context.py
+++ b/hooks/neutron_api_context.py
@@ -69,6 +69,11 @@ class NeutronCCContext(context.NeutronContext):
                 ctxt['nova_url'] = relation_get(attribute='nova_url',
                                                 rid=rid,
                                                 unit=unit)
+                cell_type = relation_get(attribute='cell_type',
+                                         rid=rid,
+                                         unit=unit)
+                if cell_type and not cell_type == "api":
+                    pass
                 if ctxt['nova_url']:
                     return ctxt
         return ctxt

From 528df341685d19659ff809210fc62e3551559e4e Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Mon, 14 Jul 2014 15:05:46 +0000
Subject: [PATCH 003/125] Fix typo

---
 hooks/neutron_api_context.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py
index 0cc82739..59115135 100644
--- a/hooks/neutron_api_context.py
+++ b/hooks/neutron_api_context.py
@@ -73,7 +73,7 @@ class NeutronCCContext(context.NeutronContext):
                                          rid=rid,
                                          unit=unit)
                 if cell_type and not cell_type == "api":
-                    pass
+                    continue
                 if ctxt['nova_url']:
                     return ctxt
         return ctxt

From 79b22d8d2368daeaf32005ae803f92eb927bee86 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Thu, 4 Sep 2014 12:19:21 +0000
Subject: [PATCH 004/125] Added l2population support

---
 config.yaml                | 7 +++++++
 hooks/neutron_api_hooks.py | 4 +++-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/config.yaml b/config.yaml
index 3bc6da9d..a9e47a13 100644
--- a/config.yaml
+++ b/config.yaml
@@ -123,3 +123,10 @@ options:
     description: |
       SSL CA to use with the certificate and key provided - this is only
       required if you are providing a privately signed ssl_cert and ssl_key.
+  l2-population:
+    type: boolean
+    default: True
+    description: |
+      Populate the forwarding tables of virtual switches (LinuxBridge or OVS)
+      to decrease broadcast traffic inside the physical network fabric when
+      using overlay networks (VXLAN, GRE).
diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py
index 4721448c..be001cfb 100755
--- a/hooks/neutron_api_hooks.py
+++ b/hooks/neutron_api_hooks.py
@@ -217,6 +217,7 @@ def neutron_api_relation_joined(rid=None):
     relation_data = {
         'neutron-url': neutron_url,
         'neutron-plugin': config('neutron-plugin'),
+        'l2-population': config('l2_population'),
     }
     if config('neutron-security-groups'):
         relation_data['neutron-security-groups'] = "yes"
@@ -238,7 +239,8 @@ def neutron_api_relation_changed():
 @hooks.hook('neutron-plugin-api-relation-joined')
 def neutron_plugin_api_relation_joined(rid=None):
     relation_data = {
-        'neutron-security-groups': config('neutron-security-groups')
+        'neutron-security-groups': config('neutron-security-groups'),
+        'l2-population': config('l2_population'),
     }
     relation_set(relation_id=rid, **relation_data)
 

From a21d7b62251996917765e57e1aca2e25a35bfc21 Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Thu, 4 Sep 2014 12:35:48 +0000
Subject: [PATCH 005/125] Only allow l2population if the neutron plugin supports it

---
 hooks/neutron_api_hooks.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py
index be001cfb..125514ec 100755
--- a/hooks/neutron_api_hooks.py
+++ b/hooks/neutron_api_hooks.py
@@ -210,6 +210,11 @@ def identity_changed():
     configure_https()
 
 
+def get_l2population():
+    plugin = config('neutron-plugin')
+    return config('l2-population') if plugin == "ovs" else False
+
+
 @hooks.hook('neutron-api-relation-joined')
 def neutron_api_relation_joined(rid=None):
     base_url = canonical_url(CONFIGS, INTERNAL)
@@ -217,7 +222,7 @@ def neutron_api_relation_joined(rid=None):
     relation_data = {
         'neutron-url': neutron_url,
         'neutron-plugin': config('neutron-plugin'),
-        'l2-population': config('l2_population'),
+        'l2-population': get_l2population(),
     }
     if config('neutron-security-groups'):
         relation_data['neutron-security-groups'] = "yes"
@@ -240,7 +245,7 @@ def neutron_api_relation_changed():
 def neutron_plugin_api_relation_joined(rid=None):
     relation_data = {
         'neutron-security-groups': config('neutron-security-groups'),
-        'l2-population': config('l2_population'),
+        'l2-population': get_l2population(),
     }
     relation_set(relation_id=rid, **relation_data)
 

From e64b74824276d7f42551ba0cd35a8255b9b0bb7c Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Thu, 4 Sep 2014 12:58:55 +0000
Subject: [PATCH 006/125] Set l2_population in the local neutron_api ml2 conf

---
 hooks/neutron_api_context.py    | 8 ++++++++
 hooks/neutron_api_hooks.py      | 6 +-----
 templates/icehouse/ml2_conf.ini | 1 +
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py
index bdb7df3d..68d84921 100644
--- a/hooks/neutron_api_context.py
+++ b/hooks/neutron_api_context.py
@@ -10,6 +10,9 @@ from charmhelpers.contrib.hahelpers.cluster import (
     determine_apache_port,
 )
 
+def get_l2population():
+    plugin = config('neutron-plugin')
+    return config('l2-population') if plugin == "ovs" else False
 
 class ApacheSSLContext(context.ApacheSSLContext):
 
@@ -49,6 +52,10 @@ class NeutronCCContext(context.NeutronContext):
     def neutron_security_groups(self):
         return config('neutron-security-groups')
 
+    @property
+    def neutron_l2_population(self):
+        return 
get_l2population() + # Do not need the plugin agent installed on the api server def _ensure_packages(self): pass @@ -60,6 +67,7 @@ class NeutronCCContext(context.NeutronContext): def __call__(self): from neutron_api_utils import api_port ctxt = super(NeutronCCContext, self).__call__() + ctxt['l2_population'] = self.neutron_l2_population ctxt['external_network'] = config('neutron-external-network') ctxt['verbose'] = config('verbose') ctxt['debug'] = config('debug') diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 125514ec..ef1c156a 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -43,6 +43,7 @@ from neutron_api_utils import ( CLUSTER_RES, do_openstack_upgrade, ) +from neutron_api_context import get_l2population from charmhelpers.contrib.hahelpers.cluster import ( get_hacluster_config, @@ -210,11 +211,6 @@ def identity_changed(): configure_https() -def get_l2population(): - plugin = config('neutron-plugin') - return config('l2-population') if plugin == "ovs" else False - - @hooks.hook('neutron-api-relation-joined') def neutron_api_relation_joined(rid=None): base_url = canonical_url(CONFIGS, INTERNAL) diff --git a/templates/icehouse/ml2_conf.ini b/templates/icehouse/ml2_conf.ini index cf5e10d0..5474b098 100644 --- a/templates/icehouse/ml2_conf.ini +++ b/templates/icehouse/ml2_conf.ini @@ -20,6 +20,7 @@ local_ip = {{ local_ip }} [agent] tunnel_types = gre +l2_population = {{ l2_population }} [securitygroup] {% if neutron_security_groups -%} From eac17d3e18aade1dc973dd70ea8aad213e2005e8 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 4 Sep 2014 13:53:57 +0000 Subject: [PATCH 007/125] Nova cc doesn't need to know about the l2 pop setting --- hooks/neutron_api_hooks.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index ef1c156a..e67c45e1 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -218,7 +218,6 @@ def neutron_api_relation_joined(rid=None): relation_data = { 'neutron-url': neutron_url, 'neutron-plugin': config('neutron-plugin'), - 'l2-population': get_l2population(), } if config('neutron-security-groups'): relation_data['neutron-security-groups'] = "yes" From 3436322529a5ed20e09c0379f2218918ee282e98 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 4 Sep 2014 14:43:19 +0000 Subject: [PATCH 008/125] Fix tests and lint --- hooks/neutron_api_context.py | 2 ++ unit_tests/test_neutron_api_context.py | 1 + unit_tests/test_neutron_api_hooks.py | 9 ++++++--- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index 68d84921..3efaf01e 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -10,10 +10,12 @@ from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, ) + def get_l2population(): plugin = config('neutron-plugin') return config('l2-population') if plugin == "ovs" else False + class ApacheSSLContext(context.ApacheSSLContext): interfaces = ['https'] diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index e81ac10f..6cb970f4 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -127,6 +127,7 @@ class NeutronAPIContextsTest(CharmTestCase): 'external_network': 'bob', 'neutron_bind_port': self.api_port, 'verbose': True, + 'l2_population': True, } with patch.object(napi_ctxt, '_ensure_packages'): self.assertEquals(ctxt_data, napi_ctxt()) diff --git 
a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 04d73901..6bda03c7 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -30,6 +30,9 @@ TO_PATCH = [ 'determine_ports', 'do_openstack_upgrade', 'execd_preinstall', + 'get_iface_for_address', + 'get_l2population', + 'get_netmask_for_address', 'is_leader', 'is_relation_made', 'log', @@ -40,8 +43,6 @@ TO_PATCH = [ 'relation_ids', 'relation_set', 'unit_get', - 'get_iface_for_address', - 'get_netmask_for_address', ] NEUTRON_CONF_DIR = "/etc/neutron" @@ -256,10 +257,12 @@ class NeutronAPIHooksTests(CharmTestCase): self._call_hook('neutron-api-relation-changed') self.assertTrue(self.CONFIGS.write.called_with(NEUTRON_CONF)) - def test_neutron_plugin_api_relation_joined(self): + def test_neutron_plugin_api_relation_joined_nol2(self): _relation_data = { 'neutron-security-groups': False, + 'l2-population': False, } + self.get_l2population.return_value = False self._call_hook('neutron-plugin-api-relation-joined') self.relation_set.assert_called_with( relation_id=None, From e360473fae32081c110de1db2625ccced1bc8e54 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 11 Sep 2014 11:41:44 +0100 Subject: [PATCH 009/125] [hopem, r=] Neutron server fails if $state_path is used in [keystone_authtoken] so replace with expanded value. --- templates/icehouse/neutron.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/icehouse/neutron.conf b/templates/icehouse/neutron.conf index 9a47965c..7c00e9a1 100644 --- a/templates/icehouse/neutron.conf +++ b/templates/icehouse/neutron.conf @@ -53,7 +53,7 @@ quota_items = network,subnet,port,security_group,security_group_rule root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf [keystone_authtoken] -signing_dir = $state_path/keystone-signing +signing_dir = /var/lib/neutron/keystone-signing {% if service_host -%} service_protocol = {{ service_protocol }} service_host = {{ service_host }} From 1865e245a8f1c680ac1fe5ccb7df610edb637842 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 17 Sep 2014 17:26:59 +0800 Subject: [PATCH 010/125] Support neutron-api for IPv6. --- config.yaml | 6 ++++ hooks/charmhelpers/contrib/network/ip.py | 33 ++++++++++++++++- .../charmhelpers/contrib/openstack/context.py | 35 ++++++++++++++++--- hooks/neutron_api_context.py | 19 ++++++++++ hooks/neutron_api_hooks.py | 35 +++++++++++++------ hooks/neutron_api_utils.py | 3 +- templates/icehouse/neutron.conf | 2 +- unit_tests/test_neutron_api_context.py | 4 +++ 8 files changed, 119 insertions(+), 18 deletions(-) diff --git a/config.yaml b/config.yaml index 3bc6da9d..17979c7c 100644 --- a/config.yaml +++ b/config.yaml @@ -123,3 +123,9 @@ options: description: | SSL CA to use with the certificate and key provided - this is only required if you are providing a privately signed ssl_cert and ssl_key. 
+ # IPv6 + prefer-ipv6: + default: false + type: boolean + description: "Enable IPv6" + diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 0972e91a..f8cc1975 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -4,7 +4,7 @@ from functools import partial from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, + ERROR, log, config, ) try: @@ -154,3 +154,34 @@ def _get_for_address(address, key): get_iface_for_address = partial(_get_for_address, key='iface') get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def get_ipv6_addr(iface="eth0"): + try: + iface_addrs = netifaces.ifaddresses(iface) + if netifaces.AF_INET6 not in iface_addrs: + raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) + + addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] + ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') + and config('vip') != a['addr']] + if not ipv6_addr: + raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) + + return ipv6_addr[0] + + except ValueError: + raise ValueError("Invalid interface '%s'" % iface) + + +def format_ipv6_addr(address): + """ + IPv6 needs to be wrapped with [] in url link to parse correctly. + """ + if is_ipv6(address): + address = "[%s]" % address + else: + log("Not an valid ipv6 address: %s" % address, + level=ERROR) + address = None + return address diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 92c41b23..d41b74a2 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -44,7 +44,10 @@ from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, ) -from charmhelpers.contrib.network.ip import get_address_in_network +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_ipv6_addr, +) CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -401,9 +404,12 @@ class HAProxyContext(OSContextGenerator): cluster_hosts = {} l_unit = local_unit().replace('/', '-') - cluster_hosts[l_unit] = \ - get_address_in_network(config('os-internal-network'), - unit_get('private-address')) + if config('prefer-ipv6'): + addr = get_ipv6_addr() + else: + addr = unit_get('private-address') + cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), + addr) for rid in relation_ids('cluster'): for unit in related_units(rid): @@ -414,6 +420,16 @@ class HAProxyContext(OSContextGenerator): ctxt = { 'units': cluster_hosts, } + + if config('prefer-ipv6'): + ctxt['local_host'] = 'ip6-localhost' + ctxt['haproxy_host'] = '::' + ctxt['stat_port'] = ':::8888' + else: + ctxt['local_host'] = '127.0.0.1' + ctxt['haproxy_host'] = '0.0.0.0' + ctxt['stat_port'] = ':8888' + if len(cluster_hosts.keys()) > 1: # Enable haproxy when we have enough peers. 
log('Ensuring haproxy enabled in /etc/default/haproxy.') @@ -753,6 +769,17 @@ class SubordinateConfigContext(OSContextGenerator): return ctxt +class LogLevelContext(OSContextGenerator): + + def __call__(self): + ctxt = {} + ctxt['debug'] = \ + False if config('debug') is None else config('debug') + ctxt['verbose'] = \ + False if config('verbose') is None else config('verbose') + return ctxt + + class SyslogContext(OSContextGenerator): def __call__(self): diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index bdb7df3d..f6db7537 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -10,6 +10,10 @@ from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, ) +from charmhelpers.contrib.network.ip import( + format_ipv6_addr, +) + class ApacheSSLContext(context.ApacheSSLContext): @@ -101,3 +105,18 @@ class HAProxyContext(context.HAProxyContext): # for haproxy.conf ctxt['service_ports'] = port_mapping return ctxt + + +class NeutronCCIPv6Context(context.SharedDBContext): + def __call__(self): + ctxt = super(NeutronCCIPv6Context, self).__call__() + print "ctxt:%s" % ctxt + if config('prefer-ipv6'): + ctxt['bind_host'] = '::' + else: + ctxt['bind_host'] = '0.0.0.0' + + if ctxt.get('database_host'): + db_host = ctxt['database_host'] + ctxt['database_host'] = format_ipv6_addr(db_host) or db_host + return ctxt diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 0a99387e..1553b0ac 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -58,7 +58,8 @@ from charmhelpers.contrib.openstack.ip import ( from charmhelpers.contrib.network.ip import ( get_iface_for_address, - get_netmask_for_address + get_netmask_for_address, + get_ipv6_addr, ) hooks = Hooks() @@ -137,9 +138,14 @@ def db_joined(): log(e, level=ERROR) raise Exception(e) + if config('prefer-ipv6'): + host = get_ipv6_addr() + else: + host = unit_get('private-address') + relation_set(database=config('database'), username=config('database-user'), - hostname=unit_get('private-address')) + hostname=host) @hooks.hook('pgsql-db-relation-joined') @@ -252,7 +258,14 @@ def cluster_changed(): @hooks.hook('ha-relation-joined') def ha_joined(): - config = get_hacluster_config() + cluster_config = get_hacluster_config() + if config('prefer-ipv6'): + res_neutron_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_neutron_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + resources = { 'res_neutron_haproxy': 'lsb:haproxy', } @@ -260,21 +273,21 @@ def ha_joined(): 'res_neutron_haproxy': 'op monitor interval="5s"' } vip_group = [] - for vip in config['vip'].split(): + for vip in cluster_config['vip'].split(): iface = get_iface_for_address(vip) if iface is not None: vip_key = 'res_neutron_{}_vip'.format(iface) - resources[vip_key] = 'ocf:heartbeat:IPaddr2' + resources[vip_key] = res_neutron_vip resource_params[vip_key] = ( - 'params ip="{vip}" cidr_netmask="{netmask}"' - ' nic="{iface}"'.format(vip=vip, + 'params {ip}="{vip}" cidr_netmask="{netmask}"' + ' nic="{iface}"'.format(ip=vip_params, + vip=vip, iface=iface, netmask=get_netmask_for_address(vip)) ) vip_group.append(vip_key) - if len(vip_group) >= 1: - relation_set(groups={'grp_neutron_vips': ' '.join(vip_group)}) + relation_set(groups={'grp_neutron_vips': ' '.join(vip_group)}) init_services = { 'res_neutron_haproxy': 'haproxy' @@ -283,8 +296,8 @@ def ha_joined(): 'cl_nova_haproxy': 'res_neutron_haproxy' } relation_set(init_services=init_services, - 
corosync_bindiface=config['ha-bindiface'], - corosync_mcastport=config['ha-mcastport'], + corosync_bindiface=cluster_config['ha-bindiface'], + corosync_mcastport=cluster_config['ha-mcastport'], resources=resources, resource_params=resource_params, clones=clones) diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 3a000068..43018767 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -61,7 +61,8 @@ BASE_RESOURCE_MAP = OrderedDict([ context.PostgresqlDBContext(database=config('database')), neutron_api_context.IdentityServiceContext(), neutron_api_context.NeutronCCContext(), - context.SyslogContext()], + context.SyslogContext(), + neutron_api_context.NeutronCCIPv6Context()], }), (NEUTRON_DEFAULT, { 'services': ['neutron-server'], diff --git a/templates/icehouse/neutron.conf b/templates/icehouse/neutron.conf index 9a47965c..efb9447d 100644 --- a/templates/icehouse/neutron.conf +++ b/templates/icehouse/neutron.conf @@ -8,7 +8,7 @@ debug = {{ debug }} use_syslog = {{ use_syslog }} state_path = /var/lib/neutron lock_path = $state_path/lock -bind_host = 0.0.0.0 +bind_host = {{ bind_host }} auth_strategy = keystone notification_driver = neutron.openstack.common.notifier.rpc_notifier diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index e81ac10f..2b0541f4 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -19,6 +19,7 @@ class IdentityServiceContext(CharmTestCase): self.relation_get.side_effect = self.test_relation.get self.config.side_effect = self.test_config.get self.test_config.set('region', 'region457') + self.test_config.set('prefer-ipv6', False) @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') @@ -89,6 +90,9 @@ class HAProxyContextTest(CharmTestCase): service_ports = {'neutron-server': [9696, 9686]} ctxt_data = { + 'local_host': '127.0.0.1', + 'haproxy_host': '0.0.0.0', + 'stat_port': ':8888', 'units': unit_addresses, 'service_ports': service_ports, 'neutron_bind_port': 9686, From 2ab7e2671cce725a736f441ee481dfe7551c4e41 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Thu, 18 Sep 2014 13:01:12 +0800 Subject: [PATCH 011/125] Add haproxy and fix unit tests error. 
--- hooks/neutron_api_hooks.py | 15 +++++++++--- unit_tests/test_neutron_api_hooks.py | 36 ++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 1553b0ac..19781969 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -22,12 +22,15 @@ from charmhelpers.core.host import ( ) from charmhelpers.fetch import ( - apt_install, apt_update + apt_install, + apt_update, + add_source ) from charmhelpers.contrib.openstack.utils import ( configure_installation_source, openstack_upgrade_available, + lsb_release, ) from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, @@ -89,8 +92,14 @@ def configure_https(): def install(): execd_preinstall() configure_installation_source(config('openstack-origin')) + trusty = lsb_release()['DISTRIB_CODENAME'] == 'trusty' + if config('prefer-ipv6') and trusty: + add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports' + ' main') apt_update() apt_install(determine_packages(), fatal=True) + if config('prefer-ipv6') and trusty: + apt_install('haproxy/trusty-backports', fatal=True) [open_port(port) for port in determine_ports()] @@ -279,8 +288,8 @@ def ha_joined(): vip_key = 'res_neutron_{}_vip'.format(iface) resources[vip_key] = res_neutron_vip resource_params[vip_key] = ( - 'params {ip}="{vip}" cidr_netmask="{netmask}"' - ' nic="{iface}"'.format(ip=vip_params, + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}"'.format(ip=vip_params, vip=vip, iface=iface, netmask=get_netmask_for_address(vip)) diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 04d73901..004f8e18 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -303,6 +303,42 @@ class NeutronAPIHooksTests(CharmTestCase): **_relation_data ) + @patch.object(hooks, 'get_hacluster_config') + def test_ha_joined_with_ipv6(self, _get_ha_config): + self.test_config.set('prefer-ipv6', 'True') + _ha_config = { + 'vip': '2001:db8:1::1', + 'vip_cidr': '64', + 'vip_iface': 'eth0', + 'ha-bindiface': 'eth1', + 'ha-mcastport': '5405', + } + vip_params = 'params ipv6addr="%s" ' \ + 'cidr_netmask="ffff.ffff.ffff.ffff" ' \ + 'nic="%s"' % \ + (_ha_config['vip'], _ha_config['vip_iface']) + _get_ha_config.return_value = _ha_config + self.get_iface_for_address.return_value = 'eth0' + self.get_netmask_for_address.return_value = 'ffff.ffff.ffff.ffff' + _relation_data = { + 'init_services': {'res_neutron_haproxy': 'haproxy'}, + 'corosync_bindiface': _ha_config['ha-bindiface'], + 'corosync_mcastport': _ha_config['ha-mcastport'], + 'resources': { + 'res_neutron_eth0_vip': 'ocf:heartbeat:IPv6addr', + 'res_neutron_haproxy': 'lsb:haproxy' + }, + 'resource_params': { + 'res_neutron_eth0_vip': vip_params, + 'res_neutron_haproxy': 'op monitor interval="5s"' + }, + 'clones': {'cl_nova_haproxy': 'res_neutron_haproxy'} + } + self._call_hook('ha-relation-joined') + self.relation_set.assert_called_with( + **_relation_data + ) + def test_ha_changed(self): self.test_relation.set({ 'clustered': 'true', From 48db703268bc44c55da43f2a1a24cdc3ddf60081 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Thu, 18 Sep 2014 17:11:56 +0800 Subject: [PATCH 012/125] Set ipv6 address for cluster --- hooks/neutron_api_hooks.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 19781969..6e84fde0 100755 --- a/hooks/neutron_api_hooks.py +++ 
b/hooks/neutron_api_hooks.py @@ -262,6 +262,12 @@ def neutron_plugin_api_relation_joined(rid=None): 'cluster-relation-departed') @restart_on_change(restart_map(), stopstart=True) def cluster_changed(): + if config('prefer-ipv6'): + for rid in relation_ids('cluster'): + relation_set(relation_id=rid, + relation_settings={'private-address': + get_ipv6_addr()}) + CONFIGS.write_all() @@ -290,9 +296,9 @@ def ha_joined(): resource_params[vip_key] = ( 'params {ip}="{vip}" cidr_netmask="{netmask}" ' 'nic="{iface}"'.format(ip=vip_params, - vip=vip, - iface=iface, - netmask=get_netmask_for_address(vip)) + vip=vip, + iface=iface, + netmask=get_netmask_for_address(vip)) ) vip_group.append(vip_key) From 9529e6a2fb4632ba07d55e3a2afd3fa9d9ecf22f Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Thu, 18 Sep 2014 21:28:17 +0800 Subject: [PATCH 013/125] Integrated all IPv6 check into setup_ipv6()/ --- config.yaml | 2 -- hooks/neutron_api_context.py | 8 -------- hooks/neutron_api_hooks.py | 21 ++++++++++----------- hooks/neutron_api_utils.py | 29 ++++++++++++++++++++++++++++- 4 files changed, 38 insertions(+), 22 deletions(-) diff --git a/config.yaml b/config.yaml index 17979c7c..e1636aff 100644 --- a/config.yaml +++ b/config.yaml @@ -123,9 +123,7 @@ options: description: | SSL CA to use with the certificate and key provided - this is only required if you are providing a privately signed ssl_cert and ssl_key. - # IPv6 prefer-ipv6: default: false type: boolean description: "Enable IPv6" - diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index f6db7537..37a45dac 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -10,10 +10,6 @@ from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, ) -from charmhelpers.contrib.network.ip import( - format_ipv6_addr, -) - class ApacheSSLContext(context.ApacheSSLContext): @@ -110,13 +106,9 @@ class HAProxyContext(context.HAProxyContext): class NeutronCCIPv6Context(context.SharedDBContext): def __call__(self): ctxt = super(NeutronCCIPv6Context, self).__call__() - print "ctxt:%s" % ctxt if config('prefer-ipv6'): ctxt['bind_host'] = '::' else: ctxt['bind_host'] = '0.0.0.0' - if ctxt.get('database_host'): - db_host = ctxt['database_host'] - ctxt['database_host'] = format_ipv6_addr(db_host) or db_host return ctxt diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 6e84fde0..b352b99d 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -23,14 +23,12 @@ from charmhelpers.core.host import ( from charmhelpers.fetch import ( apt_install, - apt_update, - add_source + apt_update ) from charmhelpers.contrib.openstack.utils import ( configure_installation_source, - openstack_upgrade_available, - lsb_release, + openstack_upgrade_available ) from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, @@ -45,6 +43,7 @@ from neutron_api_utils import ( api_port, CLUSTER_RES, do_openstack_upgrade, + setup_ipv6 ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -62,7 +61,7 @@ from charmhelpers.contrib.openstack.ip import ( from charmhelpers.contrib.network.ip import ( get_iface_for_address, get_netmask_for_address, - get_ipv6_addr, + get_ipv6_addr ) hooks = Hooks() @@ -92,14 +91,11 @@ def configure_https(): def install(): execd_preinstall() configure_installation_source(config('openstack-origin')) - trusty = lsb_release()['DISTRIB_CODENAME'] == 'trusty' - if config('prefer-ipv6') and trusty: - add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports' - 
' main') + if config('prefer-ipv6'): + setup_ipv6() + apt_update() apt_install(determine_packages(), fatal=True) - if config('prefer-ipv6') and trusty: - apt_install('haproxy/trusty-backports', fatal=True) [open_port(port) for port in determine_ports()] @@ -107,6 +103,9 @@ def install(): @hooks.hook('config-changed') @restart_on_change(restart_map(), stopstart=True) def config_changed(): + if config('prefer-ipv6'): + setup_ipv6() + global CONFIGS if openstack_upgrade_available('neutron-server'): do_openstack_upgrade(CONFIGS) diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 43018767..3cd44934 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -17,7 +17,18 @@ from charmhelpers.core.hookenv import ( config, log, ) -from charmhelpers.fetch import apt_update, apt_install, apt_upgrade + +from charmhelpers.fetch import ( + apt_update, + apt_install, + apt_upgrade, + add_source +) + +from charmhelpers.core.host import ( + lsb_release +) + import neutron_api_context TEMPLATES = 'templates/' @@ -197,3 +208,19 @@ def do_openstack_upgrade(configs): # set CONFIGS to load templates from new release configs.set_release(openstack_release=new_os_rel) + + +def setup_ipv6(): + ubuntu_rel = float(lsb_release()['DISTRIB_RELEASE']) + if ubuntu_rel < 14.04: + raise Exception("IPv6 is not supported for Ubuntu " + "versions less than Trusty 14.04") + + # NOTE(xianghui): Need to install haproxy(1.5.3) from trusty-backports + # to support ipv6 address, so check is required to make sure not + # breaking other versions, IPv6 only support for >= Trusty + if ubuntu_rel == 14.04: + add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports' + ' main') + apt_update() + apt_install('haproxy/trusty-backports', fatal=True) From 24aba46151d2bf35dd00f24d50d933d8ad42db6e Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Sat, 20 Sep 2014 00:52:38 +0800 Subject: [PATCH 014/125] Sync charm-helpers, use get_ipv6_addr()[0] directly --- .../charmhelpers/contrib/hahelpers/cluster.py | 68 +++- hooks/charmhelpers/contrib/network/ip.py | 109 ++++-- .../contrib/openstack/amulet/deployment.py | 20 +- .../contrib/openstack/amulet/utils.py | 106 ++++-- .../charmhelpers/contrib/openstack/context.py | 28 +- hooks/charmhelpers/contrib/openstack/ip.py | 10 +- .../contrib/openstack/templates/haproxy.cfg | 6 +- hooks/charmhelpers/contrib/openstack/utils.py | 10 +- .../contrib/storage/linux/utils.py | 3 + hooks/charmhelpers/core/hookenv.py | 55 ++- hooks/charmhelpers/core/host.py | 43 ++- hooks/charmhelpers/core/services/__init__.py | 2 + hooks/charmhelpers/core/services/base.py | 313 ++++++++++++++++++ hooks/charmhelpers/core/services/helpers.py | 125 +++++++ hooks/charmhelpers/core/templating.py | 51 +++ hooks/charmhelpers/fetch/__init__.py | 63 +++- hooks/charmhelpers/fetch/archiveurl.py | 40 +++ hooks/neutron_api_hooks.py | 4 +- unit_tests/test_neutron_api_context.py | 7 +- unit_tests/test_neutron_api_hooks.py | 57 ++-- 20 files changed, 973 insertions(+), 147 deletions(-) create mode 100644 hooks/charmhelpers/core/services/__init__.py create mode 100644 hooks/charmhelpers/core/services/base.py create mode 100644 hooks/charmhelpers/core/services/helpers.py create mode 100644 hooks/charmhelpers/core/templating.py diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 505de6b2..7151b1d0 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -6,6 +6,11 @@ # Adam 
Gandelman # +""" +Helpers for clustering and determining "cluster leadership" and other +clustering-related helpers. +""" + import subprocess import os @@ -19,6 +24,7 @@ from charmhelpers.core.hookenv import ( config as config_get, INFO, ERROR, + WARNING, unit_get, ) @@ -27,6 +33,29 @@ class HAIncompleteConfig(Exception): pass +def is_elected_leader(resource): + """ + Returns True if the charm executing this is the elected cluster leader. + + It relies on two mechanisms to determine leadership: + 1. If the charm is part of a corosync cluster, call corosync to + determine leadership. + 2. If the charm is not part of a corosync cluster, the leader is + determined as being "the alive unit with the lowest unit numer". In + other words, the oldest surviving unit. + """ + if is_clustered(): + if not is_crm_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + def is_clustered(): for r_id in (relation_ids('ha') or []): for unit in (relation_list(r_id) or []): @@ -38,7 +67,11 @@ def is_clustered(): return False -def is_leader(resource): +def is_crm_leader(resource): + """ + Returns True if the charm calling this is the elected corosync leader, + as returned by calling the external "crm" command. + """ cmd = [ "crm", "resource", "show", resource @@ -54,15 +87,31 @@ def is_leader(resource): return False -def peer_units(): +def is_leader(resource): + log("is_leader is deprecated. Please consider using is_crm_leader " + "instead.", level=WARNING) + return is_crm_leader(resource) + + +def peer_units(peer_relation="cluster"): peers = [] - for r_id in (relation_ids('cluster') or []): + for r_id in (relation_ids(peer_relation) or []): for unit in (relation_list(r_id) or []): peers.append(unit) return peers +def peer_ips(peer_relation='cluster', addr_key='private-address'): + '''Return a dict of peers and their private-address''' + peers = {} + for r_id in relation_ids(peer_relation): + for unit in relation_list(r_id): + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) + return peers + + def oldest_peer(peers): + """Determines who the oldest peer is by comparing unit numbers.""" local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) for peer in peers: remote_unit_no = int(peer.split('/')[1]) @@ -72,16 +121,9 @@ def oldest_peer(peers): def eligible_leader(resource): - if is_clustered(): - if not is_leader(resource): - log('Deferring action to CRM leader.', level=INFO) - return False - else: - peers = peer_units() - if peers and not oldest_peer(peers): - log('Deferring action to oldest service unit.', level=INFO) - return False - return True + log("eligible_leader is deprecated. 
Please consider using " + "is_elected_leader instead.", level=WARNING) + return is_elected_leader(resource) def https(): diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index f8cc1975..b859a097 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -1,10 +1,11 @@ +import glob import sys from functools import partial from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, config, + ERROR, log, ) try: @@ -156,24 +157,6 @@ get_iface_for_address = partial(_get_for_address, key='iface') get_netmask_for_address = partial(_get_for_address, key='netmask') -def get_ipv6_addr(iface="eth0"): - try: - iface_addrs = netifaces.ifaddresses(iface) - if netifaces.AF_INET6 not in iface_addrs: - raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) - - addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] - ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') - and config('vip') != a['addr']] - if not ipv6_addr: - raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) - - return ipv6_addr[0] - - except ValueError: - raise ValueError("Invalid interface '%s'" % iface) - - def format_ipv6_addr(address): """ IPv6 needs to be wrapped with [] in url link to parse correctly. @@ -185,3 +168,91 @@ def format_ipv6_addr(address): level=ERROR) address = None return address + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IP address for a given interface, if any, or []. + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + if not exc_list: + exc_list = [] + try: + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception('Unknown inet type ' + str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("%s not found " % (iface)) + else: + return [] + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) + return addresses + +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') + + +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IPv6 address for a given interface, if any, or []. + """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + remotly_addressable = [] + for address in addresses: + if not address.startswith('fe80'): + remotly_addressable.append(address) + if fatal and not remotly_addressable: + raise Exception("Interface '%s' doesn't have global ipv6 address." 
% iface) + return remotly_addressable + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of bridges on the system or [] + """ + b_rgex = vnic_dir + '/*/bridge' + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of nics comprising a given bridge on the system or [] + """ + brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + + +def is_bridge_member(nic): + """ + Check if a given nic is a member of a bridge + """ + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + return False diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index e476b6f2..9179eeb1 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -4,8 +4,11 @@ from charmhelpers.contrib.amulet.deployment import ( class OpenStackAmuletDeployment(AmuletDeployment): - """This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms.""" + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. + """ def __init__(self, series=None, openstack=None, source=None): """Initialize the deployment environment.""" @@ -40,11 +43,14 @@ class OpenStackAmuletDeployment(AmuletDeployment): self.d.configure(service, config) def _get_openstack_release(self): - """Return an integer representing the enum value of the openstack - release.""" - self.precise_essex, self.precise_folsom, self.precise_grizzly, \ - self.precise_havana, self.precise_icehouse, \ - self.trusty_icehouse = range(6) + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse) = range(6) releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 222281e3..bd327bdc 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -16,8 +16,11 @@ ERROR = logging.ERROR class OpenStackAmuletUtils(AmuletUtils): - """This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charms.""" + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms. + """ def __init__(self, log_level=ERROR): """Initialize the deployment environment.""" @@ -25,13 +28,17 @@ class OpenStackAmuletUtils(AmuletUtils): def validate_endpoint_data(self, endpoints, admin_port, internal_port, public_port, expected): - """Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint.""" + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. 
+ """ found = False for ep in endpoints: self.log.debug('endpoint: {}'.format(repr(ep))) - if admin_port in ep.adminurl and internal_port in ep.internalurl \ - and public_port in ep.publicurl: + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): found = True actual = {'id': ep.id, 'region': ep.region, @@ -47,8 +54,11 @@ class OpenStackAmuletUtils(AmuletUtils): return 'endpoint not found' def validate_svc_catalog_endpoint_data(self, expected, actual): - """Validate a list of actual service catalog endpoints vs a list of - expected service catalog endpoints.""" + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ self.log.debug('actual: {}'.format(repr(actual))) for k, v in expected.iteritems(): if k in actual: @@ -60,8 +70,11 @@ class OpenStackAmuletUtils(AmuletUtils): return ret def validate_tenant_data(self, expected, actual): - """Validate a list of actual tenant data vs list of expected tenant - data.""" + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -78,8 +91,11 @@ class OpenStackAmuletUtils(AmuletUtils): return ret def validate_role_data(self, expected, actual): - """Validate a list of actual role data vs a list of expected role - data.""" + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -95,8 +111,11 @@ class OpenStackAmuletUtils(AmuletUtils): return ret def validate_user_data(self, expected, actual): - """Validate a list of actual user data vs a list of expected user - data.""" + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -114,21 +133,24 @@ class OpenStackAmuletUtils(AmuletUtils): return ret def validate_flavor_data(self, expected, actual): - """Validate a list of actual flavors vs a list of expected flavors.""" + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. 
+ """ self.log.debug('actual: {}'.format(repr(actual))) act = [a.name for a in actual] return self._validate_list_data(expected, act) def tenant_exists(self, keystone, tenant): - """Return True if tenant exists""" + """Return True if tenant exists.""" return tenant in [t.name for t in keystone.tenants.list()] def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant): """Authenticates admin user with the keystone admin endpoint.""" - service_ip = \ - keystone_sentry.relation('shared-db', - 'mysql:shared-db')['private-address'] + unit = keystone_sentry + service_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) return keystone_client.Client(username=user, password=password, tenant_name=tenant, auth_url=ep) @@ -177,12 +199,40 @@ class OpenStackAmuletUtils(AmuletUtils): image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + return image def delete_image(self, glance, image): """Delete the specified image.""" + num_before = len(list(glance.images.list())) glance.images.delete(image) + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" image = nova.images.find(name=image_name) @@ -199,11 +249,27 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('instance status: {}'.format(status)) count += 1 - if status == 'BUILD': + if status != 'ACTIVE': + self.log.error('instance creation timed out') return None return instance def delete_instance(self, nova, instance): """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index d41b74a2..f40ab846 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -47,6 +47,7 @@ from charmhelpers.contrib.openstack.neutron import ( from charmhelpers.contrib.network.ip import ( get_address_in_network, get_ipv6_addr, + format_ipv6_addr, ) CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -168,8 +169,10 @@ class SharedDBContext(OSContextGenerator): for rid in relation_ids('shared-db'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) + host = rdata.get('db_host') + host = format_ipv6_addr(host) or host ctxt = { - 'database_host': 
rdata.get('db_host'), + 'database_host': host, 'database': self.database, 'database_user': self.user, 'database_password': rdata.get(password_setting), @@ -245,9 +248,12 @@ class IdentityServiceContext(OSContextGenerator): for rid in relation_ids('identity-service'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) + serv_host = rdata.get('service_host') + serv_host = format_ipv6_addr(serv_host) or serv_host + ctxt = { 'service_port': rdata.get('service_port'), - 'service_host': rdata.get('service_host'), + 'service_host': serv_host, 'auth_host': rdata.get('auth_host'), 'auth_port': rdata.get('auth_port'), 'admin_tenant_name': rdata.get('service_tenant'), @@ -297,11 +303,13 @@ class AMQPContext(OSContextGenerator): for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): ctxt['clustered'] = True - ctxt['rabbitmq_host'] = relation_get('vip', rid=rid, - unit=unit) + vip = relation_get('vip', rid=rid, unit=unit) + vip = format_ipv6_addr(vip) or vip + ctxt['rabbitmq_host'] = vip else: - ctxt['rabbitmq_host'] = relation_get('private-address', - rid=rid, unit=unit) + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + ctxt['rabbitmq_host'] = host ctxt.update({ 'rabbitmq_user': username, 'rabbitmq_password': relation_get('password', rid=rid, @@ -340,8 +348,9 @@ class AMQPContext(OSContextGenerator): and len(related_units(rid)) > 1: rabbitmq_hosts = [] for unit in related_units(rid): - rabbitmq_hosts.append(relation_get('private-address', - rid=rid, unit=unit)) + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + rabbitmq_hosts.append(host) ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) if not context_complete(ctxt): return {} @@ -370,6 +379,7 @@ class CephContext(OSContextGenerator): ceph_addr = \ relation_get('ceph-public-address', rid=rid, unit=unit) or \ relation_get('private-address', rid=rid, unit=unit) + ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr mon_hosts.append(ceph_addr) ctxt = { @@ -405,7 +415,7 @@ class HAProxyContext(OSContextGenerator): cluster_hosts = {} l_unit = local_unit().replace('/', '-') if config('prefer-ipv6'): - addr = get_ipv6_addr() + addr = get_ipv6_addr()[0] else: addr = unit_get('private-address') cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py index 7e7a536f..affe8cd1 100644 --- a/hooks/charmhelpers/contrib/openstack/ip.py +++ b/hooks/charmhelpers/contrib/openstack/ip.py @@ -7,6 +7,7 @@ from charmhelpers.contrib.network.ip import ( get_address_in_network, is_address_in_network, is_ipv6, + get_ipv6_addr, ) from charmhelpers.contrib.hahelpers.cluster import is_clustered @@ -64,10 +65,13 @@ def resolve_address(endpoint_type=PUBLIC): vip): resolved_address = vip else: + if config('prefer-ipv6'): + fallback_addr = get_ipv6_addr() + else: + fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) resolved_address = get_address_in_network( - config(_address_map[endpoint_type]['config']), - unit_get(_address_map[endpoint_type]['fallback']) - ) + config(_address_map[endpoint_type]['config']), fallback_addr) + if resolved_address is None: raise ValueError('Unable to resolve a suitable IP address' ' based on charm state and configuration') diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 
a95eddd1..ce0e2738 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -1,6 +1,6 @@ global - log 127.0.0.1 local0 - log 127.0.0.1 local1 notice + log {{ local_host }} local0 + log {{ local_host }} local1 notice maxconn 20000 user haproxy group haproxy @@ -17,7 +17,7 @@ defaults timeout client 30000 timeout server 30000 -listen stats :8888 +listen stats {{ stat_port }} mode http stats enable stats hide-version diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 127b03fe..23d237de 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -23,7 +23,7 @@ from charmhelpers.contrib.storage.linux.lvm import ( ) from charmhelpers.core.host import lsb_release, mounts, umount -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_cache from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device @@ -70,6 +70,7 @@ SWIFT_CODENAMES = OrderedDict([ ('1.13.0', 'icehouse'), ('1.12.0', 'icehouse'), ('1.11.0', 'icehouse'), + ('2.0.0', 'juno'), ]) DEFAULT_LOOPBACK_SIZE = '5G' @@ -134,13 +135,8 @@ def get_os_version_codename(codename): def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' import apt_pkg as apt - apt.init() - # Tell apt to build an in-memory cache to prevent race conditions (if - # another process is already building the cache). - apt.config.set("Dir::Cache::pkgcache", "") - - cache = apt.Cache() + cache = apt_cache() try: pkg = cache[package] diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index 8d0f6116..1b958712 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -46,5 +46,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. ''' + is_partition = bool(re.search(r".*[0-9]+\b", device)) out = check_output(['mount']) + if is_partition: + return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index c9530433..324987e6 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -156,12 +156,15 @@ def hook_name(): class Config(dict): - """A Juju charm config dictionary that can write itself to - disk (as json) and track which values have changed since - the previous hook invocation. + """A dictionary representation of the charm's config.yaml, with some + extra features: - Do not instantiate this object directly - instead call - ``hookenv.config()`` + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. 
Example usage:: @@ -170,8 +173,8 @@ class Config(dict): >>> config = hookenv.config() >>> config['foo'] 'bar' + >>> # store a new key/value for later use >>> config['mykey'] = 'myval' - >>> config.save() >>> # user runs `juju set mycharm foo=baz` @@ -188,22 +191,34 @@ class Config(dict): >>> # keys/values that we add are preserved across hooks >>> config['mykey'] 'myval' - >>> # don't forget to save at the end of hook! - >>> config.save() """ CONFIG_FILE_NAME = '.juju-persistent-config' def __init__(self, *args, **kw): super(Config, self).__init__(*args, **kw) + self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup. + + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + def load_previous(self, path=None): - """Load previous copy of config from disk so that current values - can be compared to previous values. + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. :param path: @@ -218,8 +233,8 @@ class Config(dict): self._prev_dict = json.load(f) def changed(self, key): - """Return true if the value for this key has changed since - the last save. + """Return True if the current value for this key is different from + the previous value. """ if self._prev_dict is None: @@ -228,7 +243,7 @@ class Config(dict): def previous(self, key): """Return previous value for this key, or None if there - is no "previous" value. + is no previous value. """ if self._prev_dict: @@ -238,7 +253,13 @@ class Config(dict): def save(self): """Save this config to disk. - Preserves items in _prev_dict that do not exist in self. + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. 
""" if self._prev_dict: @@ -285,8 +306,9 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings={}, **kwargs): +def relation_set(relation_id=None, relation_settings=None, **kwargs): """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) @@ -477,6 +499,9 @@ class Hooks(object): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index d934f940..b85b0280 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -12,6 +12,8 @@ import random import string import subprocess import hashlib +import shutil +from contextlib import contextmanager from collections import OrderedDict @@ -52,7 +54,7 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status']) + output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) except subprocess.CalledProcessError: return False else: @@ -62,6 +64,16 @@ def service_running(service): return False +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + return False + else: + return True + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user to the system""" try: @@ -320,12 +332,29 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg + from charmhelpers.fetch import apt_cache if not pkgcache: - apt_pkg.init() - # Force Apt to build its cache in memory. That way we avoid race - # conditions with other applications building the cache in the same - # place. 
- apt_pkg.config.set("Dir::Cache::pkgcache", "") - pkgcache = apt_pkg.Cache() + pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@contextmanager +def chdir(d): + cur = os.getcwd() + try: + yield os.chdir(d) + finally: + os.chdir(cur) + + +def chownr(path, owner, group): + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + + for root, dirs, files in os.walk(path): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + os.chown(full, uid, gid) diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py new file mode 100644 index 00000000..e8039a84 --- /dev/null +++ b/hooks/charmhelpers/core/services/__init__.py @@ -0,0 +1,2 @@ +from .base import * +from .helpers import * diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py new file mode 100644 index 00000000..87ecb130 --- /dev/null +++ b/hooks/charmhelpers/core/services/base.py @@ -0,0 +1,313 @@ +import os +import re +import json +from collections import Iterable + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": , + "required_data": , + "provided_data": , + "data_ready": , + "data_lost": , + "start": , + "stop": , + "ports": , + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. + + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. 
If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = {} + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.provide_data() + self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + """ + hook_name = hookenv.hook_name() + for service in self.services.values(): + for provider in service.get('provided_data', []): + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): + data = provider.provide_data() + _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data + if _ready: + hookenv.relation_set(None, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. + """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. 
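A hypothetical hooks entry point wired to the framework described above: every hook would execute it, and manage() dispatches to stop_services() for the stop hook and to provide_data()/reconfigure_services() otherwise (service name and port are illustrative)::

    from charmhelpers.core.services.base import ServiceManager

    def get_manager():
        return ServiceManager([
            {
                'service': 'neutron-server',
                'ports': [9696],
                'required_data': [],   # contexts/dicts that must all be truthy
                'data_ready': [],      # e.g. template-render callbacks
            },
        ])

    if __name__ == '__main__':
        get_manager().manage()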
+ """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. 
+ """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..4b90589b --- /dev/null +++ b/hooks/charmhelpers/core/services/helpers.py @@ -0,0 +1,125 @@ +from charmhelpers.core import hookenv +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the interface type, to prevent + potential naming conflicts. + """ + name = None + interface = None + required_keys = [] + + def __init__(self, *args, **kwargs): + super(RelationContext, self).__init__(*args, **kwargs) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. 
+ + The units are sorted lexographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. + """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a template, for use as a ready action. + """ + def __init__(self, source, target, owner='root', group='root', perms=0444): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + context = {} + for ctx in service.get('required_data', []): + context.update(ctx) + templating.render(self.source, self.target, context, + self.owner, self.group, self.perms) + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py new file mode 100644 index 00000000..2c638853 --- /dev/null +++ b/hooks/charmhelpers/core/templating.py @@ -0,0 +1,51 @@ +import os + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. + + The context should be a dict containing the values to be replaced in the + template. + + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + Note: Using this requires python-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. 
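A sketch of the helper in use (target path and context values are illustrative)::

    from charmhelpers.core import templating

    templating.render(
        source='haproxy.cfg',              # resolved under $CHARM_DIR/templates
        target='/etc/haproxy/haproxy.cfg',
        context={'local_host': '127.0.0.1', 'stat_port': ':8888', 'units': {}},
        owner='root', group='root', perms=0o444,
    )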
+ """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + loader = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = loader.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + host.mkdir(os.path.dirname(target)) + host.write_file(target, content, owner, group, perms) diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 5be512ce..8e9d3804 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +from tempfile import NamedTemporaryFile import time from yaml import safe_load from charmhelpers.core.host import ( @@ -116,14 +117,7 @@ class BaseFetchHandler(object): def filter_installed_packages(packages): """Returns a list of packages that require installation""" - import apt_pkg - apt_pkg.init() - - # Tell apt to build an in-memory cache to prevent race conditions (if - # another process is already building the cache). - apt_pkg.config.set("Dir::Cache::pkgcache", "") - - cache = apt_pkg.Cache() + cache = apt_cache() _pkgs = [] for package in packages: try: @@ -136,6 +130,16 @@ def filter_installed_packages(packages): return _pkgs +def apt_cache(in_memory=True): + """Build and return an apt cache""" + import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache() + + def apt_install(packages, options=None, fatal=False): """Install one or more packages""" if options is None: @@ -201,6 +205,27 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples: + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. ppa and cloud archive keys + are securely added automtically, so sould not be provided. + """ if source is None: log('Source is not present. 
Skipping') return @@ -225,10 +250,23 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + else: + raise SourceConfigError("Unknown source: {!r}".format(source)) + if key: - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile() as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) + else: + # Note that hkp: is in no way a secure protocol. Using a + # GPG key id is pointless from a security POV unless you + # absolutely trust your network and DNS. + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv', + key]) def configure_sources(update=False, @@ -238,7 +276,8 @@ def configure_sources(update=False, Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. + The frament needs to be included as a string. Sources and their + corresponding keys are of the types supported by add_source(). Example config: install_sources: | diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 87e7071a..1b11fa03 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -1,6 +1,8 @@ import os import urllib2 +from urllib import urlretrieve import urlparse +import hashlib from charmhelpers.fetch import ( BaseFetchHandler, @@ -12,7 +14,17 @@ from charmhelpers.payload.archive import ( ) from charmhelpers.core.host import mkdir +""" +This class is a plugin for charmhelpers.fetch.install_remote. +It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/. + +Example usage: +install_remote("https://example.com/some/archive.tar.gz") +# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/. + +See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types. +""" class ArchiveUrlFetchHandler(BaseFetchHandler): """Handler for archives via generic URLs""" def can_handle(self, source): @@ -61,3 +73,31 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): except OSError as e: raise UnhandledSource(e.strerror) return extract(dld_file) + + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + if validate == 'sha1' and len(hashsum) != 40: + raise ValueError("HashSum must be = 40 characters when using sha1" + " validation") + if validate == 'md5' and len(hashsum) != 32: + raise ValueError("HashSum must be = 32 characters when using md5" + " validation") + tempfile, headers = urlretrieve(url) + self.validate_file(tempfile, hashsum, validate) + return tempfile + + # Predicate method that returns status of hash matching expected hash. 
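Sketch of the new checksum helper in use; the URL and digest are placeholders, and a ValueError is raised if the downloaded file does not match::

    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    handler = ArchiveUrlFetchHandler()
    tmpfile = handler.download_and_validate(
        'http://example.com/payload.tar.gz',            # placeholder URL
        'da39a3ee5e6b4b0d3255bfef95601890afd80709',      # expected 40-char sha1
        validate='sha1',
    )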
+ def validate_file(self, source, hashsum, vmethod='sha1'): + if vmethod != 'sha1' and vmethod != 'md5': + raise ValueError("Validation Method not supported") + + if vmethod == 'md5': + m = hashlib.md5() + if vmethod == 'sha1': + m = hashlib.sha1() + with open(source) as f: + for line in f: + m.update(line) + if hashsum != m.hexdigest(): + msg = "Hash Mismatch on {} expected {} got {}" + raise ValueError(msg.format(source, hashsum, m.hexdigest())) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index b352b99d..37852c83 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -147,7 +147,7 @@ def db_joined(): raise Exception(e) if config('prefer-ipv6'): - host = get_ipv6_addr() + host = get_ipv6_addr()[0] else: host = unit_get('private-address') @@ -265,7 +265,7 @@ def cluster_changed(): for rid in relation_ids('cluster'): relation_set(relation_id=rid, relation_settings={'private-address': - get_ipv6_addr()}) + get_ipv6_addr()[0]}) CONFIGS.write_all() diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index 2b0541f4..98378dcb 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -13,6 +13,7 @@ TO_PATCH = [ ] +@patch('charmhelpers.core.hookenv.config') class IdentityServiceContext(CharmTestCase): def setUp(self): super(IdentityServiceContext, self).setUp(context, TO_PATCH) @@ -21,12 +22,14 @@ class IdentityServiceContext(CharmTestCase): self.test_config.set('region', 'region457') self.test_config.set('prefer-ipv6', False) + @patch.object(charmhelpers.contrib.openstack.context, 'format_ipv6_addr') @patch.object(charmhelpers.contrib.openstack.context, 'context_complete') @patch.object(charmhelpers.contrib.openstack.context, 'relation_get') @patch.object(charmhelpers.contrib.openstack.context, 'related_units') @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp): + def test_ids_ctxt(self, _log, _rids, _runits, _rget, _ctxt_comp, + format_ipv6_addr, mock_config): _rids.return_value = 'rid1' _runits.return_value = 'runit' _ctxt_comp.return_value = True @@ -45,7 +48,7 @@ class IdentityServiceContext(CharmTestCase): @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_ids_ctxt_no_rels(self, _log, _rids): + def test_ids_ctxt_no_rels(self, _log, _rids, mock_config): _rids.return_value = [] ids_ctxt = context.IdentityServiceContext() self.assertEquals(ids_ctxt(), None) diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 004f8e18..3cc1b90e 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -50,6 +50,7 @@ NEUTRON_CONF = '%s/neutron.conf' % NEUTRON_CONF_DIR from random import randrange +@patch('charmhelpers.core.hookenv.config') class NeutronAPIHooksTests(CharmTestCase): def setUp(self): @@ -67,7 +68,7 @@ class NeutronAPIHooksTests(CharmTestCase): hooks.hooks.execute([ 'hooks/{}'.format(hookname)]) - def test_install_hook(self): + def test_install_hook(self, mock_config): _pkgs = ['foo', 'bar'] _ports = [80, 81, 82] _port_calls = [call(port) for port in _ports] @@ -85,7 +86,7 @@ class NeutronAPIHooksTests(CharmTestCase): self.assertTrue(self.execd_preinstall.called) @patch.object(hooks, 'configure_https') - def test_config_changed(self, conf_https): + def 
test_config_changed(self, conf_https, mock_config): self.openstack_upgrade_available.return_value = True self.relation_ids.side_effect = self._fake_relids _n_api_rel_joined = self.patch('neutron_api_relation_joined') @@ -101,7 +102,7 @@ class NeutronAPIHooksTests(CharmTestCase): self.assertTrue(self.CONFIGS.write_all.called) self.assertTrue(self.do_openstack_upgrade.called) - def test_amqp_joined(self): + def test_amqp_joined(self, mock_config): self._call_hook('amqp-relation-joined') self.relation_set.assert_called_with( username='neutron', @@ -109,16 +110,16 @@ class NeutronAPIHooksTests(CharmTestCase): relation_id=None ) - def test_amqp_changed(self): + def test_amqp_changed(self, mock_config): self.CONFIGS.complete_contexts.return_value = ['amqp'] self._call_hook('amqp-relation-changed') self.assertTrue(self.CONFIGS.write.called_with(NEUTRON_CONF)) - def test_amqp_departed(self): + def test_amqp_departed(self, mock_config): self._call_hook('amqp-relation-departed') self.assertTrue(self.CONFIGS.write.called_with(NEUTRON_CONF)) - def test_db_joined(self): + def test_db_joined(self, mock_config): self.is_relation_made.return_value = False self.unit_get.return_value = 'myhostname' self._call_hook('shared-db-relation-joined') @@ -128,7 +129,7 @@ class NeutronAPIHooksTests(CharmTestCase): hostname='myhostname', ) - def test_db_joined_with_postgresql(self): + def test_db_joined_with_postgresql(self, mock_config): self.is_relation_made.return_value = True with self.assertRaises(Exception) as context: @@ -137,7 +138,7 @@ class NeutronAPIHooksTests(CharmTestCase): 'Attempting to associate a mysql database when there ' 'is already associated a postgresql one') - def test_postgresql_db_joined(self): + def test_postgresql_db_joined(self, mock_config): self.unit_get.return_value = 'myhostname' self.is_relation_made.return_value = False self._call_hook('pgsql-db-relation-joined') @@ -145,7 +146,7 @@ class NeutronAPIHooksTests(CharmTestCase): database='neutron', ) - def test_postgresql_joined_with_db(self): + def test_postgresql_joined_with_db(self, mock_config): self.is_relation_made.return_value = True with self.assertRaises(Exception) as context: @@ -154,25 +155,25 @@ class NeutronAPIHooksTests(CharmTestCase): 'Attempting to associate a postgresql database when' ' there is already associated a mysql one') - def test_shared_db_changed(self): + def test_shared_db_changed(self, mock_config): self.CONFIGS.complete_contexts.return_value = ['shared-db'] self._call_hook('shared-db-relation-changed') self.assertTrue(self.CONFIGS.write_all.called) - def test_shared_db_changed_partial_ctxt(self): + def test_shared_db_changed_partial_ctxt(self, mock_config): self.CONFIGS.complete_contexts.return_value = [] self._call_hook('shared-db-relation-changed') self.assertFalse(self.CONFIGS.write_all.called) - def test_pgsql_db_changed(self): + def test_pgsql_db_changed(self, mock_config): self._call_hook('pgsql-db-relation-changed') self.assertTrue(self.CONFIGS.write.called) - def test_amqp_broken(self): + def test_amqp_broken(self, mock_config): self._call_hook('amqp-relation-broken') self.assertTrue(self.CONFIGS.write_all.called) - def test_identity_joined(self): + def test_identity_joined(self, mock_config): self.canonical_url.return_value = 'http://127.0.0.1' self.api_port.return_value = '9696' self.test_config.set('region', 'region1') @@ -190,7 +191,7 @@ class NeutronAPIHooksTests(CharmTestCase): relation_settings=_endpoints ) - def test_identity_changed_partial_ctxt(self): + def 
test_identity_changed_partial_ctxt(self, mock_config): self.CONFIGS.complete_contexts.return_value = [] _api_rel_joined = self.patch('neutron_api_relation_joined') self.relation_ids.side_effect = self._fake_relids @@ -198,7 +199,7 @@ class NeutronAPIHooksTests(CharmTestCase): self.assertFalse(_api_rel_joined.called) @patch.object(hooks, 'configure_https') - def test_identity_changed(self, conf_https): + def test_identity_changed(self, conf_https, mock_config): self.CONFIGS.complete_contexts.return_value = ['identity-service'] _api_rel_joined = self.patch('neutron_api_relation_joined') self.relation_ids.side_effect = self._fake_relids @@ -206,7 +207,7 @@ class NeutronAPIHooksTests(CharmTestCase): self.assertTrue(self.CONFIGS.write.called_with(NEUTRON_CONF)) self.assertTrue(_api_rel_joined.called) - def test_neutron_api_relation_no_id_joined(self): + def test_neutron_api_relation_no_id_joined(self, mock_config): host = 'http://127.0.0.1' port = 1234 _id_rel_joined = self.patch('identity_joined') @@ -234,7 +235,7 @@ class NeutronAPIHooksTests(CharmTestCase): **_relation_data ) - def test_neutron_api_relation_joined(self): + def test_neutron_api_relation_joined(self, mock_config): host = 'http://127.0.0.1' port = 1234 self.canonical_url.return_value = host @@ -252,11 +253,11 @@ class NeutronAPIHooksTests(CharmTestCase): **_relation_data ) - def test_neutron_api_relation_changed(self): + def test_neutron_api_relation_changed(self, mock_config): self._call_hook('neutron-api-relation-changed') self.assertTrue(self.CONFIGS.write.called_with(NEUTRON_CONF)) - def test_neutron_plugin_api_relation_joined(self): + def test_neutron_plugin_api_relation_joined(self, mock_config): _relation_data = { 'neutron-security-groups': False, } @@ -266,12 +267,12 @@ class NeutronAPIHooksTests(CharmTestCase): **_relation_data ) - def test_cluster_changed(self): + def test_cluster_changed(self, mock_config): self._call_hook('cluster-relation-changed') self.assertTrue(self.CONFIGS.write_all.called) @patch.object(hooks, 'get_hacluster_config') - def test_ha_joined(self, _get_ha_config): + def test_ha_joined(self, _get_ha_config, mock_config): _ha_config = { 'vip': '10.0.0.1', 'vip_cidr': '24', @@ -304,7 +305,7 @@ class NeutronAPIHooksTests(CharmTestCase): ) @patch.object(hooks, 'get_hacluster_config') - def test_ha_joined_with_ipv6(self, _get_ha_config): + def test_ha_joined_with_ipv6(self, _get_ha_config, mock_config): self.test_config.set('prefer-ipv6', 'True') _ha_config = { 'vip': '2001:db8:1::1', @@ -339,7 +340,7 @@ class NeutronAPIHooksTests(CharmTestCase): **_relation_data ) - def test_ha_changed(self): + def test_ha_changed(self, mock_config): self.test_relation.set({ 'clustered': 'true', }) @@ -351,7 +352,7 @@ class NeutronAPIHooksTests(CharmTestCase): self.assertTrue(_n_api_rel_joined.called) self.assertTrue(_id_rel_joined.called) - def test_ha_changed_not_leader(self): + def test_ha_changed_not_leader(self, mock_config): self.test_relation.set({ 'clustered': 'true', }) @@ -363,7 +364,7 @@ class NeutronAPIHooksTests(CharmTestCase): self.assertFalse(_n_api_rel_joined.called) self.assertFalse(_id_rel_joined.called) - def test_ha_changed_not_clustered(self): + def test_ha_changed_not_clustered(self, mock_config): self.test_relation.set({ 'clustered': None, }) @@ -375,7 +376,7 @@ class NeutronAPIHooksTests(CharmTestCase): self.assertFalse(_n_api_rel_joined.called) self.assertFalse(_id_rel_joined.called) - def test_configure_https(self): + def test_configure_https(self, mock_config): 
self.CONFIGS.complete_contexts.return_value = ['https'] self.relation_ids.side_effect = self._fake_relids _id_rel_joined = self.patch('identity_joined') @@ -384,7 +385,7 @@ class NeutronAPIHooksTests(CharmTestCase): 'openstack_https_frontend']) self.assertTrue(_id_rel_joined.called) - def test_configure_https_nohttps(self): + def test_configure_https_nohttps(self, mock_config): self.CONFIGS.complete_contexts.return_value = [] self.relation_ids.side_effect = self._fake_relids _id_rel_joined = self.patch('identity_joined') From adc2f797c4187d481f027b97851163fd8ce2b4af Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Sun, 21 Sep 2014 23:12:06 +0800 Subject: [PATCH 015/125] Exclues vip from get_ipv6_addr(). --- hooks/charmhelpers/contrib/openstack/context.py | 4 +++- hooks/charmhelpers/contrib/openstack/ip.py | 5 ++++- hooks/neutron_api_hooks.py | 6 +++--- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index f40ab846..09eef3e1 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -414,10 +414,12 @@ class HAProxyContext(OSContextGenerator): cluster_hosts = {} l_unit = local_unit().replace('/', '-') + if config('prefer-ipv6'): - addr = get_ipv6_addr()[0] + addr = get_ipv6_addr(exc_list=[config('vip')])[0] else: addr = unit_get('private-address') + cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), addr) diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py index affe8cd1..51a11e63 100644 --- a/hooks/charmhelpers/contrib/openstack/ip.py +++ b/hooks/charmhelpers/contrib/openstack/ip.py @@ -66,7 +66,10 @@ def resolve_address(endpoint_type=PUBLIC): resolved_address = vip else: if config('prefer-ipv6'): - fallback_addr = get_ipv6_addr() + list = [] + if config('vip'): + list.append(config('vip')) + fallback_addr = get_ipv6_addr(exc_list=list)[0] else: fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) resolved_address = get_address_in_network( diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 37852c83..8ef0e2b6 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -147,7 +147,7 @@ def db_joined(): raise Exception(e) if config('prefer-ipv6'): - host = get_ipv6_addr()[0] + host = get_ipv6_addr(exc_list=[config('vip')])[0] else: host = unit_get('private-address') @@ -263,9 +263,9 @@ def neutron_plugin_api_relation_joined(rid=None): def cluster_changed(): if config('prefer-ipv6'): for rid in relation_ids('cluster'): + addr = get_ipv6_addr(exc_list=[config('vip')])[0] relation_set(relation_id=rid, - relation_settings={'private-address': - get_ipv6_addr()[0]}) + relation_settings={'private-address': addr}) CONFIGS.write_all() From 359ba88b7845775f5b2e8a1cd9e123915db0339a Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Mon, 22 Sep 2014 20:41:13 +0800 Subject: [PATCH 016/125] Sync ~xianghui/charm-helpers/format-ipv6, format auth_host --- hooks/charmhelpers/contrib/openstack/context.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 09eef3e1..902a0b1f 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -250,11 +250,13 @@ class IdentityServiceContext(OSContextGenerator): rdata = relation_get(rid=rid, unit=unit) 
serv_host = rdata.get('service_host') serv_host = format_ipv6_addr(serv_host) or serv_host + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host ctxt = { 'service_port': rdata.get('service_port'), 'service_host': serv_host, - 'auth_host': rdata.get('auth_host'), + 'auth_host': auth_host, 'auth_port': rdata.get('auth_port'), 'admin_tenant_name': rdata.get('service_tenant'), 'admin_user': rdata.get('service_username'), From 815bebd94fecd9f5a5d8eec93e27a8cc49753123 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 22 Sep 2014 17:40:38 +0100 Subject: [PATCH 017/125] Resync helpers --- charm-helpers-sync.yaml | 2 +- .../charmhelpers/contrib/hahelpers/apache.py | 7 +- .../charmhelpers/contrib/hahelpers/cluster.py | 71 +++++++--- hooks/charmhelpers/contrib/network/ip.py | 102 +++++++++++++++ .../contrib/openstack/amulet/deployment.py | 20 ++- .../contrib/openstack/amulet/utils.py | 106 ++++++++++++--- .../charmhelpers/contrib/openstack/context.py | 123 ++++++++++++++---- hooks/charmhelpers/contrib/openstack/ip.py | 10 +- .../contrib/openstack/templates/haproxy.cfg | 6 +- .../templates/openstack_https_frontend | 17 +-- .../templates/openstack_https_frontend.conf | 17 +-- hooks/charmhelpers/contrib/openstack/utils.py | 10 +- .../contrib/storage/linux/utils.py | 3 + hooks/charmhelpers/core/hookenv.py | 55 +++++--- hooks/charmhelpers/core/host.py | 74 +++++++++-- hooks/charmhelpers/fetch/__init__.py | 84 +++++++++--- hooks/charmhelpers/fetch/archiveurl.py | 52 +++++++- 17 files changed, 611 insertions(+), 148 deletions(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 8af0007c..28221425 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/multiple-https-networks destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py index 8d5fb8ba..6595ddb8 100644 --- a/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -20,7 +20,8 @@ from charmhelpers.core.hookenv import ( ) -def get_cert(): +def get_cert(cn): + # TODO: deal with multiple https endpoints via charm config cert = config_get('ssl_cert') key = config_get('ssl_key') if not (cert and key): @@ -30,10 +31,10 @@ def get_cert(): for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): if not cert: - cert = relation_get('ssl_cert', + cert = relation_get('ssl_cert_{}'.format(cn), rid=r_id, unit=unit) if not key: - key = relation_get('ssl_key', + key = relation_get('ssl_key_{}'.format(cn), rid=r_id, unit=unit) return (cert, key) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 505de6b2..6d972007 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -6,6 +6,11 @@ # Adam Gandelman # +""" +Helpers for clustering and determining "cluster leadership" and other +clustering-related helpers. +""" + import subprocess import os @@ -19,6 +24,7 @@ from charmhelpers.core.hookenv import ( config as config_get, INFO, ERROR, + WARNING, unit_get, ) @@ -27,6 +33,29 @@ class HAIncompleteConfig(Exception): pass +def is_elected_leader(resource): + """ + Returns True if the charm executing this is the elected cluster leader. + + It relies on two mechanisms to determine leadership: + 1. 
If the charm is part of a corosync cluster, call corosync to + determine leadership. + 2. If the charm is not part of a corosync cluster, the leader is + determined as being "the alive unit with the lowest unit numer". In + other words, the oldest surviving unit. + """ + if is_clustered(): + if not is_crm_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + def is_clustered(): for r_id in (relation_ids('ha') or []): for unit in (relation_list(r_id) or []): @@ -38,7 +67,11 @@ def is_clustered(): return False -def is_leader(resource): +def is_crm_leader(resource): + """ + Returns True if the charm calling this is the elected corosync leader, + as returned by calling the external "crm" command. + """ cmd = [ "crm", "resource", "show", resource @@ -54,15 +87,31 @@ def is_leader(resource): return False -def peer_units(): +def is_leader(resource): + log("is_leader is deprecated. Please consider using is_crm_leader " + "instead.", level=WARNING) + return is_crm_leader(resource) + + +def peer_units(peer_relation="cluster"): peers = [] - for r_id in (relation_ids('cluster') or []): + for r_id in (relation_ids(peer_relation) or []): for unit in (relation_list(r_id) or []): peers.append(unit) return peers +def peer_ips(peer_relation='cluster', addr_key='private-address'): + '''Return a dict of peers and their private-address''' + peers = {} + for r_id in relation_ids(peer_relation): + for unit in relation_list(r_id): + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) + return peers + + def oldest_peer(peers): + """Determines who the oldest peer is by comparing unit numbers.""" local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) for peer in peers: remote_unit_no = int(peer.split('/')[1]) @@ -72,16 +121,9 @@ def oldest_peer(peers): def eligible_leader(resource): - if is_clustered(): - if not is_leader(resource): - log('Deferring action to CRM leader.', level=INFO) - return False - else: - peers = peer_units() - if peers and not oldest_peer(peers): - log('Deferring action to oldest service unit.', level=INFO) - return False - return True + log("eligible_leader is deprecated. Please consider using " + "is_elected_leader instead.", level=WARNING) + return is_elected_leader(resource) def https(): @@ -97,10 +139,9 @@ def https(): return True for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN rel_state = [ relation_get('https_keystone', rid=r_id, unit=unit), - relation_get('ssl_cert', rid=r_id, unit=unit), - relation_get('ssl_key', rid=r_id, unit=unit), relation_get('ca_cert', rid=r_id, unit=unit), ] # NOTE: works around (LP: #1203241) diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 0972e91a..b859a097 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -1,3 +1,4 @@ +import glob import sys from functools import partial @@ -154,3 +155,104 @@ def _get_for_address(address, key): get_iface_for_address = partial(_get_for_address, key='iface') get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def format_ipv6_addr(address): + """ + IPv6 needs to be wrapped with [] in url link to parse correctly. 
+ """ + if is_ipv6(address): + address = "[%s]" % address + else: + log("Not an valid ipv6 address: %s" % address, + level=ERROR) + address = None + return address + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IP address for a given interface, if any, or []. + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + if not exc_list: + exc_list = [] + try: + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception('Unknown inet type ' + str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("%s not found " % (iface)) + else: + return [] + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) + return addresses + +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') + + +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): + """ + Return the assigned IPv6 address for a given interface, if any, or []. + """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + remotly_addressable = [] + for address in addresses: + if not address.startswith('fe80'): + remotly_addressable.append(address) + if fatal and not remotly_addressable: + raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) + return remotly_addressable + + +def get_bridges(vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of bridges on the system or [] + """ + b_rgex = vnic_dir + '/*/bridge' + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + + +def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): + """ + Return a list of nics comprising a given bridge on the system or [] + """ + brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + + +def is_bridge_member(nic): + """ + Check if a given nic is a member of a bridge + """ + for bridge in get_bridges(): + if nic in get_bridge_nics(bridge): + return True + return False diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index e476b6f2..9179eeb1 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -4,8 +4,11 @@ from charmhelpers.contrib.amulet.deployment import ( class OpenStackAmuletDeployment(AmuletDeployment): - """This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms.""" + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. 
+ """ def __init__(self, series=None, openstack=None, source=None): """Initialize the deployment environment.""" @@ -40,11 +43,14 @@ class OpenStackAmuletDeployment(AmuletDeployment): self.d.configure(service, config) def _get_openstack_release(self): - """Return an integer representing the enum value of the openstack - release.""" - self.precise_essex, self.precise_folsom, self.precise_grizzly, \ - self.precise_havana, self.precise_icehouse, \ - self.trusty_icehouse = range(6) + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse) = range(6) releases = { ('precise', None): self.precise_essex, ('precise', 'cloud:precise-folsom'): self.precise_folsom, diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 222281e3..bd327bdc 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -16,8 +16,11 @@ ERROR = logging.ERROR class OpenStackAmuletUtils(AmuletUtils): - """This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charms.""" + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms. + """ def __init__(self, log_level=ERROR): """Initialize the deployment environment.""" @@ -25,13 +28,17 @@ class OpenStackAmuletUtils(AmuletUtils): def validate_endpoint_data(self, endpoints, admin_port, internal_port, public_port, expected): - """Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint.""" + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. + """ found = False for ep in endpoints: self.log.debug('endpoint: {}'.format(repr(ep))) - if admin_port in ep.adminurl and internal_port in ep.internalurl \ - and public_port in ep.publicurl: + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): found = True actual = {'id': ep.id, 'region': ep.region, @@ -47,8 +54,11 @@ class OpenStackAmuletUtils(AmuletUtils): return 'endpoint not found' def validate_svc_catalog_endpoint_data(self, expected, actual): - """Validate a list of actual service catalog endpoints vs a list of - expected service catalog endpoints.""" + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ self.log.debug('actual: {}'.format(repr(actual))) for k, v in expected.iteritems(): if k in actual: @@ -60,8 +70,11 @@ class OpenStackAmuletUtils(AmuletUtils): return ret def validate_tenant_data(self, expected, actual): - """Validate a list of actual tenant data vs list of expected tenant - data.""" + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -78,8 +91,11 @@ class OpenStackAmuletUtils(AmuletUtils): return ret def validate_role_data(self, expected, actual): - """Validate a list of actual role data vs a list of expected role - data.""" + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. 
+ """ self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -95,8 +111,11 @@ class OpenStackAmuletUtils(AmuletUtils): return ret def validate_user_data(self, expected, actual): - """Validate a list of actual user data vs a list of expected user - data.""" + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. + """ self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -114,21 +133,24 @@ class OpenStackAmuletUtils(AmuletUtils): return ret def validate_flavor_data(self, expected, actual): - """Validate a list of actual flavors vs a list of expected flavors.""" + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. + """ self.log.debug('actual: {}'.format(repr(actual))) act = [a.name for a in actual] return self._validate_list_data(expected, act) def tenant_exists(self, keystone, tenant): - """Return True if tenant exists""" + """Return True if tenant exists.""" return tenant in [t.name for t in keystone.tenants.list()] def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant): """Authenticates admin user with the keystone admin endpoint.""" - service_ip = \ - keystone_sentry.relation('shared-db', - 'mysql:shared-db')['private-address'] + unit = keystone_sentry + service_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) return keystone_client.Client(username=user, password=password, tenant_name=tenant, auth_url=ep) @@ -177,12 +199,40 @@ class OpenStackAmuletUtils(AmuletUtils): image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + return image def delete_image(self, glance, image): """Delete the specified image.""" + num_before = len(list(glance.images.list())) glance.images.delete(image) + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" image = nova.images.find(name=image_name) @@ -199,11 +249,27 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('instance status: {}'.format(status)) count += 1 - if status == 'BUILD': + if status != 'ACTIVE': + self.log.error('instance creation timed out') return None return instance def delete_instance(self, nova, instance): """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True diff --git 
a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 92c41b23..bd280df5 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -8,7 +8,6 @@ from subprocess import ( check_call ) - from charmhelpers.fetch import ( apt_install, filter_installed_packages, @@ -28,6 +27,11 @@ from charmhelpers.core.hookenv import ( INFO ) +from charmhelpers.core.host import ( + mkdir, + write_file +) + from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, determine_api_port, @@ -38,13 +42,18 @@ from charmhelpers.contrib.hahelpers.cluster import ( from charmhelpers.contrib.hahelpers.apache import ( get_cert, get_ca_cert, + install_ca_cert, ) from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, ) -from charmhelpers.contrib.network.ip import get_address_in_network +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_ipv6_addr, + is_address_in_network +) CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -401,9 +410,12 @@ class HAProxyContext(OSContextGenerator): cluster_hosts = {} l_unit = local_unit().replace('/', '-') - cluster_hosts[l_unit] = \ - get_address_in_network(config('os-internal-network'), - unit_get('private-address')) + if config('prefer-ipv6'): + addr = get_ipv6_addr() + else: + addr = unit_get('private-address') + cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), + addr) for rid in relation_ids('cluster'): for unit in related_units(rid): @@ -414,6 +426,16 @@ class HAProxyContext(OSContextGenerator): ctxt = { 'units': cluster_hosts, } + + if config('prefer-ipv6'): + ctxt['local_host'] = 'ip6-localhost' + ctxt['haproxy_host'] = '::' + ctxt['stat_port'] = ':::8888' + else: + ctxt['local_host'] = '127.0.0.1' + ctxt['haproxy_host'] = '0.0.0.0' + ctxt['stat_port'] = ':8888' + if len(cluster_hosts.keys()) > 1: # Enable haproxy when we have enough peers. 
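For orientation, an illustrative shape of the context this generator now emits when prefer-ipv6 is set, matching the haproxy.cfg template variables changed earlier in this series (unit name and address are examples)::

    ctxt = {
        'units': {'neutron-api-0': '2001:db8::10'},
        'local_host': 'ip6-localhost',
        'haproxy_host': '::',
        'stat_port': ':::8888',
    }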
log('Ensuring haproxy enabled in /etc/default/haproxy.') @@ -474,22 +496,30 @@ class ApacheSSLContext(OSContextGenerator): cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] check_call(cmd) - def configure_cert(self): - if not os.path.isdir('/etc/apache2/ssl'): - os.mkdir('/etc/apache2/ssl') + def configure_cert(self, cn=None): ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) - if not os.path.isdir(ssl_dir): - os.mkdir(ssl_dir) - cert, key = get_cert() - with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: - cert_out.write(b64decode(cert)) - with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: - key_out.write(b64decode(key)) + mkdir(path=ssl_dir) + cert, key = get_cert(cn) + write_file(path=os.path.join(ssl_dir, 'cert_{}'.format(cn)), + content=b64decode(cert)) + write_file(path=os.path.join(ssl_dir, 'key_{}'.format(cn)), + content=b64decode(key)) + + def configure_ca(self): ca_cert = get_ca_cert() if ca_cert: - with open(CA_CERT_PATH, 'w') as ca_out: - ca_out.write(b64decode(ca_cert)) - check_call(['update-ca-certificates']) + install_ca_cert(b64decode(ca_cert)) + + def canonical_names(self): + '''Figure out which canonical names clients will access this service''' + cns = [] + for r_id in relation_ids('identity-service'): + for unit in related_units(r_id): + rdata = relation_get(rid=r_id, unit=unit) + for k in rdata: + if k.startswith('ssl_key_'): + cns.append(k.lstrip('ssl_key_')) + return list(set(cns)) def __call__(self): if isinstance(self.external_ports, basestring): @@ -497,21 +527,47 @@ class ApacheSSLContext(OSContextGenerator): if (not self.external_ports or not https()): return {} - self.configure_cert() + self.configure_ca() self.enable_modules() ctxt = { 'namespace': self.service_namespace, - 'private_address': unit_get('private-address'), - 'endpoints': [] + 'endpoints': [], + 'ext_ports': [] } - if is_clustered(): - ctxt['private_address'] = config('vip') - for api_port in self.external_ports: - ext_port = determine_apache_port(api_port) - int_port = determine_api_port(api_port) - portmap = (int(ext_port), int(int_port)) - ctxt['endpoints'].append(portmap) + + for cn in self.canonical_names(): + self.configure_cert(cn) + + addresses = [] + vips = [] + if config('vip'): + vips = config('vip').split() + + for network_type in ['os-internal-network', + 'os-admin-network', + 'os-public-network']: + address = get_address_in_network(config(network_type), + unit_get('private-address')) + if len(vips) > 0 and is_clustered(): + for vip in vips: + if is_address_in_network(config(network_type), + vip): + addresses.append((address, vip)) + break + elif is_clustered(): + addresses.append((address, config('vip'))) + else: + addresses.append((address, address)) + + for address, endpoint in set(addresses): + for api_port in self.external_ports: + ext_port = determine_apache_port(api_port) + int_port = determine_api_port(api_port) + portmap = (address, endpoint, int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + ctxt['ext_ports'].append(int(ext_port)) + ctxt['ext_ports'] = list(set(ctxt['ext_ports'])) return ctxt @@ -753,6 +809,17 @@ class SubordinateConfigContext(OSContextGenerator): return ctxt +class LogLevelContext(OSContextGenerator): + + def __call__(self): + ctxt = {} + ctxt['debug'] = \ + False if config('debug') is None else config('debug') + ctxt['verbose'] = \ + False if config('verbose') is None else config('verbose') + return ctxt + + class SyslogContext(OSContextGenerator): def __call__(self): diff --git 
a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py index 7e7a536f..affe8cd1 100644 --- a/hooks/charmhelpers/contrib/openstack/ip.py +++ b/hooks/charmhelpers/contrib/openstack/ip.py @@ -7,6 +7,7 @@ from charmhelpers.contrib.network.ip import ( get_address_in_network, is_address_in_network, is_ipv6, + get_ipv6_addr, ) from charmhelpers.contrib.hahelpers.cluster import is_clustered @@ -64,10 +65,13 @@ def resolve_address(endpoint_type=PUBLIC): vip): resolved_address = vip else: + if config('prefer-ipv6'): + fallback_addr = get_ipv6_addr() + else: + fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) resolved_address = get_address_in_network( - config(_address_map[endpoint_type]['config']), - unit_get(_address_map[endpoint_type]['fallback']) - ) + config(_address_map[endpoint_type]['config']), fallback_addr) + if resolved_address is None: raise ValueError('Unable to resolve a suitable IP address' ' based on charm state and configuration') diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index a95eddd1..ce0e2738 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -1,6 +1,6 @@ global - log 127.0.0.1 local0 - log 127.0.0.1 local1 notice + log {{ local_host }} local0 + log {{ local_host }} local1 notice maxconn 20000 user haproxy group haproxy @@ -17,7 +17,7 @@ defaults timeout client 30000 timeout server 30000 -listen stats :8888 +listen stats {{ stat_port }} mode http stats enable stats hide-version diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index e02dc751..ce28fa3f 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -1,16 +1,18 @@ {% if endpoints -%} -{% for ext, int in endpoints -%} -Listen {{ ext }} -NameVirtualHost *:{{ ext }} - - ServerName {{ private_address }} +{% for ext_port in ext_ports -%} +Listen {{ ext_port }} +{% endfor -%} +{% for address, endpoint, ext, int in endpoints -%} + + ServerName {{ endpoint }} SSLEngine on - SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} ProxyPass / http://localhost:{{ int }}/ ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on +{% endfor -%} Order deny,allow Allow from all @@ -19,5 +21,4 @@ NameVirtualHost *:{{ ext }} Order allow,deny Allow from all -{% endfor -%} {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf index e02dc751..ce28fa3f 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -1,16 +1,18 @@ {% if endpoints -%} -{% for ext, int in endpoints -%} -Listen {{ ext }} -NameVirtualHost *:{{ ext }} - - ServerName {{ private_address }} +{% for ext_port in ext_ports -%} +Listen {{ ext_port }} +{% endfor -%} +{% for address, endpoint, ext, int in endpoints -%} + + ServerName {{ endpoint }} SSLEngine on - 
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} ProxyPass / http://localhost:{{ int }}/ ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on +{% endfor -%} Order deny,allow Allow from all @@ -19,5 +21,4 @@ NameVirtualHost *:{{ ext }} Order allow,deny Allow from all -{% endfor -%} {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 127b03fe..23d237de 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -23,7 +23,7 @@ from charmhelpers.contrib.storage.linux.lvm import ( ) from charmhelpers.core.host import lsb_release, mounts, umount -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_cache from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device @@ -70,6 +70,7 @@ SWIFT_CODENAMES = OrderedDict([ ('1.13.0', 'icehouse'), ('1.12.0', 'icehouse'), ('1.11.0', 'icehouse'), + ('2.0.0', 'juno'), ]) DEFAULT_LOOPBACK_SIZE = '5G' @@ -134,13 +135,8 @@ def get_os_version_codename(codename): def get_os_codename_package(package, fatal=True): '''Derive OpenStack release codename from an installed package.''' import apt_pkg as apt - apt.init() - # Tell apt to build an in-memory cache to prevent race conditions (if - # another process is already building the cache). - apt.config.set("Dir::Cache::pkgcache", "") - - cache = apt.Cache() + cache = apt_cache() try: pkg = cache[package] diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index 8d0f6116..1b958712 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -46,5 +46,8 @@ def is_device_mounted(device): :returns: boolean: True if the path represents a mounted device, False if it doesn't. ''' + is_partition = bool(re.search(r".*[0-9]+\b", device)) out = check_output(['mount']) + if is_partition: + return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index c9530433..324987e6 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -156,12 +156,15 @@ def hook_name(): class Config(dict): - """A Juju charm config dictionary that can write itself to - disk (as json) and track which values have changed since - the previous hook invocation. + """A dictionary representation of the charm's config.yaml, with some + extra features: - Do not instantiate this object directly - instead call - ``hookenv.config()`` + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. 
Example usage:: @@ -170,8 +173,8 @@ class Config(dict): >>> config = hookenv.config() >>> config['foo'] 'bar' + >>> # store a new key/value for later use >>> config['mykey'] = 'myval' - >>> config.save() >>> # user runs `juju set mycharm foo=baz` @@ -188,22 +191,34 @@ class Config(dict): >>> # keys/values that we add are preserved across hooks >>> config['mykey'] 'myval' - >>> # don't forget to save at the end of hook! - >>> config.save() """ CONFIG_FILE_NAME = '.juju-persistent-config' def __init__(self, *args, **kw): super(Config, self).__init__(*args, **kw) + self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() + def __getitem__(self, key): + """For regular dict lookups, check the current juju config first, + then the previous (saved) copy. This ensures that user-saved values + will be returned by a dict lookup. + + """ + try: + return dict.__getitem__(self, key) + except KeyError: + return (self._prev_dict or {})[key] + def load_previous(self, path=None): - """Load previous copy of config from disk so that current values - can be compared to previous values. + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. :param path: @@ -218,8 +233,8 @@ class Config(dict): self._prev_dict = json.load(f) def changed(self, key): - """Return true if the value for this key has changed since - the last save. + """Return True if the current value for this key is different from + the previous value. """ if self._prev_dict is None: @@ -228,7 +243,7 @@ class Config(dict): def previous(self, key): """Return previous value for this key, or None if there - is no "previous" value. + is no previous value. """ if self._prev_dict: @@ -238,7 +253,13 @@ class Config(dict): def save(self): """Save this config to disk. - Preserves items in _prev_dict that do not exist in self. + If the charm is using the :mod:`Services Framework ` + or :meth:'@hook ' decorator, this + is called automatically at the end of successful hook execution. + Otherwise, it should be called directly by user code. + + To disable automatic saves, set ``implicit_save=False`` on this + instance. 
""" if self._prev_dict: @@ -285,8 +306,9 @@ def relation_get(attribute=None, unit=None, rid=None): raise -def relation_set(relation_id=None, relation_settings={}, **kwargs): +def relation_set(relation_id=None, relation_settings=None, **kwargs): """Set relation information for the current unit""" + relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) @@ -477,6 +499,9 @@ class Hooks(object): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index d934f940..3ac70143 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -12,6 +12,8 @@ import random import string import subprocess import hashlib +import shutil +from contextlib import contextmanager from collections import OrderedDict @@ -52,7 +54,7 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status']) + output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) except subprocess.CalledProcessError: return False else: @@ -62,6 +64,16 @@ def service_running(service): return False +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + return False + else: + return True + + def adduser(username, password=None, shell='/bin/bash', system_user=False): """Add a user to the system""" try: @@ -197,10 +209,15 @@ def mounts(): return system_mounts -def file_hash(path): - """Generate a md5 hash of the contents of 'path' or None if not found """ +def file_hash(path, hash_type='md5'): + """ + Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ if os.path.exists(path): - h = hashlib.md5() + h = getattr(hashlib, hash_type)() with open(path, 'r') as source: h.update(source.read()) # IGNORE:E1101 - it does have update return h.hexdigest() @@ -208,6 +225,26 @@ def file_hash(path): return None +def check_hash(path, checksum, hash_type='md5'): + """ + Validate a file using a cryptographic checksum. + + + :param str checksum: Value of the checksum used to validate the file. + :param str hash_type: Hash algorithm used to generate :param:`checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + :raises ChecksumError: If the file fails the checksum + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + pass + + def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing @@ -320,12 +357,29 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg + from charmhelpers.fetch import apt_cache if not pkgcache: - apt_pkg.init() - # Force Apt to build its cache in memory. 
That way we avoid race - # conditions with other applications building the cache in the same - # place. - apt_pkg.config.set("Dir::Cache::pkgcache", "") - pkgcache = apt_pkg.Cache() + pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@contextmanager +def chdir(d): + cur = os.getcwd() + try: + yield os.chdir(d) + finally: + os.chdir(cur) + + +def chownr(path, owner, group): + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + + for root, dirs, files in os.walk(path): + for name in dirs + files: + full = os.path.join(root, name) + broken_symlink = os.path.lexists(full) and not os.path.exists(full) + if not broken_symlink: + os.chown(full, uid, gid) diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 5be512ce..20a20ac6 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -1,4 +1,5 @@ import importlib +from tempfile import NamedTemporaryFile import time from yaml import safe_load from charmhelpers.core.host import ( @@ -116,14 +117,7 @@ class BaseFetchHandler(object): def filter_installed_packages(packages): """Returns a list of packages that require installation""" - import apt_pkg - apt_pkg.init() - - # Tell apt to build an in-memory cache to prevent race conditions (if - # another process is already building the cache). - apt_pkg.config.set("Dir::Cache::pkgcache", "") - - cache = apt_pkg.Cache() + cache = apt_cache() _pkgs = [] for package in packages: try: @@ -136,6 +130,16 @@ def filter_installed_packages(packages): return _pkgs +def apt_cache(in_memory=True): + """Build and return an apt cache""" + import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache() + + def apt_install(packages, options=None, fatal=False): """Install one or more packages""" if options is None: @@ -201,6 +205,27 @@ def apt_hold(packages, fatal=False): def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples: + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. ppa and cloud archive keys + are securely added automtically, so sould not be provided. + """ if source is None: log('Source is not present. 
Skipping') return @@ -225,10 +250,23 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + else: + raise SourceConfigError("Unknown source: {!r}".format(source)) + if key: - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile() as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) + else: + # Note that hkp: is in no way a secure protocol. Using a + # GPG key id is pointless from a security POV unless you + # absolutely trust your network and DNS. + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv', + key]) def configure_sources(update=False, @@ -238,7 +276,8 @@ def configure_sources(update=False, Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. + The frament needs to be included as a string. Sources and their + corresponding keys are of the types supported by add_source(). Example config: install_sources: | @@ -272,22 +311,35 @@ def configure_sources(update=False, apt_update(fatal=True) -def install_remote(source): +def install_remote(source, *args, **kwargs): """ Install a file tree from a remote source The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] - Schemes supported are based on this modules submodules - Options supported are submodule-specific""" + Schemes supported are based on this modules submodules. + Options supported are submodule-specific. + Additional arguments are passed through to the submodule. + + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. handlers = [h for h in plugins() if h.can_handle(source) is True] installed_to = None for handler in handlers: try: - installed_to = handler.install(source) + installed_to = handler.install(source, *args, **kwargs) except UnhandledSource: pass if not installed_to: diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 87e7071a..d1dcbc33 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -1,6 +1,8 @@ import os import urllib2 +from urllib import urlretrieve import urlparse +import hashlib from charmhelpers.fetch import ( BaseFetchHandler, @@ -10,11 +12,19 @@ from charmhelpers.payload.archive import ( get_archive_handler, extract, ) -from charmhelpers.core.host import mkdir +from charmhelpers.core.host import mkdir, check_hash class ArchiveUrlFetchHandler(BaseFetchHandler): - """Handler for archives via generic URLs""" + """ + Handler to download archive files from arbitrary URLs. + + Can fetch from http, https, ftp, and file URLs. + + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. 
+ """ def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): @@ -24,6 +34,12 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): return False def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. + """ # propogate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse.urlparse(source) @@ -48,7 +64,29 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): os.unlink(dest) raise e - def install(self, source): + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the :param:`source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. + :param str checksum: If given, validate the archive file after download. + :param str hash_type: Algorithm used to generate :param:`checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): @@ -60,4 +98,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - return extract(dld_file) + options = urlparse.parse_qs(url_parts.fragment) + for key, value in options.items(): + if key in hashlib.algorithms: + check_hash(dld_file, value, key) + if checksum: + check_hash(dld_file, checksum, hash_type) + return extract(dld_file, dest) From c03254bed48e517b054aba048b2710568cf5834d Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 22 Sep 2014 21:21:38 +0100 Subject: [PATCH 018/125] synced lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 --- charm-helpers-sync.yaml | 2 +- hooks/charmhelpers/contrib/network/ip.py | 8 +- .../charmhelpers/contrib/openstack/context.py | 2 +- hooks/charmhelpers/contrib/openstack/ip.py | 5 +- hooks/charmhelpers/core/host.py | 31 ++++- hooks/charmhelpers/core/services/helpers.py | 124 +++++++++++++++++- hooks/charmhelpers/fetch/__init__.py | 21 ++- hooks/charmhelpers/fetch/archiveurl.py | 88 +++++++------ 8 files changed, 218 insertions(+), 63 deletions(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 8af0007c..2140b49d 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index b859a097..37ecbbed 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -5,7 +5,9 @@ from functools import partial from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - ERROR, log, + WARNING, + ERROR, + log ) 
try: @@ -164,9 +166,9 @@ def format_ipv6_addr(address): if is_ipv6(address): address = "[%s]" % address else: - log("Not an valid ipv6 address: %s" % address, - level=ERROR) + log("Not a valid ipv6 address: %s" % address, level=WARNING) address = None + return address diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 902a0b1f..210238e6 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -47,7 +47,7 @@ from charmhelpers.contrib.openstack.neutron import ( from charmhelpers.contrib.network.ip import ( get_address_in_network, get_ipv6_addr, - format_ipv6_addr, + format_ipv6_addr ) CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py index 51a11e63..bc84fc45 100644 --- a/hooks/charmhelpers/contrib/openstack/ip.py +++ b/hooks/charmhelpers/contrib/openstack/ip.py @@ -66,10 +66,7 @@ def resolve_address(endpoint_type=PUBLIC): resolved_address = vip else: if config('prefer-ipv6'): - list = [] - if config('vip'): - list.append(config('vip')) - fallback_addr = get_ipv6_addr(exc_list=list)[0] + fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0] else: fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) resolved_address = get_address_in_network( diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index b85b0280..3ac70143 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -209,10 +209,15 @@ def mounts(): return system_mounts -def file_hash(path): - """Generate a md5 hash of the contents of 'path' or None if not found """ +def file_hash(path, hash_type='md5'): + """ + Generate a hash checksum of the contents of 'path' or None if not found. + + :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ if os.path.exists(path): - h = hashlib.md5() + h = getattr(hashlib, hash_type)() with open(path, 'r') as source: h.update(source.read()) # IGNORE:E1101 - it does have update return h.hexdigest() @@ -220,6 +225,26 @@ def file_hash(path): return None +def check_hash(path, checksum, hash_type='md5'): + """ + Validate a file using a cryptographic checksum. + + + :param str checksum: Value of the checksum used to validate the file. + :param str hash_type: Hash algorithm used to generate :param:`checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + :raises ChecksumError: If the file fails the checksum + """ + actual_checksum = file_hash(path, hash_type) + if checksum != actual_checksum: + raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) + + +class ChecksumError(ValueError): + pass + + def restart_on_change(restart_map, stopstart=False): """Restart services based on configuration files changing diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 4b90589b..7067b94b 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,5 @@ +import os +import yaml from charmhelpers.core import hookenv from charmhelpers.core import templating @@ -19,15 +21,21 @@ class RelationContext(dict): the `name` attribute that are complete will used to populate the dictionary values (see `get_data`, below). 
- The generated context will be namespaced under the interface type, to prevent - potential naming conflicts. + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` """ name = None interface = None required_keys = [] - def __init__(self, *args, **kwargs): - super(RelationContext, self).__init__(*args, **kwargs) + def __init__(self, name=None, additional_required_keys=None): + if name is not None: + self.name = name + if additional_required_keys is not None: + self.required_keys.extend(additional_required_keys) self.get_data() def __bool__(self): @@ -101,9 +109,115 @@ class RelationContext(dict): return {} +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + required_keys = ['host', 'user', 'password', 'database'] + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + required_keys = ['host', 'port'] + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. + """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. 
+ """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + class TemplateCallback(ManagerCallback): """ - Callback class that will render a template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready action. + + :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file """ def __init__(self, source, target, owner='root', group='root', perms=0444): self.source = source diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 8e9d3804..20a20ac6 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -311,22 +311,35 @@ def configure_sources(update=False, apt_update(fatal=True) -def install_remote(source): +def install_remote(source, *args, **kwargs): """ Install a file tree from a remote source The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] - Schemes supported are based on this modules submodules - Options supported are submodule-specific""" + Schemes supported are based on this modules submodules. + Options supported are submodule-specific. + Additional arguments are passed through to the submodule. + + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ # We ONLY check for True here because can_handle may return a string # explaining why it can't handle a given source. handlers = [h for h in plugins() if h.can_handle(source) is True] installed_to = None for handler in handlers: try: - installed_to = handler.install(source) + installed_to = handler.install(source, *args, **kwargs) except UnhandledSource: pass if not installed_to: diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 1b11fa03..d1dcbc33 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -12,21 +12,19 @@ from charmhelpers.payload.archive import ( get_archive_handler, extract, ) -from charmhelpers.core.host import mkdir +from charmhelpers.core.host import mkdir, check_hash -""" -This class is a plugin for charmhelpers.fetch.install_remote. -It grabs, validates and installs remote archives fetched over "http", "https", "ftp" or "file" protocols. The contents of the archive are installed in $CHARM_DIR/fetched/. 
- -Example usage: -install_remote("https://example.com/some/archive.tar.gz") -# Installs the contents of archive.tar.gz in $CHARM_DIR/fetched/. - -See charmhelpers.fetch.archiveurl.get_archivehandler for supported archive types. -""" class ArchiveUrlFetchHandler(BaseFetchHandler): - """Handler for archives via generic URLs""" + """ + Handler to download archive files from arbitrary URLs. + + Can fetch from http, https, ftp, and file URLs. + + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): @@ -36,6 +34,12 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): return False def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. + """ # propogate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse.urlparse(source) @@ -60,7 +64,29 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): os.unlink(dest) raise e - def install(self, source): + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the :param:`source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. + :param str checksum: If given, validate the archive file after download. + :param str hash_type: Algorithm used to generate :param:`checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): @@ -72,32 +98,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - return extract(dld_file) - - # Mandatory file validation via Sha1 or MD5 hashing. - def download_and_validate(self, url, hashsum, validate="sha1"): - if validate == 'sha1' and len(hashsum) != 40: - raise ValueError("HashSum must be = 40 characters when using sha1" - " validation") - if validate == 'md5' and len(hashsum) != 32: - raise ValueError("HashSum must be = 32 characters when using md5" - " validation") - tempfile, headers = urlretrieve(url) - self.validate_file(tempfile, hashsum, validate) - return tempfile - - # Predicate method that returns status of hash matching expected hash. 
- def validate_file(self, source, hashsum, vmethod='sha1'): - if vmethod != 'sha1' and vmethod != 'md5': - raise ValueError("Validation Method not supported") - - if vmethod == 'md5': - m = hashlib.md5() - if vmethod == 'sha1': - m = hashlib.sha1() - with open(source) as f: - for line in f: - m.update(line) - if hashsum != m.hexdigest(): - msg = "Hash Mismatch on {} expected {} got {}" - raise ValueError(msg.format(source, hashsum, m.hexdigest())) + options = urlparse.parse_qs(url_parts.fragment) + for key, value in options.items(): + if key in hashlib.algorithms: + check_hash(dld_file, value, key) + if checksum: + check_hash(dld_file, checksum, hash_type) + return extract(dld_file, dest) From de9f583b6d7136e955c12444ccb730d3baa1c795 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 23 Sep 2014 11:21:41 +0100 Subject: [PATCH 019/125] reset charm-helpers sync path to lp:charm-helpers --- charm-helpers-sync.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 2140b49d..8af0007c 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~cts-engineering/charms/trusty/charm-helpers/ipv6 +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From 11f36e16629d14fe9dcec0c0abd304e9fa548684 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 23 Sep 2014 12:00:59 +0100 Subject: [PATCH 020/125] Fixup unit tests, resync helpers --- hooks/charmhelpers/core/hookenv.py | 10 ++++++---- unit_tests/test_neutron_api_context.py | 3 +++ unit_tests/test_neutron_api_hooks.py | 2 ++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 324987e6..af8fe2db 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -486,9 +486,10 @@ class Hooks(object): hooks.execute(sys.argv) """ - def __init__(self): + def __init__(self, config_save=True): super(Hooks, self).__init__() self._hooks = {} + self._config_save = config_save def register(self, name, function): """Register a hook""" @@ -499,9 +500,10 @@ class Hooks(object): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() - cfg = config() - if cfg.implicit_save: - cfg.save() + if self._config_save: + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index e81ac10f..7aac849f 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -89,6 +89,9 @@ class HAProxyContextTest(CharmTestCase): service_ports = {'neutron-server': [9696, 9686]} ctxt_data = { + 'haproxy_host': '0.0.0.0', + 'local_host': '127.0.0.1', + 'stat_port': ':8888', 'units': unit_addresses, 'service_ports': service_ports, 'neutron_bind_port': 9686, diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 04d73901..023e017e 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -14,6 +14,8 @@ utils.restart_map = MagicMock() import neutron_api_hooks as hooks +hooks.hooks._config_save = False + utils.register_configs = _reg utils.restart_map = _map From f19bdaafa7f7f73cb6ae56b1c483510979ecdedf Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 23 Sep 2014 15:04:15 +0100 Subject: [PATCH 021/125] Alway notify other services, irrespective of whether the 
current unit is the lead unit --- hooks/neutron_api_hooks.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 0a99387e..0be2db43 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -294,11 +294,8 @@ def ha_joined(): def ha_changed(): clustered = relation_get('clustered') if not clustered or clustered in [None, 'None', '']: - log('ha_changed: hacluster subordinate not fully clustered.:' - + str(clustered)) - return - if not is_leader(CLUSTER_RES): - log('ha_changed: hacluster complete but we are not leader.') + log('ha_changed: hacluster subordinate' + ' not fully clustered: %s' % clustered) return log('Cluster configured, notifying other services and updating ' 'keystone endpoint configuration') From 78314640f2911d0773e52b55fe4ea2fde1e10bbe Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 25 Sep 2014 17:32:51 +0100 Subject: [PATCH 022/125] [hopem] Adds ipv6 privacy extensions deploy note to config.yaml --- config.yaml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/config.yaml b/config.yaml index e1636aff..065e689a 100644 --- a/config.yaml +++ b/config.yaml @@ -124,6 +124,14 @@ options: SSL CA to use with the certificate and key provided - this is only required if you are providing a privately signed ssl_cert and ssl_key. prefer-ipv6: - default: false type: boolean - description: "Enable IPv6" + default: False + description: | + If True enables IPv6 support. The charm will expect network interfaces + to be configured with an IPv6 address. If set to False (default) IPv4 + is expected. + . + NOTE: these charms do not currently support IPv6 privacy extension. In + order for this charm to function correctly, the privacy must be + disabled and a non-temporary address must be configured/available on + your network interface. From 609349336346798dc557558868b56f00ac299c44 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 25 Sep 2014 17:44:01 +0100 Subject: [PATCH 023/125] Fixed minor typo in config.yaml --- config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.yaml b/config.yaml index 065e689a..63bb1dc7 100644 --- a/config.yaml +++ b/config.yaml @@ -132,6 +132,6 @@ options: is expected. . NOTE: these charms do not currently support IPv6 privacy extension. In - order for this charm to function correctly, the privacy must be + order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. 
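The checksum plumbing introduced by the syncs above spans two layers: charmhelpers.core.host gains file_hash(), check_hash() and the ChecksumError exception, and charmhelpers.fetch.install_remote() now passes extra arguments through to the archive handler, whose install() accepts checksum/hash_type keywords or a digest embedded in the URL fragment. A minimal sketch of how charm code might use these helpers follows; it is illustrative only and not part of any patch in this series, and the URL, local path and digest values are placeholders.

    # Illustrative sketch (not from the patch series): exercising the checksum
    # helpers added by the charm-helpers sync. The URL, path and digests below
    # are placeholders.
    from charmhelpers.core.host import check_hash, ChecksumError
    from charmhelpers.core.hookenv import log, ERROR
    from charmhelpers.fetch import install_remote

    # install_remote() forwards keyword arguments to the fetch handler, so the
    # expected digest can be supplied explicitly...
    dest = install_remote('http://example.com/payload.tgz',
                          checksum='0f343b0931126a20f133d67c2b018a3b',
                          hash_type='md5')

    # ...or embedded in the URL fragment; the handler parses the fragment with
    # urlparse.parse_qs and validates any key found in hashlib.algorithms, e.g.
    #   install_remote('http://example.com/payload.tgz#md5=0f343b0931126a20f133d67c2b018a3b')

    # check_hash() can also be called on any local file. It recomputes the
    # digest via hashlib and raises ChecksumError (a ValueError subclass) on
    # mismatch.
    try:
        check_hash('/srv/payload.bin', 'deadbeefdeadbeefdeadbeefdeadbeef',
                   hash_type='md5')
    except ChecksumError as exc:
        log('payload failed checksum validation: %s' % exc, level=ERROR)

In both routes the handler runs check_hash() on the downloaded file before calling extract(), so a bad digest aborts the install before anything is unpacked.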
From 8883915773971124fbe6982e1c9427ab6c136d0c Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 26 Sep 2014 11:08:24 +0100 Subject: [PATCH 024/125] Resync trunk helpers, always redo actions when clustered --- charm-helpers-sync.yaml | 2 +- .../charmhelpers/contrib/hahelpers/apache.py | 12 ++++-- .../contrib/openstack/amulet/deployment.py | 37 ++++++++++++++++++- .../contrib/openstack/amulet/utils.py | 9 +++-- .../charmhelpers/contrib/openstack/context.py | 15 +++++++- .../contrib/openstack/templates/haproxy.cfg | 9 +++++ hooks/charmhelpers/core/host.py | 12 +++--- hooks/charmhelpers/fetch/__init__.py | 3 +- hooks/charmhelpers/fetch/archiveurl.py | 13 ++++--- hooks/neutron_api_hooks.py | 2 - unit_tests/test_neutron_api_hooks.py | 15 -------- 11 files changed, 87 insertions(+), 42 deletions(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 28221425..8af0007c 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/multiple-https-networks +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py index 6595ddb8..6616ffff 100644 --- a/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -20,7 +20,7 @@ from charmhelpers.core.hookenv import ( ) -def get_cert(cn): +def get_cert(cn=None): # TODO: deal with multiple https endpoints via charm config cert = config_get('ssl_cert') key = config_get('ssl_key') @@ -28,13 +28,19 @@ def get_cert(cn): log("Inspecting identity-service relations for SSL certificate.", level=INFO) cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 'ssl_key' for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): if not cert: - cert = relation_get('ssl_cert_{}'.format(cn), + cert = relation_get(ssl_cert_attr, rid=r_id, unit=unit) if not key: - key = relation_get('ssl_key_{}'.format(cn), + key = relation_get(ssl_key_attr, rid=r_id, unit=unit) return (cert, key) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 9179eeb1..10d3b506 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,6 @@ +from bzrlib.branch import Branch +import os +import re from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -16,11 +19,41 @@ class OpenStackAmuletDeployment(AmuletDeployment): self.openstack = openstack self.source = source + def _is_dev_branch(self): + """Determine if branch being tested is a dev (i.e. next) branch.""" + branch = Branch.open(os.getcwd()) + parent = branch.get_parent() + pattern = re.compile("^.*/next/$") + if (pattern.match(parent)): + return True + else: + return False + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. + + If the branch being tested is a dev branch, then determine the + development branch locations for the other services. 
Otherwise, + the default charm store branches will be used.""" + name = 0 + if self._is_dev_branch(): + updated_services = [] + for svc in other_services: + if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: + location = 'lp:charms/{}'.format(svc[name]) + else: + temp = 'lp:~openstack-charmers/charms/trusty/{}/next' + location = temp.format(svc[name]) + updated_services.append(svc + (location,)) + other_services = updated_services + return other_services + def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin.""" + """Add services to the deployment and set openstack-origin/source.""" + name = 0 + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, other_services) - name = 0 services = other_services services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index bd327bdc..0f312b99 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -187,15 +187,16 @@ class OpenStackAmuletUtils(AmuletUtils): f = opener.open("http://download.cirros-cloud.net/version/released") version = f.read().strip() - cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) - if not os.path.exists(cirros_img): + if not os.path.exists(local_path): cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", version, cirros_img) - opener.retrieve(cirros_url, cirros_img) + opener.retrieve(cirros_url, local_path) f.close() - with open(cirros_img) as f: + with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index bd280df5..2190e6bd 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -427,6 +427,11 @@ class HAProxyContext(OSContextGenerator): 'units': cluster_hosts, } + if config('haproxy-server-timeout'): + ctxt['haproxy-server-timeout'] = config('haproxy-server-timeout') + if config('haproxy-client-timeout'): + ctxt['haproxy-client-timeout'] = config('haproxy-client-timeout') + if config('prefer-ipv6'): ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' @@ -500,9 +505,15 @@ class ApacheSSLContext(OSContextGenerator): ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) mkdir(path=ssl_dir) cert, key = get_cert(cn) - write_file(path=os.path.join(ssl_dir, 'cert_{}'.format(cn)), + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' + write_file(path=os.path.join(ssl_dir, cert_filename), content=b64decode(cert)) - write_file(path=os.path.join(ssl_dir, 'key_{}'.format(cn)), + write_file(path=os.path.join(ssl_dir, key_filename), content=b64decode(key)) def configure_ca(self): diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index ce0e2738..888ee060 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 
@@ -14,8 +14,17 @@ defaults retries 3 timeout queue 1000 timeout connect 1000 +{% if haproxy-client-timeout -%} + timeout client {{ haproxy-client-timeout }} +{% else -%} timeout client 30000 +{% endif -%} + +{% if haproxy-server-timeout -%} + timeout server {{ haproxy-server-timeout }} +{% else -%} timeout server 30000 +{% endif -%} listen stats {{ stat_port }} mode http diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 3ac70143..d7ce1e4c 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -68,8 +68,8 @@ def service_available(service_name): """Determine whether a system service is available""" try: subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - return False + except subprocess.CalledProcessError as e: + return 'unrecognized service' not in e.output else: return True @@ -229,12 +229,12 @@ def check_hash(path, checksum, hash_type='md5'): """ Validate a file using a cryptographic checksum. - :param str checksum: Value of the checksum used to validate the file. - :param str hash_type: Hash algorithm used to generate :param:`checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. + :param str hash_type: Hash algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum + """ actual_checksum = file_hash(path, hash_type) if checksum != actual_checksum: diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 20a20ac6..32a673d6 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -208,7 +208,8 @@ def add_source(source, key=None): """Add a package source to this system. @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples: + add-apt-repository(1). Examples:: + ppa:charmers/example deb https://stub:key@private.example.com/ubuntu trusty main diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index d1dcbc33..8c045650 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -74,18 +74,19 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): """ Download and install an archive file, with optional checksum validation. - The checksum can also be given on the :param:`source` URL's fragment. + The checksum can also be given on the `source` URL's fragment. For example:: handler.install('http://example.com/file.tgz#sha1=deadbeef') :param str source: URL pointing to an archive file. - :param str dest: Local destination path to install to. If not given, - installs to `$CHARM_DIR/archives/archive_file_name`. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. :param str checksum: If given, validate the archive file after download. - :param str hash_type: Algorithm used to generate :param:`checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. + :param str hash_type: Algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. 
+ """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 0be2db43..db414383 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -40,13 +40,11 @@ from neutron_api_utils import ( restart_map, NEUTRON_CONF, api_port, - CLUSTER_RES, do_openstack_upgrade, ) from charmhelpers.contrib.hahelpers.cluster import ( get_hacluster_config, - is_leader, ) from charmhelpers.payload.execd import execd_preinstall diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 023e017e..75a33ca1 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -32,7 +32,6 @@ TO_PATCH = [ 'determine_ports', 'do_openstack_upgrade', 'execd_preinstall', - 'is_leader', 'is_relation_made', 'log', 'neutron_plugin_attribute', @@ -309,7 +308,6 @@ class NeutronAPIHooksTests(CharmTestCase): self.test_relation.set({ 'clustered': 'true', }) - self.is_leader.return_value = True self.relation_ids.side_effect = self._fake_relids _n_api_rel_joined = self.patch('neutron_api_relation_joined') _id_rel_joined = self.patch('identity_joined') @@ -317,23 +315,10 @@ class NeutronAPIHooksTests(CharmTestCase): self.assertTrue(_n_api_rel_joined.called) self.assertTrue(_id_rel_joined.called) - def test_ha_changed_not_leader(self): - self.test_relation.set({ - 'clustered': 'true', - }) - self.is_leader.return_value = False - self.relation_ids.side_effect = self._fake_relids - _n_api_rel_joined = self.patch('neutron_api_relation_joined') - _id_rel_joined = self.patch('identity_joined') - self._call_hook('ha-relation-changed') - self.assertFalse(_n_api_rel_joined.called) - self.assertFalse(_id_rel_joined.called) - def test_ha_changed_not_clustered(self): self.test_relation.set({ 'clustered': None, }) - self.is_leader.return_value = False self.relation_ids.side_effect = self._fake_relids _n_api_rel_joined = self.patch('neutron_api_relation_joined') _id_rel_joined = self.patch('identity_joined') From 3a4c5e7a57a4c98a47ab723bc49f6f0fbdf49ad1 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 26 Sep 2014 12:09:43 +0100 Subject: [PATCH 025/125] Resync helpers --- hooks/charmhelpers/contrib/openstack/context.py | 4 ++-- .../charmhelpers/contrib/openstack/templates/haproxy.cfg | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 2190e6bd..43529d5c 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -428,9 +428,9 @@ class HAProxyContext(OSContextGenerator): } if config('haproxy-server-timeout'): - ctxt['haproxy-server-timeout'] = config('haproxy-server-timeout') + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') if config('haproxy-client-timeout'): - ctxt['haproxy-client-timeout'] = config('haproxy-client-timeout') + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') if config('prefer-ipv6'): ctxt['local_host'] = 'ip6-localhost' diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 888ee060..54c2d976 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -14,14 +14,14 @@ defaults retries 3 timeout queue 1000 timeout connect 1000 -{% if 
haproxy-client-timeout -%} - timeout client {{ haproxy-client-timeout }} +{% if haproxy_client_timeout -%} + timeout client {{ haproxy_client_timeout }} {% else -%} timeout client 30000 {% endif -%} -{% if haproxy-server-timeout -%} - timeout server {{ haproxy-server-timeout }} +{% if haproxy_server_timeout -%} + timeout server {{ haproxy_server_timeout }} {% else -%} timeout server 30000 {% endif -%} From 0781a760c820d975c8ed3b82fcafa76e607ae653 Mon Sep 17 00:00:00 2001 From: James Page Date: Fri, 26 Sep 2014 12:20:17 +0100 Subject: [PATCH 026/125] Fixup cluster relation to use internal address if set --- hooks/charmhelpers/core/services/__init__.py | 2 + hooks/charmhelpers/core/services/base.py | 313 +++++++++++++++++++ hooks/charmhelpers/core/services/helpers.py | 239 ++++++++++++++ hooks/charmhelpers/core/templating.py | 51 +++ hooks/cluster-relation-joined | 1 + hooks/neutron_api_hooks.py | 12 +- unit_tests/test_neutron_api_hooks.py | 3 + 7 files changed, 620 insertions(+), 1 deletion(-) create mode 100644 hooks/charmhelpers/core/services/__init__.py create mode 100644 hooks/charmhelpers/core/services/base.py create mode 100644 hooks/charmhelpers/core/services/helpers.py create mode 100644 hooks/charmhelpers/core/templating.py create mode 120000 hooks/cluster-relation-joined diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py new file mode 100644 index 00000000..e8039a84 --- /dev/null +++ b/hooks/charmhelpers/core/services/__init__.py @@ -0,0 +1,2 @@ +from .base import * +from .helpers import * diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py new file mode 100644 index 00000000..87ecb130 --- /dev/null +++ b/hooks/charmhelpers/core/services/base.py @@ -0,0 +1,313 @@ +import os +import re +import json +from collections import Iterable + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +__all__ = ['ServiceManager', 'ManagerCallback', + 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', + 'service_restart', 'service_stop'] + + +class ServiceManager(object): + def __init__(self, services=None): + """ + Register a list of services, given their definitions. + + Service definitions are dicts in the following formats (all keys except + 'service' are optional):: + + { + "service": , + "required_data": , + "provided_data": , + "data_ready": , + "data_lost": , + "start": , + "stop": , + "ports": , + } + + The 'required_data' list should contain dicts of required data (or + dependency managers that act like dicts and know how to collect the data). + Only when all items in the 'required_data' list are populated are the list + of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more + information. + + The 'provided_data' list should contain relation data providers, most likely + a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, + that will indicate a set of data to set on a given relation. + + The 'data_ready' value should be either a single callback, or a list of + callbacks, to be called when all items in 'required_data' pass `is_ready()`. + Each callback will be called with the service name as the only parameter. + After all of the 'data_ready' callbacks are called, the 'start' callbacks + are fired. + + The 'data_lost' value should be either a single callback, or a list of + callbacks, to be called when a 'required_data' item no longer passes + `is_ready()`. 
Each callback will be called with the service name as the + only parameter. After all of the 'data_lost' callbacks are called, + the 'stop' callbacks are fired. + + The 'start' value should be either a single callback, or a list of + callbacks, to be called when starting the service, after the 'data_ready' + callbacks are complete. Each callback will be called with the service + name as the only parameter. This defaults to + `[host.service_start, services.open_ports]`. + + The 'stop' value should be either a single callback, or a list of + callbacks, to be called when stopping the service. If the service is + being stopped because it no longer has all of its 'required_data', this + will be called after all of the 'data_lost' callbacks are complete. + Each callback will be called with the service name as the only parameter. + This defaults to `[services.close_ports, host.service_stop]`. + + The 'ports' value should be a list of ports to manage. The default + 'start' handler will open the ports after the service is started, + and the default 'stop' handler will close the ports prior to stopping + the service. + + + Examples: + + The following registers an Upstart service called bingod that depends on + a mongodb relation and which runs a custom `db_migrate` function prior to + restarting the service, and a Runit service called spadesd:: + + manager = services.ServiceManager([ + { + 'service': 'bingod', + 'ports': [80, 443], + 'required_data': [MongoRelation(), config(), {'my': 'data'}], + 'data_ready': [ + services.template(source='bingod.conf'), + services.template(source='bingod.ini', + target='/etc/bingod.ini', + owner='bingo', perms=0400), + ], + }, + { + 'service': 'spadesd', + 'data_ready': services.template(source='spadesd_run.j2', + target='/etc/sv/spadesd/run', + perms=0555), + 'start': runit_start, + 'stop': runit_stop, + }, + ]) + manager.manage() + """ + self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') + self._ready = None + self.services = {} + for service in services or []: + service_name = service['service'] + self.services[service_name] = service + + def manage(self): + """ + Handle the current hook by doing The Right Thing with the registered services. + """ + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.provide_data() + self.reconfigure_services() + cfg = hookenv.config() + if cfg.implicit_save: + cfg.save() + + def provide_data(self): + """ + Set the relation data for each provider in the ``provided_data`` list. + + A provider must have a `name` attribute, which indicates which relation + to set data on, and a `provide_data()` method, which returns a dict of + data to set. + """ + hook_name = hookenv.hook_name() + for service in self.services.values(): + for provider in service.get('provided_data', []): + if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): + data = provider.provide_data() + _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data + if _ready: + hookenv.relation_set(None, data) + + def reconfigure_services(self, *service_names): + """ + Update all files for one or more registered services, and, + if ready, optionally restart them. + + If no service names are given, reconfigures all registered services. 
+ """ + for service_name in service_names or self.services.keys(): + if self.is_ready(service_name): + self.fire_event('data_ready', service_name) + self.fire_event('start', service_name, default=[ + service_restart, + manage_ports]) + self.save_ready(service_name) + else: + if self.was_ready(service_name): + self.fire_event('data_lost', service_name) + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + self.save_lost(service_name) + + def stop_services(self, *service_names): + """ + Stop one or more registered services, by name. + + If no service names are given, stops all registered services. + """ + for service_name in service_names or self.services.keys(): + self.fire_event('stop', service_name, default=[ + manage_ports, + service_stop]) + + def get_service(self, service_name): + """ + Given the name of a registered service, return its service definition. + """ + service = self.services.get(service_name) + if not service: + raise KeyError('Service not registered: %s' % service_name) + return service + + def fire_event(self, event_name, service_name, default=None): + """ + Fire a data_ready, data_lost, start, or stop event on a given service. + """ + service = self.get_service(service_name) + callbacks = service.get(event_name, default) + if not callbacks: + return + if not isinstance(callbacks, Iterable): + callbacks = [callbacks] + for callback in callbacks: + if isinstance(callback, ManagerCallback): + callback(self, service_name, event_name) + else: + callback(service_name) + + def is_ready(self, service_name): + """ + Determine if a registered service is ready, by checking its 'required_data'. + + A 'required_data' item can be any mapping type, and is considered ready + if `bool(item)` evaluates as True. + """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. 
+ """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + new_ports = service.get('ports', []) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port): + old_port = int(old_port) + if old_port not in new_ports: + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + if event_name == 'start': + hookenv.open_port(port) + elif event_name == 'stop': + hookenv.close_port(port) + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..7067b94b --- /dev/null +++ b/hooks/charmhelpers/core/services/helpers.py @@ -0,0 +1,239 @@ +import os +import yaml +from charmhelpers.core import hookenv +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = None + interface = None + required_keys = [] + + def __init__(self, name=None, additional_required_keys=None): + if name is not None: + self.name = name + if additional_required_keys is not None: + self.required_keys.extend(additional_required_keys) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. 
+ """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. This + is automatically called when the RelationContext is instantiated. + + The units are sorted lexographically first by the service ID, then by + the unit ID. Thus, if an interface has two other services, 'db:1' + and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', + and 'db:2' having one unit, 'mediawiki/0', all of which have a complete + set of data, the relation data for the units will be stored in the + order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. + + If you only care about a single unit on the relation, you can just + access it as `{{ interface[0]['key'] }}`. However, if you can at all + support multiple units on a relation, you should iterate over the list, + like:: + + {% for unit in interface -%} + {{ unit['key'] }}{% if not loop.last %},{% endif %} + {%- endfor %} + + Note that since all sets of relation data from all related services and + units are in a single list, if you need to know which service or unit a + set of data came from, you'll need to extend this class to preserve + that information. + """ + if not hookenv.relation_ids(self.name): + return + + ns = self.setdefault(self.name, []) + for rid in sorted(hookenv.relation_ids(self.name)): + for unit in sorted(hookenv.related_units(rid)): + reldata = hookenv.relation_get(rid=rid, unit=unit) + if self._is_ready(reldata): + ns.append(reldata) + + def provide_data(self): + """ + Return data to be relation_set for this interface. + """ + return {} + + +class MysqlRelation(RelationContext): + """ + Relation context for the `mysql` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'db' + interface = 'mysql' + required_keys = ['host', 'user', 'password', 'database'] + + +class HttpRelation(RelationContext): + """ + Relation context for the `http` interface. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = 'website' + interface = 'http' + required_keys = ['host', 'port'] + + def provide_data(self): + return { + 'host': hookenv.unit_get('private-address'), + 'port': 80, + } + + +class RequiredConfig(dict): + """ + Data context that loads config options with one or more mandatory options. + + Once the required options have been changed from their default values, all + config options will be available, namespaced under `config` to prevent + potential naming conflicts (for example, between a config option and a + relation property). + + :param list *args: List of options that must be changed from their default values. 
+ """ + + def __init__(self, *args): + self.required_options = args + self['config'] = hookenv.config() + with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: + self.config = yaml.load(fp).get('options', {}) + + def __bool__(self): + for option in self.required_options: + if option not in self['config']: + return False + current_value = self['config'][option] + default_value = self.config[option].get('default') + if current_value == default_value: + return False + if current_value in (None, '') and default_value in (None, ''): + return False + return True + + def __nonzero__(self): + return self.__bool__() + + +class StoredContext(dict): + """ + A data context that always returns the data that it was first created with. + + This is useful to do a one-time generation of things like passwords, that + will thereafter use the same value that was originally generated, instead + of generating a new value each time it is run. + """ + def __init__(self, file_name, config_data): + """ + If the file exists, populate `self` with the data from the file. + Otherwise, populate with the given data and persist it to the file. + """ + if os.path.exists(file_name): + self.update(self.read_context(file_name)) + else: + self.store_context(file_name, config_data) + self.update(config_data) + + def store_context(self, file_name, config_data): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'w') as file_stream: + os.fchmod(file_stream.fileno(), 0600) + yaml.dump(config_data, file_stream) + + def read_context(self, file_name): + if not os.path.isabs(file_name): + file_name = os.path.join(hookenv.charm_dir(), file_name) + with open(file_name, 'r') as file_stream: + data = yaml.load(file_stream) + if not data: + raise OSError("%s is empty" % file_name) + return data + + +class TemplateCallback(ManagerCallback): + """ + Callback class that will render a Jinja2 template, for use as a ready action. + + :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to + :param str owner: The owner of the rendered file + :param str group: The group of the rendered file + :param int perms: The permissions of the rendered file + """ + def __init__(self, source, target, owner='root', group='root', perms=0444): + self.source = source + self.target = target + self.owner = owner + self.group = group + self.perms = perms + + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + context = {} + for ctx in service.get('required_data', []): + context.update(ctx) + templating.render(self.source, self.target, context, + self.owner, self.group, self.perms) + + +# Convenience aliases for templates +render_template = template = TemplateCallback diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py new file mode 100644 index 00000000..2c638853 --- /dev/null +++ b/hooks/charmhelpers/core/templating.py @@ -0,0 +1,51 @@ +import os + +from charmhelpers.core import host +from charmhelpers.core import hookenv + + +def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): + """ + Render a template. + + The `source` path, if not absolute, is relative to the `templates_dir`. + + The `target` path should be absolute. + + The context should be a dict containing the values to be replaced in the + template. 
+ + The `owner`, `group`, and `perms` options will be passed to `write_file`. + + If omitted, `templates_dir` defaults to the `templates` folder in the charm. + + Note: Using this requires python-jinja2; if it is not installed, calling + this will attempt to use charmhelpers.fetch.apt_install to install it. + """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + loader = Environment(loader=FileSystemLoader(templates_dir)) + try: + source = source + template = loader.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + host.mkdir(os.path.dirname(target)) + host.write_file(target, content, owner, group, perms) diff --git a/hooks/cluster-relation-joined b/hooks/cluster-relation-joined new file mode 120000 index 00000000..1fb10fd5 --- /dev/null +++ b/hooks/cluster-relation-joined @@ -0,0 +1 @@ +neutron_api_hooks.py \ No newline at end of file diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index db414383..a72a654f 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -56,7 +56,8 @@ from charmhelpers.contrib.openstack.ip import ( from charmhelpers.contrib.network.ip import ( get_iface_for_address, - get_netmask_for_address + get_netmask_for_address, + get_address_in_network ) hooks = Hooks() @@ -108,6 +109,7 @@ def config_changed(): amqp_joined(relation_id=r_id) for r_id in relation_ids('identity-service'): identity_joined(rid=r_id) + [cluster_joined(rid) for rid in relation_ids('cluster')] @hooks.hook('amqp-relation-joined') @@ -241,6 +243,14 @@ def neutron_plugin_api_relation_joined(rid=None): relation_set(relation_id=rid, **relation_data) +@hooks.hook('cluster-relation-joined') +def cluster_joined(relation_id=None): + address = get_address_in_network(config('os-internal-network'), + unit_get('private-address')) + relation_set(relation_id=relation_id, + relation_settings={'private-address': address}) + + @hooks.hook('cluster-relation-changed', 'cluster-relation-departed') @restart_on_change(restart_map(), stopstart=True) diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 75a33ca1..ebe0aa15 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -43,6 +43,7 @@ TO_PATCH = [ 'unit_get', 'get_iface_for_address', 'get_netmask_for_address', + 'get_address_in_network', ] NEUTRON_CONF_DIR = "/etc/neutron" @@ -94,11 +95,13 @@ class NeutronAPIHooksTests(CharmTestCase): self.patch('neutron_plugin_api_relation_joined') _amqp_rel_joined = self.patch('amqp_joined') _id_rel_joined = self.patch('identity_joined') + _id_cluster_joined = self.patch('cluster_joined') self._call_hook('config-changed') self.assertTrue(_n_api_rel_joined.called) self.assertTrue(_n_plugin_api_rel_joined.called) self.assertTrue(_amqp_rel_joined.called) self.assertTrue(_id_rel_joined.called) + self.assertTrue(_id_cluster_joined.called) self.assertTrue(self.CONFIGS.write_all.called) 
self.assertTrue(self.do_openstack_upgrade.called) From f173172e23d2d8c72e2202338f50fc0d6df5a26f Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 26 Sep 2014 13:41:15 +0100 Subject: [PATCH 027/125] synced charm-helpers --- .../charmhelpers/contrib/hahelpers/apache.py | 13 +- .../charmhelpers/contrib/hahelpers/cluster.py | 3 +- hooks/charmhelpers/contrib/network/ip.py | 68 +++++++++-- .../contrib/openstack/amulet/deployment.py | 37 +++++- .../contrib/openstack/amulet/utils.py | 9 +- .../charmhelpers/contrib/openstack/context.py | 114 ++++++++++++++---- .../contrib/openstack/templates/haproxy.cfg | 9 ++ .../templates/openstack_https_frontend | 17 +-- .../templates/openstack_https_frontend.conf | 17 +-- hooks/charmhelpers/contrib/openstack/utils.py | 17 ++- hooks/charmhelpers/core/hookenv.py | 10 +- hooks/charmhelpers/core/host.py | 12 +- hooks/charmhelpers/fetch/__init__.py | 3 +- hooks/charmhelpers/fetch/archiveurl.py | 13 +- 14 files changed, 260 insertions(+), 82 deletions(-) diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py index 8d5fb8ba..6616ffff 100644 --- a/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -20,20 +20,27 @@ from charmhelpers.core.hookenv import ( ) -def get_cert(): +def get_cert(cn=None): + # TODO: deal with multiple https endpoints via charm config cert = config_get('ssl_cert') key = config_get('ssl_key') if not (cert and key): log("Inspecting identity-service relations for SSL certificate.", level=INFO) cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 'ssl_key' for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): if not cert: - cert = relation_get('ssl_cert', + cert = relation_get(ssl_cert_attr, rid=r_id, unit=unit) if not key: - key = relation_get('ssl_key', + key = relation_get(ssl_key_attr, rid=r_id, unit=unit) return (cert, key) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 7151b1d0..6d972007 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -139,10 +139,9 @@ def https(): return True for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN rel_state = [ relation_get('https_keystone', rid=r_id, unit=unit), - relation_get('ssl_cert', rid=r_id, unit=unit), - relation_get('ssl_key', rid=r_id, unit=unit), relation_get('ca_cert', rid=r_id, unit=unit), ] # NOTE: works around (LP: #1203241) diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 37ecbbed..19f654d3 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -1,4 +1,6 @@ import glob +import re +import subprocess import sys from functools import partial @@ -172,7 +174,8 @@ def format_ipv6_addr(address): return address -def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): """ Return the assigned IP address for a given interface, if any, or []. 
""" @@ -212,26 +215,67 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=T if 'addr' in entry and entry['addr'] not in exc_list: addresses.append(entry['addr']) if fatal and not addresses: - raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) + raise Exception("Interface '%s' doesn't have any %s addresses." % + (iface, inet_type)) return addresses get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') -def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): - """ - Return the assigned IPv6 address for a given interface, if any, or []. +def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None, + dynamic_only=True): + """Get assigned IPv6 address for a given interface. + + Returns list of addresses found. If no address found, returns empty list. + + We currently only support scope global IPv6 addresses i.e. non-temporary + addresses. If no global IPv6 address is found, return the first one found + in the ipv6 address list. """ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', inc_aliases=inc_aliases, fatal=fatal, exc_list=exc_list) - remotly_addressable = [] - for address in addresses: - if not address.startswith('fe80'): - remotly_addressable.append(address) - if fatal and not remotly_addressable: - raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) - return remotly_addressable + + if addresses: + global_addrs = [] + for addr in addresses: + key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") + m = re.match(key_scope_link_local, addr) + if m: + eui_64_mac = m.group(1) + iface = m.group(2) + else: + global_addrs.append(addr) + + if global_addrs: + # Make sure any found global addresses are not temporary + cmd = ['ip', 'addr', 'show', iface] + out = subprocess.check_output(cmd) + if dynamic_only: + key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") + else: + key = re.compile("inet6 (.+)/[0-9]+ scope global.*") + + addrs = [] + for line in out.split('\n'): + line = line.strip() + m = re.match(key, line) + if m and 'temporary' not in line: + # Return the first valid address we find + for addr in global_addrs: + if m.group(1) == addr: + if not dynamic_only or \ + m.group(1).endswith(eui_64_mac): + addrs.append(addr) + + if addrs: + return addrs + + if fatal: + raise Exception("Interface '%s' doesn't have a scope global " + "non-temporary ipv6 address." % iface) + + return [] def get_bridges(vnic_dir='/sys/devices/virtual/net'): diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 9179eeb1..10d3b506 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,6 @@ +from bzrlib.branch import Branch +import os +import re from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -16,11 +19,41 @@ class OpenStackAmuletDeployment(AmuletDeployment): self.openstack = openstack self.source = source + def _is_dev_branch(self): + """Determine if branch being tested is a dev (i.e. next) branch.""" + branch = Branch.open(os.getcwd()) + parent = branch.get_parent() + pattern = re.compile("^.*/next/$") + if (pattern.match(parent)): + return True + else: + return False + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. 
+ + If the branch being tested is a dev branch, then determine the + development branch locations for the other services. Otherwise, + the default charm store branches will be used.""" + name = 0 + if self._is_dev_branch(): + updated_services = [] + for svc in other_services: + if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: + location = 'lp:charms/{}'.format(svc[name]) + else: + temp = 'lp:~openstack-charmers/charms/trusty/{}/next' + location = temp.format(svc[name]) + updated_services.append(svc + (location,)) + other_services = updated_services + return other_services + def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin.""" + """Add services to the deployment and set openstack-origin/source.""" + name = 0 + other_services = self._determine_branch_locations(other_services) super(OpenStackAmuletDeployment, self)._add_services(this_service, other_services) - name = 0 services = other_services services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index bd327bdc..0f312b99 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -187,15 +187,16 @@ class OpenStackAmuletUtils(AmuletUtils): f = opener.open("http://download.cirros-cloud.net/version/released") version = f.read().strip() - cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) - if not os.path.exists(cirros_img): + if not os.path.exists(local_path): cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", version, cirros_img) - opener.retrieve(cirros_url, cirros_img) + opener.retrieve(cirros_url, local_path) f.close() - with open(cirros_img) as f: + with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 210238e6..82c5534c 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -8,7 +8,6 @@ from subprocess import ( check_call ) - from charmhelpers.fetch import ( apt_install, filter_installed_packages, @@ -28,6 +27,11 @@ from charmhelpers.core.hookenv import ( INFO ) +from charmhelpers.core.host import ( + mkdir, + write_file +) + from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, determine_api_port, @@ -38,6 +42,7 @@ from charmhelpers.contrib.hahelpers.cluster import ( from charmhelpers.contrib.hahelpers.apache import ( get_cert, get_ca_cert, + install_ca_cert, ) from charmhelpers.contrib.openstack.neutron import ( @@ -47,7 +52,8 @@ from charmhelpers.contrib.openstack.neutron import ( from charmhelpers.contrib.network.ip import ( get_address_in_network, get_ipv6_addr, - format_ipv6_addr + format_ipv6_addr, + is_address_in_network ) CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -435,6 +441,11 @@ class HAProxyContext(OSContextGenerator): 'units': cluster_hosts, } + if config('haproxy-server-timeout'): + ctxt['haproxy-server-timeout'] = config('haproxy-server-timeout') + if config('haproxy-client-timeout'): + ctxt['haproxy-client-timeout'] = config('haproxy-client-timeout') + if 
config('prefer-ipv6'): ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' @@ -504,22 +515,36 @@ class ApacheSSLContext(OSContextGenerator): cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] check_call(cmd) - def configure_cert(self): - if not os.path.isdir('/etc/apache2/ssl'): - os.mkdir('/etc/apache2/ssl') + def configure_cert(self, cn=None): ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) - if not os.path.isdir(ssl_dir): - os.mkdir(ssl_dir) - cert, key = get_cert() - with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: - cert_out.write(b64decode(cert)) - with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: - key_out.write(b64decode(key)) + mkdir(path=ssl_dir) + cert, key = get_cert(cn) + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' + write_file(path=os.path.join(ssl_dir, cert_filename), + content=b64decode(cert)) + write_file(path=os.path.join(ssl_dir, key_filename), + content=b64decode(key)) + + def configure_ca(self): ca_cert = get_ca_cert() if ca_cert: - with open(CA_CERT_PATH, 'w') as ca_out: - ca_out.write(b64decode(ca_cert)) - check_call(['update-ca-certificates']) + install_ca_cert(b64decode(ca_cert)) + + def canonical_names(self): + '''Figure out which canonical names clients will access this service''' + cns = [] + for r_id in relation_ids('identity-service'): + for unit in related_units(r_id): + rdata = relation_get(rid=r_id, unit=unit) + for k in rdata: + if k.startswith('ssl_key_'): + cns.append(k.lstrip('ssl_key_')) + return list(set(cns)) def __call__(self): if isinstance(self.external_ports, basestring): @@ -527,21 +552,47 @@ class ApacheSSLContext(OSContextGenerator): if (not self.external_ports or not https()): return {} - self.configure_cert() + self.configure_ca() self.enable_modules() ctxt = { 'namespace': self.service_namespace, - 'private_address': unit_get('private-address'), - 'endpoints': [] + 'endpoints': [], + 'ext_ports': [] } - if is_clustered(): - ctxt['private_address'] = config('vip') - for api_port in self.external_ports: - ext_port = determine_apache_port(api_port) - int_port = determine_api_port(api_port) - portmap = (int(ext_port), int(int_port)) - ctxt['endpoints'].append(portmap) + + for cn in self.canonical_names(): + self.configure_cert(cn) + + addresses = [] + vips = [] + if config('vip'): + vips = config('vip').split() + + for network_type in ['os-internal-network', + 'os-admin-network', + 'os-public-network']: + address = get_address_in_network(config(network_type), + unit_get('private-address')) + if len(vips) > 0 and is_clustered(): + for vip in vips: + if is_address_in_network(config(network_type), + vip): + addresses.append((address, vip)) + break + elif is_clustered(): + addresses.append((address, config('vip'))) + else: + addresses.append((address, address)) + + for address, endpoint in set(addresses): + for api_port in self.external_ports: + ext_port = determine_apache_port(api_port) + int_port = determine_api_port(api_port) + portmap = (address, endpoint, int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + ctxt['ext_ports'].append(int(ext_port)) + ctxt['ext_ports'] = list(set(ctxt['ext_ports'])) return ctxt @@ -801,3 +852,16 @@ class SyslogContext(OSContextGenerator): 'use_syslog': config('use-syslog') } return ctxt + + +class BindHostContext(OSContextGenerator): + + def __call__(self): + if config('prefer-ipv6'): + return { + 'bind_host': '::' + } + else: + return { + 'bind_host': 
'0.0.0.0' + } diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index ce0e2738..888ee060 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -14,8 +14,17 @@ defaults retries 3 timeout queue 1000 timeout connect 1000 +{% if haproxy-client-timeout -%} + timeout client {{ haproxy-client-timeout }} +{% else -%} timeout client 30000 +{% endif -%} + +{% if haproxy-server-timeout -%} + timeout server {{ haproxy-server-timeout }} +{% else -%} timeout server 30000 +{% endif -%} listen stats {{ stat_port }} mode http diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend index e02dc751..ce28fa3f 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -1,16 +1,18 @@ {% if endpoints -%} -{% for ext, int in endpoints -%} -Listen {{ ext }} -NameVirtualHost *:{{ ext }} - - ServerName {{ private_address }} +{% for ext_port in ext_ports -%} +Listen {{ ext_port }} +{% endfor -%} +{% for address, endpoint, ext, int in endpoints -%} + + ServerName {{ endpoint }} SSLEngine on - SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} ProxyPass / http://localhost:{{ int }}/ ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on +{% endfor -%} Order deny,allow Allow from all @@ -19,5 +21,4 @@ NameVirtualHost *:{{ ext }} Order allow,deny Allow from all -{% endfor -%} {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf index e02dc751..ce28fa3f 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf +++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -1,16 +1,18 @@ {% if endpoints -%} -{% for ext, int in endpoints -%} -Listen {{ ext }} -NameVirtualHost *:{{ ext }} - - ServerName {{ private_address }} +{% for ext_port in ext_ports -%} +Listen {{ ext_port }} +{% endfor -%} +{% for address, endpoint, ext, int in endpoints -%} + + ServerName {{ endpoint }} SSLEngine on - SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} ProxyPass / http://localhost:{{ int }}/ ProxyPassReverse / http://localhost:{{ int }}/ ProxyPreserveHost on +{% endfor -%} Order deny,allow Allow from all @@ -19,5 +21,4 @@ NameVirtualHost *:{{ ext }} Order allow,deny Allow from all -{% endfor -%} {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 23d237de..7eecff05 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -13,7 +13,9 @@ from charmhelpers.core.hookenv import ( log as juju_log, charm_dir, ERROR, - INFO + INFO, + relation_ids, + relation_set ) from charmhelpers.contrib.storage.linux.lvm import ( @@ -22,6 
+24,10 @@ from charmhelpers.contrib.storage.linux.lvm import ( remove_lvm_physical_volume, ) +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr +) + from charmhelpers.core.host import lsb_release, mounts, umount from charmhelpers.fetch import apt_install, apt_cache from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk @@ -457,3 +463,12 @@ def get_hostname(address, fqdn=True): return result else: return result.split('.')[0] + + +def sync_db_with_multi_ipv6_addresses(): + hosts = get_ipv6_addr(dynamic_only=False) + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, + database=config('database'), + username=config('database-user'), + hostname=hosts) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 324987e6..af8fe2db 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -486,9 +486,10 @@ class Hooks(object): hooks.execute(sys.argv) """ - def __init__(self): + def __init__(self, config_save=True): super(Hooks, self).__init__() self._hooks = {} + self._config_save = config_save def register(self, name, function): """Register a hook""" @@ -499,9 +500,10 @@ class Hooks(object): hook_name = os.path.basename(args[0]) if hook_name in self._hooks: self._hooks[hook_name]() - cfg = config() - if cfg.implicit_save: - cfg.save() + if self._config_save: + cfg = config() + if cfg.implicit_save: + cfg.save() else: raise UnregisteredHookError(hook_name) diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 3ac70143..d7ce1e4c 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -68,8 +68,8 @@ def service_available(service_name): """Determine whether a system service is available""" try: subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - return False + except subprocess.CalledProcessError as e: + return 'unrecognized service' not in e.output else: return True @@ -229,12 +229,12 @@ def check_hash(path, checksum, hash_type='md5'): """ Validate a file using a cryptographic checksum. - :param str checksum: Value of the checksum used to validate the file. - :param str hash_type: Hash algorithm used to generate :param:`checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. + :param str hash_type: Hash algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. :raises ChecksumError: If the file fails the checksum + """ actual_checksum = file_hash(path, hash_type) if checksum != actual_checksum: diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 20a20ac6..32a673d6 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -208,7 +208,8 @@ def add_source(source, key=None): """Add a package source to this system. @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples: + add-apt-repository(1). 
Examples:: + ppa:charmers/example deb https://stub:key@private.example.com/ubuntu trusty main diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index d1dcbc33..8c045650 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -74,18 +74,19 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): """ Download and install an archive file, with optional checksum validation. - The checksum can also be given on the :param:`source` URL's fragment. + The checksum can also be given on the `source` URL's fragment. For example:: handler.install('http://example.com/file.tgz#sha1=deadbeef') :param str source: URL pointing to an archive file. - :param str dest: Local destination path to install to. If not given, - installs to `$CHARM_DIR/archives/archive_file_name`. + :param str dest: Local destination path to install to. If not given, + installs to `$CHARM_DIR/archives/archive_file_name`. :param str checksum: If given, validate the archive file after download. - :param str hash_type: Algorithm used to generate :param:`checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. + :param str hash_type: Algorithm used to generate `checksum`. + Can be any hash alrgorithm supported by :mod:`hashlib`, + such as md5, sha1, sha256, sha512, etc. + """ url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') From 8685e0a03ceca84142d5c2ca0e7f9102932dbc82 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 26 Sep 2014 15:51:57 +0100 Subject: [PATCH 028/125] synced charm-helpers --- hooks/charmhelpers/contrib/openstack/context.py | 4 ++-- .../charmhelpers/contrib/openstack/templates/haproxy.cfg | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 82c5534c..755e1a25 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -442,9 +442,9 @@ class HAProxyContext(OSContextGenerator): } if config('haproxy-server-timeout'): - ctxt['haproxy-server-timeout'] = config('haproxy-server-timeout') + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') if config('haproxy-client-timeout'): - ctxt['haproxy-client-timeout'] = config('haproxy-client-timeout') + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') if config('prefer-ipv6'): ctxt['local_host'] = 'ip6-localhost' diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 888ee060..54c2d976 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -14,14 +14,14 @@ defaults retries 3 timeout queue 1000 timeout connect 1000 -{% if haproxy-client-timeout -%} - timeout client {{ haproxy-client-timeout }} +{% if haproxy_client_timeout -%} + timeout client {{ haproxy_client_timeout }} {% else -%} timeout client 30000 {% endif -%} -{% if haproxy-server-timeout -%} - timeout server {{ haproxy-server-timeout }} +{% if haproxy_server_timeout -%} + timeout server {{ haproxy_server_timeout }} {% else -%} timeout server 30000 {% endif -%} From b573cc0b52e29cddf52972276150543fb5e3f3f3 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 26 Sep 2014 15:46:49 +0000 Subject: [PATCH 029/125] Run db migrations for neutron --- 
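This patch moves responsibility for the neutron schema migration into the
neutron-api charm, but gates it on the presence of a neutron-api
(nova-cloud-controller) relation so the two charms do not race to run the
migration. A condensed sketch of the gating logic follows; the names are
taken from the diffs below, the clustered/leader branching is flattened here,
and the real implementation lives in hooks/neutron_api_hooks.py and
hooks/neutron_api_utils.py.

    # Sketch only: condensed form of conditional_neutron_migration().
    from charmhelpers.core.hookenv import log, relation_get, relation_ids
    from charmhelpers.core.host import service_restart
    from charmhelpers.contrib.hahelpers.cluster import is_leader
    from neutron_api_utils import CLUSTER_RES, migrate_neutron_database

    def conditional_neutron_migration():
        # Hold off until a neutron-api relation shows a nova-cloud-controller
        # is present; historically nova-cc ran the neutron migrations.
        if not relation_ids('neutron-api'):
            log('Not running neutron database migration: no '
                'nova-cloud-controller is related yet.')
            return
        # In an HA deployment only the hacluster leader migrates; a
        # standalone unit migrates unconditionally.
        if not relation_get('clustered') or is_leader(CLUSTER_RES):
            # migrate_neutron_database() shells out to
            # 'neutron-db-manage ... upgrade head' (see neutron_api_utils.py).
            migrate_neutron_database()
            service_restart('neutron-server')

Both the shared-db and pgsql-db changed hooks call this helper once the
configuration files have been rewritten, as the hook diff below shows.
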
hooks/neutron_api_context.py | 6 ++--- hooks/neutron_api_hooks.py | 42 ++++++++++++++++++++++++++------- hooks/neutron_api_utils.py | 16 +++++++++++++ templates/icehouse/neutron.conf | 1 + 4 files changed, 54 insertions(+), 11 deletions(-) diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index bdb7df3d..ef59672f 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -67,9 +67,9 @@ class NeutronCCContext(context.NeutronContext): determine_api_port(api_port('neutron-server')) for rid in relation_ids('neutron-api'): for unit in related_units(rid): - ctxt['nova_url'] = relation_get(attribute='nova_url', - rid=rid, - unit=unit) + rdata = relation_get(rid=rid, unit=unit) + ctxt['nova_url'] = rdata.get('nova_url') + ctxt['restart_trigger'] = rdata.get('restart_trigger') if ctxt['nova_url']: return ctxt return ctxt diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 0a99387e..f01c9462 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -18,7 +18,8 @@ from charmhelpers.core.hookenv import ( ) from charmhelpers.core.host import ( - restart_on_change + restart_on_change, + service_restart, ) from charmhelpers.fetch import ( @@ -34,14 +35,15 @@ from charmhelpers.contrib.openstack.neutron import ( ) from neutron_api_utils import ( - determine_packages, - determine_ports, - register_configs, - restart_map, + CLUSTER_RES, NEUTRON_CONF, api_port, - CLUSTER_RES, + determine_packages, + determine_ports, do_openstack_upgrade, + migrate_neutron_database, + register_configs, + restart_map, ) from charmhelpers.contrib.hahelpers.cluster import ( @@ -128,6 +130,30 @@ def amqp_changed(): CONFIGS.write(NEUTRON_CONF) +def conditional_neutron_migration(): + # This is an attempt to stop a race over the db migration between nova-cc + # and neutron-api by having the migration master decided by the presence + # of the neutron-api relation. In the long term this should only be done + # the neutron-api charm and nova-cc should play no hand in it + # * neutron-api refuses to run migrations until neutron-api relation is + # present + # * nova-cc refuses to run migration if neutron-api relations is present + clustered = relation_get('clustered') + if not relation_ids('neutron-api'): + log('Not running neutron database migration, no nova-cloud-controller' + 'is present.') + else: + if clustered: + if is_leader(CLUSTER_RES): + migrate_neutron_database() + service_restart('neutron-server') + else: + log('Not running neutron database migration, not leader') + else: + migrate_neutron_database() + service_restart('neutron-server') + + @hooks.hook('shared-db-relation-joined') def db_joined(): if is_relation_made('pgsql-db'): @@ -161,7 +187,7 @@ def db_changed(): log('shared-db relation incomplete. Peer not ready?') return CONFIGS.write_all() - + conditional_neutron_migration() @hooks.hook('pgsql-db-relation-changed') @restart_on_change(restart_map()) @@ -169,7 +195,7 @@ def postgresql_neutron_db_changed(): plugin = config('neutron-plugin') # DB config might have been moved to main neutron.conf in H? 
CONFIGS.write(neutron_plugin_attribute(plugin, 'config')) - + conditional_neutron_migration() @hooks.hook('amqp-relation-broken', 'identity-service-relation-broken', diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 3a000068..75ffd5d5 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -19,6 +19,7 @@ from charmhelpers.core.hookenv import ( ) from charmhelpers.fetch import apt_update, apt_install, apt_upgrade import neutron_api_context +import subprocess TEMPLATES = 'templates/' @@ -196,3 +197,18 @@ def do_openstack_upgrade(configs): # set CONFIGS to load templates from new release configs.set_release(openstack_release=new_os_rel) + migrate_neutron_database() + + +def migrate_neutron_database(): + '''Runs neutron-db-manage to initialize a new database or migrate existing''' + log('Migrating the neutron database.') + plugin = config('neutron-plugin') + cmd = ['neutron-db-manage', + '--config-file', NEUTRON_CONF, + '--config-file', neutron_plugin_attribute(plugin, + 'config', + 'neutron'), + 'upgrade', + 'head'] + subprocess.check_output(cmd) diff --git a/templates/icehouse/neutron.conf b/templates/icehouse/neutron.conf index 9a47965c..6029f5ae 100644 --- a/templates/icehouse/neutron.conf +++ b/templates/icehouse/neutron.conf @@ -1,6 +1,7 @@ ############################################################################### # [ WARNING ] # Configuration file maintained by Juju. Local changes may be overwritten. +## Restart trigger {{ restart_trigger }} ############################################################################### [DEFAULT] verbose = {{ verbose }} From c797c0ec85864fe603ea9e9b79870f7cd2403520 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 26 Sep 2014 17:07:52 +0100 Subject: [PATCH 030/125] synced charm-helpers --- hooks/charmhelpers/contrib/openstack/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 7eecff05..2c4b52eb 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -4,6 +4,7 @@ from collections import OrderedDict import subprocess +import json import os import socket import sys @@ -471,4 +472,4 @@ def sync_db_with_multi_ipv6_addresses(): relation_set(relation_id=rid, database=config('database'), username=config('database-user'), - hostname=hosts) + hostname=json.dumps(hosts)) From 1f52fab4a9ea062893025251989bcfb5a37a3e8b Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Sat, 27 Sep 2014 01:23:25 +0800 Subject: [PATCH 031/125] sync_db_with_multi_ipv6_addresses --- hooks/neutron_api_hooks.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 8ef0e2b6..3f5e1894 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -28,7 +28,8 @@ from charmhelpers.fetch import ( from charmhelpers.contrib.openstack.utils import ( configure_installation_source, - openstack_upgrade_available + openstack_upgrade_available, + sync_db_with_multi_ipv6_addresses ) from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, @@ -105,6 +106,7 @@ def install(): def config_changed(): if config('prefer-ipv6'): setup_ipv6() + sync_db_with_multi_ipv6_addresses() global CONFIGS if openstack_upgrade_available('neutron-server'): @@ -148,12 +150,12 @@ def db_joined(): if config('prefer-ipv6'): host = get_ipv6_addr(exc_list=[config('vip')])[0] 
+ sync_db_with_multi_ipv6_addresses() else: host = unit_get('private-address') - - relation_set(database=config('database'), - username=config('database-user'), - hostname=host) + relation_set(database=config('database'), + username=config('database-user'), + hostname=host) @hooks.hook('pgsql-db-relation-joined') From 3e5158553f0c0e36220c3b042861ed1b887d610a Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 26 Sep 2014 19:30:49 +0100 Subject: [PATCH 032/125] minor cleanup --- hooks/neutron_api_hooks.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 3f5e1894..678394c1 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -149,7 +149,6 @@ def db_joined(): raise Exception(e) if config('prefer-ipv6'): - host = get_ipv6_addr(exc_list=[config('vip')])[0] sync_db_with_multi_ipv6_addresses() else: host = unit_get('private-address') From 54b4e8138dae29d758b40b1c5258ed46aff81c6f Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 26 Sep 2014 19:56:11 +0100 Subject: [PATCH 033/125] more --- hooks/charmhelpers/contrib/openstack/utils.py | 6 +++--- hooks/neutron_api_hooks.py | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 2c4b52eb..715ea9e1 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -466,10 +466,10 @@ def get_hostname(address, fqdn=True): return result.split('.')[0] -def sync_db_with_multi_ipv6_addresses(): +def sync_db_with_multi_ipv6_addresses(database, database_user): hosts = get_ipv6_addr(dynamic_only=False) for rid in relation_ids('shared-db'): relation_set(relation_id=rid, - database=config('database'), - username=config('database-user'), + database=database, + username=database_user, hostname=json.dumps(hosts)) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 678394c1..179210ce 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -106,7 +106,8 @@ def install(): def config_changed(): if config('prefer-ipv6'): setup_ipv6() - sync_db_with_multi_ipv6_addresses() + sync_db_with_multi_ipv6_addresses(config('database'), + config('database-user')) global CONFIGS if openstack_upgrade_available('neutron-server'): @@ -149,7 +150,8 @@ def db_joined(): raise Exception(e) if config('prefer-ipv6'): - sync_db_with_multi_ipv6_addresses() + sync_db_with_multi_ipv6_addresses(config('database'), + config('database-user')) else: host = unit_get('private-address') relation_set(database=config('database'), From 270b91bea2ddf7db61c0039f83b47ae9cd1ac961 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 26 Sep 2014 20:03:53 +0100 Subject: [PATCH 034/125] fixed unit test errors --- unit_tests/test_neutron_api_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unit_tests/test_neutron_api_utils.py b/unit_tests/test_neutron_api_utils.py index 4f5b53b6..a025d9e9 100644 --- a/unit_tests/test_neutron_api_utils.py +++ b/unit_tests/test_neutron_api_utils.py @@ -102,7 +102,7 @@ class TestNeutronAPIUtils(CharmTestCase): (ML2CONF, { 'services': ['neutron-server'], }), - (nutils.APACHE_24_CONF, { + (nutils.APACHE_CONF, { 'services': ['apache2'], }), (nutils.HAPROXY_CONF, { @@ -126,7 +126,7 @@ class TestNeutronAPIUtils(CharmTestCase): confs = ['/etc/neutron/neutron.conf', '/etc/default/neutron-server', '/etc/neutron/plugins/ml2/ml2_conf.ini', - 
'/etc/apache2/sites-available/openstack_https_frontend.conf', + '/etc/apache2/sites-available/openstack_https_frontend', '/etc/haproxy/haproxy.cfg'] self.assertItemsEqual(_regconfs.configs, confs) From b92e7f2e8ed81de377161eb7476a55ae9ba6d222 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Sun, 28 Sep 2014 14:14:19 +0000 Subject: [PATCH 035/125] Fixed lint --- hooks/neutron_api_context.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index edd9fbb1..ff09e584 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -71,8 +71,8 @@ class NeutronCCContext(context.NeutronContext): rid=rid, unit=unit) cell_type = relation_get(attribute='cell_type', - rid=rid, - unit=unit) + rid=rid, + unit=unit) if cell_type and not cell_type == "api": continue if ctxt['nova_url']: From a2fd5f45f3ddd824aa9a3540f6af671155f951b7 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 29 Sep 2014 16:12:37 +0100 Subject: [PATCH 036/125] Added unit tests for db migration and fixed lint --- hooks/neutron_api_hooks.py | 4 +- hooks/neutron_api_utils.py | 2 +- unit_tests/test_neutron_api_context.py | 34 ++++++++++------- unit_tests/test_neutron_api_hooks.py | 51 +++++++++++++++++++++++++- unit_tests/test_neutron_api_utils.py | 14 ++++++- 5 files changed, 86 insertions(+), 19 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index f01c9462..8411ce1b 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -187,7 +187,8 @@ def db_changed(): log('shared-db relation incomplete. Peer not ready?') return CONFIGS.write_all() - conditional_neutron_migration() + conditional_neutron_migration() + @hooks.hook('pgsql-db-relation-changed') @restart_on_change(restart_map()) @@ -197,6 +198,7 @@ def postgresql_neutron_db_changed(): CONFIGS.write(neutron_plugin_attribute(plugin, 'config')) conditional_neutron_migration() + @hooks.hook('amqp-relation-broken', 'identity-service-relation-broken', 'shared-db-relation-broken', diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 75ffd5d5..aa5e174e 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -201,7 +201,7 @@ def do_openstack_upgrade(configs): def migrate_neutron_database(): - '''Runs neutron-db-manage to initialize a new database or migrate existing''' + '''Runs neutron-db-manage to init a new database or migrate existing''' log('Migrating the neutron database.') plugin = config('neutron-plugin') cmd = ['neutron-db-manage', diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index e81ac10f..e94a3498 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -1,6 +1,5 @@ from test_utils import CharmTestCase -from test_utils import patch_open -from mock import patch, MagicMock +from mock import patch import neutron_api_context as context import charmhelpers TO_PATCH = [ @@ -56,6 +55,7 @@ class HAProxyContextTest(CharmTestCase): super(HAProxyContextTest, self).setUp(context, TO_PATCH) self.determine_api_port.return_value = 9686 self.determine_apache_port.return_value = 9686 + self.api_port = 9696 def tearDown(self): super(HAProxyContextTest, self).tearDown() @@ -65,7 +65,8 @@ class HAProxyContextTest(CharmTestCase): def test_context_No_peers(self, _log, _rids): _rids.return_value = [] hap_ctxt = context.HAProxyContext() - self.assertTrue('units' not in hap_ctxt()) + with patch('__builtin__.__import__'): + 
self.assertTrue('units' not in hap_ctxt()) @patch.object(charmhelpers.contrib.openstack.context, 'config') @patch.object(charmhelpers.contrib.openstack.context, 'local_unit') @@ -74,8 +75,10 @@ class HAProxyContextTest(CharmTestCase): @patch.object(charmhelpers.contrib.openstack.context, 'related_units') @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') @patch.object(charmhelpers.contrib.openstack.context, 'log') - def test_context_peers(self, _log, _rids, _runits, _rget, _uget, - _lunit, _config): + @patch('__builtin__.__import__') + @patch('__builtin__.open') + def test_context_peers(self, _open, _import, _log, _rids, _runits, _rget, + _uget, _lunit, _config): unit_addresses = { 'neutron-api-0': '10.10.10.10', 'neutron-api-1': '10.10.10.11', @@ -93,11 +96,10 @@ class HAProxyContextTest(CharmTestCase): 'service_ports': service_ports, 'neutron_bind_port': 9686, } - with patch_open() as (_open, _file): - _file.write = MagicMock() - hap_ctxt = context.HAProxyContext() - self.assertEquals(hap_ctxt(), ctxt_data) - _file.write.assert_called_with('ENABLED=1\n') + _import().api_port.return_value = 9696 + hap_ctxt = context.HAProxyContext() + self.assertEquals(hap_ctxt(), ctxt_data) + _open.assert_called_with('/etc/default/haproxy', 'w') class NeutronAPIContextsTest(CharmTestCase): @@ -119,28 +121,32 @@ class NeutronAPIContextsTest(CharmTestCase): @patch.object(context.NeutronCCContext, 'network_manager') @patch.object(context.NeutronCCContext, 'plugin') - def test_neutroncc_context_no_setting(self, plugin, nm): + @patch('__builtin__.__import__') + def test_neutroncc_context_no_setting(self, _import, plugin, nm): plugin.return_value = None - napi_ctxt = context.NeutronCCContext() ctxt_data = { 'debug': True, 'external_network': 'bob', 'neutron_bind_port': self.api_port, 'verbose': True, } + napi_ctxt = context.NeutronCCContext() with patch.object(napi_ctxt, '_ensure_packages'): self.assertEquals(ctxt_data, napi_ctxt()) @patch.object(context.NeutronCCContext, 'network_manager') @patch.object(context.NeutronCCContext, 'plugin') - def test_neutroncc_context_api_rel(self, plugin, nm): + @patch('__builtin__.__import__') + def test_neutroncc_context_api_rel(self, _import, plugin, nm): nova_url = 'http://127.0.0.10' plugin.return_value = None self.related_units.return_value = ['unit1'] self.relation_ids.return_value = ['rid2'] - self.test_relation.set({'nova_url': nova_url}) + self.test_relation.set({'nova_url': nova_url, + 'restart_trigger': 'bob'}) napi_ctxt = context.NeutronCCContext() self.assertEquals(nova_url, napi_ctxt()['nova_url']) + self.assertEquals('bob', napi_ctxt()['restart_trigger']) self.assertEquals(self.api_port, napi_ctxt()['neutron_bind_port']) def test_neutroncc_context_manager(self): diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 04d73901..2c0b2794 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -42,6 +42,8 @@ TO_PATCH = [ 'unit_get', 'get_iface_for_address', 'get_netmask_for_address', + 'migrate_neutron_database', + 'service_restart', ] NEUTRON_CONF_DIR = "/etc/neutron" @@ -154,19 +156,23 @@ class NeutronAPIHooksTests(CharmTestCase): 'Attempting to associate a postgresql database when' ' there is already associated a mysql one') - def test_shared_db_changed(self): + @patch.object(hooks, 'conditional_neutron_migration') + def test_shared_db_changed(self, cond_neutron_mig): self.CONFIGS.complete_contexts.return_value = ['shared-db'] 
self._call_hook('shared-db-relation-changed') self.assertTrue(self.CONFIGS.write_all.called) + cond_neutron_mig.assert_called_with() def test_shared_db_changed_partial_ctxt(self): self.CONFIGS.complete_contexts.return_value = [] self._call_hook('shared-db-relation-changed') self.assertFalse(self.CONFIGS.write_all.called) - def test_pgsql_db_changed(self): + @patch.object(hooks, 'conditional_neutron_migration') + def test_pgsql_db_changed(self, cond_neutron_mig): self._call_hook('pgsql-db-relation-changed') self.assertTrue(self.CONFIGS.write.called) + cond_neutron_mig.assert_called_with() def test_amqp_broken(self): self._call_hook('amqp-relation-broken') @@ -356,3 +362,44 @@ class NeutronAPIHooksTests(CharmTestCase): self.check_call.assert_called_with(['a2dissite', 'openstack_https_frontend']) self.assertTrue(_id_rel_joined.called) + + def test_conditional_neutron_migration_no_ncc_rel(self): + self.test_relation.set({ + 'clustered': 'false', + }) + self.relation_ids.return_value = [] + hooks.conditional_neutron_migration() + self.log.assert_called_with( + 'Not running neutron database migration, no nova-cloud-controller' + 'is present.' + ) + + def test_conditional_neutron_migration_ncc_rel_leader(self): + self.test_relation.set({ + 'clustered': 'true', + }) + self.is_leader.return_value = True + hooks.conditional_neutron_migration() + self.migrate_neutron_database.assert_called_with() + self.service_restart.assert_called_with('neutron-server') + + def test_conditional_neutron_migration_ncc_rel_notleader(self): + self.test_relation.set({ + 'clustered': 'true', + }) + self.is_leader.return_value = False + hooks.conditional_neutron_migration() + self.assertFalse(self.migrate_neutron_database.called) + self.assertFalse(self.service_restart.called) + self.log.assert_called_with( + 'Not running neutron database migration, not leader' + ) + + def test_conditional_neutron_migration_not_clustered(self): + self.test_relation.set({ + 'clustered': 'false', + }) + self.relation_ids.return_value = ['nova-cc/o'] + hooks.conditional_neutron_migration() + self.migrate_neutron_database.assert_called_with() + self.service_restart.assert_called_with('neutron-server') diff --git a/unit_tests/test_neutron_api_utils.py b/unit_tests/test_neutron_api_utils.py index 4f5b53b6..827d6a75 100644 --- a/unit_tests/test_neutron_api_utils.py +++ b/unit_tests/test_neutron_api_utils.py @@ -5,7 +5,9 @@ import charmhelpers.contrib.openstack.templating as templating templating.OSConfigRenderer = MagicMock() -import neutron_api_utils as nutils +with patch('charmhelpers.core.hookenv.config') as config: + config.return_value = 'neutron' + import neutron_api_utils as nutils from test_utils import ( CharmTestCase, @@ -26,6 +28,7 @@ TO_PATCH = [ 'log', 'neutron_plugin_attribute', 'os_release', + 'subprocess', ] @@ -166,3 +169,12 @@ class TestNeutronAPIUtils(CharmTestCase): self.configure_installation_source.assert_called_with( 'cloud:precise-havana' ) + + def test_migrate_neutron_database(self): + nutils.migrate_neutron_database() + cmd = ['neutron-db-manage', + '--config-file', '/etc/neutron/neutron.conf', + '--config-file', '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'upgrade', + 'head'] + self.subprocess.check_output.assert_called_with(cmd) From 999cfae8c15d44c1d37b1467a105b64abfc54873 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 30 Sep 2014 09:05:41 +0100 Subject: [PATCH 037/125] Fixup unit tests for new context helpers --- charm-helpers-sync.yaml | 2 +- .../contrib/openstack/amulet/deployment.py | 37 +------ 
.../contrib/openstack/amulet/utils.py | 9 +- .../charmhelpers/contrib/openstack/context.py | 100 ++++++++++++------ .../contrib/openstack/templates/haproxy.cfg | 29 +++-- hooks/neutron_api_hooks.py | 15 ++- unit_tests/test_neutron_api_context.py | 19 +++- 7 files changed, 112 insertions(+), 99 deletions(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 8af0007c..28221425 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/multiple-https-networks destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 10d3b506..9179eeb1 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,6 +1,3 @@ -from bzrlib.branch import Branch -import os -import re from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -19,41 +16,11 @@ class OpenStackAmuletDeployment(AmuletDeployment): self.openstack = openstack self.source = source - def _is_dev_branch(self): - """Determine if branch being tested is a dev (i.e. next) branch.""" - branch = Branch.open(os.getcwd()) - parent = branch.get_parent() - pattern = re.compile("^.*/next/$") - if (pattern.match(parent)): - return True - else: - return False - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. - - If the branch being tested is a dev branch, then determine the - development branch locations for the other services. Otherwise, - the default charm store branches will be used.""" - name = 0 - if self._is_dev_branch(): - updated_services = [] - for svc in other_services: - if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: - location = 'lp:charms/{}'.format(svc[name]) - else: - temp = 'lp:~openstack-charmers/charms/trusty/{}/next' - location = temp.format(svc[name]) - updated_services.append(svc + (location,)) - other_services = updated_services - return other_services - def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin/source.""" - name = 0 - other_services = self._determine_branch_locations(other_services) + """Add services to the deployment and set openstack-origin.""" super(OpenStackAmuletDeployment, self)._add_services(this_service, other_services) + name = 0 services = other_services services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 0f312b99..bd327bdc 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -187,16 +187,15 @@ class OpenStackAmuletUtils(AmuletUtils): f = opener.open("http://download.cirros-cloud.net/version/released") version = f.read().strip() - cirros_img = "cirros-{}-x86_64-disk.img".format(version) - local_path = os.path.join('tests', cirros_img) + cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) - if not os.path.exists(local_path): + if not os.path.exists(cirros_img): cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", version, cirros_img) - opener.retrieve(cirros_url, local_path) + opener.retrieve(cirros_url, cirros_img) f.close() - with open(local_path) as f: + with open(cirros_img) as f: image = 
glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 43529d5c..3af65252 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -52,7 +52,8 @@ from charmhelpers.contrib.openstack.neutron import ( from charmhelpers.contrib.network.ip import ( get_address_in_network, get_ipv6_addr, - is_address_in_network + is_address_in_network, + get_netmask_for_address ) CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -396,6 +397,9 @@ class CephContext(OSContextGenerator): return ctxt +ADDRESS_TYPES = ['admin', 'internal', 'public'] + + class HAProxyContext(OSContextGenerator): interfaces = ['cluster'] @@ -408,30 +412,57 @@ class HAProxyContext(OSContextGenerator): if not relation_ids('cluster'): return {} - cluster_hosts = {} l_unit = local_unit().replace('/', '-') if config('prefer-ipv6'): addr = get_ipv6_addr() else: addr = unit_get('private-address') - cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), - addr) - for rid in relation_ids('cluster'): - for unit in related_units(rid): - _unit = unit.replace('/', '-') - addr = relation_get('private-address', rid=rid, unit=unit) - cluster_hosts[_unit] = addr + cluster_hosts = {} + + # NOTE(jamespage): build out map of configured network endpoints + # and associated backends + for addr_type in ADDRESS_TYPES: + laddr = get_address_in_network( + config('os-{}-network'.format(addr_type))) + if laddr: + cluster_hosts[laddr] = {} + cluster_hosts[laddr]['network'] = "{}/{}".format( + laddr, + get_netmask_for_address(laddr) + ) + cluster_hosts[laddr]['backends'] = {} + cluster_hosts[laddr]['backends'][l_unit] = laddr + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _unit = unit.replace('/', '-') + _laddr = relation_get('{}-address'.format(addr_type), + rid=rid, unit=unit) + if _laddr: + cluster_hosts[laddr]['backends'][_unit] = _laddr + + # NOTE(jamespage) no split configurations found, just use + # private addresses + if len(cluster_hosts) < 1: + cluster_hosts[addr] = {} + cluster_hosts[addr]['network'] = "{}/{}".format( + addr, + get_netmask_for_address(addr) + ) + cluster_hosts[addr]['backends'] = {} + cluster_hosts[addr]['backends'][l_unit] = addr + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _unit = unit.replace('/', '-') + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + cluster_hosts[addr]['backends'][_unit] = _laddr ctxt = { - 'units': cluster_hosts, + 'frontends': cluster_hosts, } - if config('haproxy-server-timeout'): - ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') - if config('haproxy-client-timeout'): - ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') - if config('prefer-ipv6'): ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' @@ -441,12 +472,13 @@ class HAProxyContext(OSContextGenerator): ctxt['haproxy_host'] = '0.0.0.0' ctxt['stat_port'] = ':8888' - if len(cluster_hosts.keys()) > 1: - # Enable haproxy when we have enough peers. - log('Ensuring haproxy enabled in /etc/default/haproxy.') - with open('/etc/default/haproxy', 'w') as out: - out.write('ENABLED=1\n') - return ctxt + for frontend in cluster_hosts: + if len(cluster_hosts[frontend]['backends']) > 1: + # Enable haproxy when we have enough peers. 
+ log('Ensuring haproxy enabled in /etc/default/haproxy.') + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + return ctxt log('HAProxy context is incomplete, this unit has no peers.') return {} @@ -708,22 +740,22 @@ class NeutronContext(OSContextGenerator): class OSConfigFlagContext(OSContextGenerator): - """ - Responsible for adding user-defined config-flags in charm config to a - template context. + """ + Responsible for adding user-defined config-flags in charm config to a + template context. - NOTE: the value of config-flags may be a comma-separated list of - key=value pairs and some Openstack config files support - comma-separated lists as values. - """ + NOTE: the value of config-flags may be a comma-separated list of + key=value pairs and some Openstack config files support + comma-separated lists as values. + """ - def __call__(self): - config_flags = config('config-flags') - if not config_flags: - return {} + def __call__(self): + config_flags = config('config-flags') + if not config_flags: + return {} - flags = config_flags_parser(config_flags) - return {'user_config_flags': flags} + flags = config_flags_parser(config_flags) + return {'user_config_flags': flags} class SubordinateConfigContext(OSContextGenerator): diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 54c2d976..f6bfb65b 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -14,17 +14,8 @@ defaults retries 3 timeout queue 1000 timeout connect 1000 -{% if haproxy_client_timeout -%} - timeout client {{ haproxy_client_timeout }} -{% else -%} timeout client 30000 -{% endif -%} - -{% if haproxy_server_timeout -%} - timeout server {{ haproxy_server_timeout }} -{% else -%} timeout server 30000 -{% endif -%} listen stats {{ stat_port }} mode http @@ -34,17 +25,21 @@ listen stats {{ stat_port }} stats uri / stats auth admin:password -{% if units -%} +{% if frontends -%} {% for service, ports in service_ports.iteritems() -%} -listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }} - balance roundrobin - {% for unit, address in units.iteritems() -%} - server {{ unit }} {{ address }}:{{ ports[1] }} check +frontend tcp-in_{{ service }} + bind *:{{ ports[0] }} + bind :::{{ ports[0] }} + {% for frontend in frontends -%} + acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} + use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} {% endfor %} -listen {{ service }}_ipv6 :::{{ ports[0] }} - balance roundrobin - {% for unit, address in units.iteritems() -%} +{% for frontend in frontends -%} +backend {{ service }}_{{ frontend }} + balance leastconn + {% for unit, address in frontends[frontend]['backends'].iteritems() -%} server {{ unit }} {{ address }}:{{ ports[1] }} check {% endfor %} {% endfor -%} +{% endfor -%} {% endif -%} diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index a72a654f..30c43443 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -60,6 +60,8 @@ from charmhelpers.contrib.network.ip import ( get_address_in_network ) +from charmhelpers.contrib.openstack.context import ADDRESS_TYPES + hooks = Hooks() CONFIGS = register_configs() @@ -245,10 +247,15 @@ def neutron_plugin_api_relation_joined(rid=None): @hooks.hook('cluster-relation-joined') def cluster_joined(relation_id=None): - address = get_address_in_network(config('os-internal-network'), - 
unit_get('private-address')) - relation_set(relation_id=relation_id, - relation_settings={'private-address': address}) + for addr_type in ADDRESS_TYPES: + address = get_address_in_network( + config('os-{}-network'.format(addr_type)) + ) + if address: + relation_set( + relation_id=relation_id, + relation_settings={'{}-address'.format(addr_type): address} + ) @hooks.hook('cluster-relation-changed', diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index 7aac849f..499ae115 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -14,6 +14,7 @@ TO_PATCH = [ class IdentityServiceContext(CharmTestCase): + def setUp(self): super(IdentityServiceContext, self).setUp(context, TO_PATCH) self.relation_get.side_effect = self.test_relation.get @@ -67,6 +68,10 @@ class HAProxyContextTest(CharmTestCase): hap_ctxt = context.HAProxyContext() self.assertTrue('units' not in hap_ctxt()) + @patch.object( + charmhelpers.contrib.openstack.context, 'get_netmask_for_address') + @patch.object( + charmhelpers.contrib.openstack.context, 'get_address_in_network') @patch.object(charmhelpers.contrib.openstack.context, 'config') @patch.object(charmhelpers.contrib.openstack.context, 'local_unit') @patch.object(charmhelpers.contrib.openstack.context, 'unit_get') @@ -75,7 +80,8 @@ class HAProxyContextTest(CharmTestCase): @patch.object(charmhelpers.contrib.openstack.context, 'relation_ids') @patch.object(charmhelpers.contrib.openstack.context, 'log') def test_context_peers(self, _log, _rids, _runits, _rget, _uget, - _lunit, _config): + _lunit, _config, _get_address_in_network, + _get_netmask_for_address): unit_addresses = { 'neutron-api-0': '10.10.10.10', 'neutron-api-1': '10.10.10.11', @@ -86,13 +92,20 @@ class HAProxyContextTest(CharmTestCase): _lunit.return_value = "neutron-api/1" _uget.return_value = unit_addresses['neutron-api-1'] _config.return_value = None + _get_address_in_network.return_value = None + _get_netmask_for_address.return_value = '255.255.255.0' service_ports = {'neutron-server': [9696, 9686]} - + self.maxDiff = None ctxt_data = { 'haproxy_host': '0.0.0.0', 'local_host': '127.0.0.1', 'stat_port': ':8888', - 'units': unit_addresses, + 'frontends': { + '10.10.10.11': { + 'network': '10.10.10.11/255.255.255.0', + 'backends': unit_addresses, + } + }, 'service_ports': service_ports, 'neutron_bind_port': 9686, } From 769199292ef99e300ce54ba0c7598f841c8c7232 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 30 Sep 2014 09:10:40 +0100 Subject: [PATCH 038/125] Reassert that neutron-api is the endpoint for the neutron-api service when joining with the cloud-controller incase it has registered the endpoint --- hooks/neutron_api_hooks.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 8411ce1b..1762f394 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -1,6 +1,7 @@ #!/usr/bin/python import sys +import uuid from subprocess import check_call from charmhelpers.core.hookenv import ( @@ -208,7 +209,7 @@ def relation_broken(): @hooks.hook('identity-service-relation-joined') -def identity_joined(rid=None): +def identity_joined(rid=None, relation_trigger=False): public_url = '{}:{}'.format(canonical_url(CONFIGS, PUBLIC), api_port('neutron-server')) admin_url = '{}:{}'.format(canonical_url(CONFIGS, ADMIN), @@ -216,14 +217,16 @@ def identity_joined(rid=None): internal_url = '{}:{}'.format(canonical_url(CONFIGS, INTERNAL), 
api_port('neutron-server') ) - endpoints = { + rel_settings = { 'quantum_service': 'quantum', 'quantum_region': config('region'), 'quantum_public_url': public_url, 'quantum_admin_url': admin_url, 'quantum_internal_url': internal_url, } - relation_set(relation_id=rid, relation_settings=endpoints) + if relation_trigger: + rel_settings['relation_trigger'] = str(uuid.uuid4()) + relation_set(relation_id=rid, relation_settings=rel_settings) @hooks.hook('identity-service-relation-changed') @@ -254,7 +257,7 @@ def neutron_api_relation_joined(rid=None): # Nova-cc may have grabbed the quantum endpoint so kick identity-service # relation to register that its here for r_id in relation_ids('identity-service'): - identity_joined(rid=r_id) + identity_joined(rid=r_id, relation_trigger=True) @hooks.hook('neutron-api-relation-changed') From e6238071b6096d86b6dec14593cd1bc0d930deed Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 30 Sep 2014 10:45:19 +0100 Subject: [PATCH 039/125] Unconditionally enable the l2_population driver. --- templates/icehouse/ml2_conf.ini | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/templates/icehouse/ml2_conf.ini b/templates/icehouse/ml2_conf.ini index 5474b098..1bbf45e6 100644 --- a/templates/icehouse/ml2_conf.ini +++ b/templates/icehouse/ml2_conf.ini @@ -6,7 +6,7 @@ [ml2] type_drivers = gre,vxlan tenant_network_types = gre,vxlan -mechanism_drivers = openvswitch +mechanism_drivers = openvswitch,l2population [ml2_type_gre] tunnel_id_ranges = 1:1000 @@ -20,7 +20,6 @@ local_ip = {{ local_ip }} [agent] tunnel_types = gre -l2_population = {{ l2_population }} [securitygroup] {% if neutron_security_groups -%} From 0531c71d1d0a34af402f55a57d8bb8b72d68bc6d Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 30 Sep 2014 10:57:04 +0100 Subject: [PATCH 040/125] Write out configurations in ha-changed to avoid missing switch to VIP's --- hooks/neutron_api_hooks.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 30c43443..f97c0e48 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -306,6 +306,7 @@ def ha_joined(): @hooks.hook('ha-relation-changed') +@restart_on_change(restart_map()) def ha_changed(): clustered = relation_get('clustered') if not clustered or clustered in [None, 'None', '']: @@ -318,6 +319,7 @@ def ha_changed(): identity_joined(rid=rid) for rid in relation_ids('neutron-api'): neutron_api_relation_joined(rid=rid) + CONFIGS.write_all() def main(): From 9948e8fc5fb4d3e52c25031381fba49307d7a34b Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 30 Sep 2014 11:04:14 +0100 Subject: [PATCH 041/125] Backout last change --- hooks/neutron_api_hooks.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index f97c0e48..30c43443 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -306,7 +306,6 @@ def ha_joined(): @hooks.hook('ha-relation-changed') -@restart_on_change(restart_map()) def ha_changed(): clustered = relation_get('clustered') if not clustered or clustered in [None, 'None', '']: @@ -319,7 +318,6 @@ def ha_changed(): identity_joined(rid=rid) for rid in relation_ids('neutron-api'): neutron_api_relation_joined(rid=rid) - CONFIGS.write_all() def main(): From c413323e0d8492f9ff3f089f94c7449728725a0b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 30 Sep 2014 13:54:40 +0100 Subject: [PATCH 042/125] synced charm-helpers --- hooks/charmhelpers/contrib/network/ip.py | 41 
++++++++++++++++++- hooks/charmhelpers/contrib/openstack/utils.py | 19 ++++++--- 2 files changed, 54 insertions(+), 6 deletions(-) diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 19f654d3..9a3c2bfa 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -5,6 +5,7 @@ import sys from functools import partial +from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( WARNING, @@ -222,12 +223,50 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') -def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None, +def get_iface_from_addr(addr): + """Work out on which interface the provided address is configured.""" + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + for inet_type in addresses: + for _addr in addresses[inet_type]: + _addr = _addr['addr'] + # link local + ll_key = re.compile("(.+)%.*") + raw = re.match(ll_key, _addr) + if raw: + _addr = raw.group(1) + if _addr == addr: + log("Address '%s' is configured on iface '%s'" % + (addr, iface)) + return iface + + msg = "Unable to infer net iface on which '%s' is configured" % (addr) + raise Exception(msg) + + +def sniff_iface(f): + """If no iface provided, inject net iface inferred from unit private + address. + """ + def iface_sniffer(*args, **kwargs): + if not kwargs.get('iface', None): + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) + + return f(*args, **kwargs) + + return iface_sniffer + + +@sniff_iface +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, dynamic_only=True): """Get assigned IPv6 address for a given interface. Returns list of addresses found. If no address found, returns empty list. + If iface is None, we infer the current primary interface by doing a reverse + lookup on the unit private-address. + We currently only support scope global IPv6 addresses i.e. non-temporary addresses. If no global IPv6 address is found, return the first one found in the ipv6 address list. 
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 715ea9e1..91b8c7b8 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -466,10 +466,19 @@ def get_hostname(address, fqdn=True): return result.split('.')[0] -def sync_db_with_multi_ipv6_addresses(database, database_user): +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): hosts = get_ipv6_addr(dynamic_only=False) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + keys = kwargs.keys() + for key in keys: + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + for rid in relation_ids('shared-db'): - relation_set(relation_id=rid, - database=database, - username=database_user, - hostname=json.dumps(hosts)) + relation_set(relation_id=rid, **kwargs) From dc63e453a64ad22e9c31c783a7b39bbf624485ba Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 30 Sep 2014 14:10:00 +0100 Subject: [PATCH 043/125] [trivial] Change default mcastport to avoid conflicts with nova-cc --- config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.yaml b/config.yaml index a9e47a13..9d7a7cd8 100644 --- a/config.yaml +++ b/config.yaml @@ -73,7 +73,7 @@ options: with the other members of the HA Cluster. ha-mcastport: type: int - default: 5404 + default: 5414 description: | Default multicast port number that will be used to communicate between HA Cluster nodes. From a45fbf654ef08b5f000fd5421570ce8806fd239d Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 30 Sep 2014 14:34:17 +0100 Subject: [PATCH 044/125] Pick a better default port --- config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.yaml b/config.yaml index 9d7a7cd8..87f8c5f3 100644 --- a/config.yaml +++ b/config.yaml @@ -73,7 +73,7 @@ options: with the other members of the HA Cluster. ha-mcastport: type: int - default: 5414 + default: 5424 description: | Default multicast port number that will be used to communicate between HA Cluster nodes. 
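Editor's note: the sniff_iface decorator introduced in the charm-helpers sync above (PATCH 042) is easier to follow in isolation. The following is a minimal, self-contained sketch, not the charm-helpers implementation: lookup_iface() below is a hypothetical stand-in for the get_iface_from_addr(unit_get('private-address')) lookup the real code performs.

def sniff_iface(f):
    # Decorator sketch: inject a default 'iface' keyword argument when the
    # caller does not supply one.
    def iface_sniffer(*args, **kwargs):
        if not kwargs.get('iface', None):
            kwargs['iface'] = lookup_iface()  # assumed helper, see note above
        return f(*args, **kwargs)
    return iface_sniffer


def lookup_iface():
    # Hypothetical stand-in; the real helper resolves the interface carrying
    # the unit's private-address via netifaces.
    return 'eth0'


@sniff_iface
def get_ipv6_addr(iface=None, fatal=True):
    # The decorated function can assume 'iface' is always populated.
    return ['fe80::1%{}'.format(iface)]


print(get_ipv6_addr())             # iface inferred, behaves as iface='eth0'
print(get_ipv6_addr(iface='br0'))  # an explicit iface is left untouched

Worth noting: the decorator only inspects keyword arguments, so callers are expected to pass iface by keyword (or omit it), as the charm code above does.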
From 95df1deddeae38d64a259106c982944e189431c6 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 30 Sep 2014 14:49:52 +0100 Subject: [PATCH 045/125] fixed ipv6 compatibility check --- hooks/neutron_api_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 2074a64d..88b283bf 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -227,9 +227,9 @@ def migrate_neutron_database(): def setup_ipv6(): - ubuntu_rel = float(lsb_release()['DISTRIB_RELEASE']) - if ubuntu_rel < 'trusty': - raise Exception("IPv6 is not supported for Ubuntu " + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower() + if ubuntu_rel < "trusty": + raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") # NOTE(xianghui): Need to install haproxy(1.5.3) from trusty-backports From f0dcfbfe6ec15d49c42667813fe58fe33b84ce36 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 30 Sep 2014 15:38:57 +0000 Subject: [PATCH 046/125] Add support for using vxlan network type driver --- config.yaml | 9 +++++++++ hooks/neutron_api_context.py | 1 + hooks/neutron_api_hooks.py | 1 + templates/icehouse/ml2_conf.ini | 2 +- 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/config.yaml b/config.yaml index 87f8c5f3..c3098f32 100644 --- a/config.yaml +++ b/config.yaml @@ -57,6 +57,15 @@ options: ovs - OpenvSwitch Plugin nvp - Nicira Network Virtualization Platform . + neutron-ml2-typedriver: + default: gre + type: string + description: | + ML2 network type driver to use; supports + . + gre + vxlan + . # HA configuration settings vip: type: string diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index ca3317e1..f8ba49b6 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -70,6 +70,7 @@ class NeutronCCContext(context.NeutronContext): from neutron_api_utils import api_port ctxt = super(NeutronCCContext, self).__call__() ctxt['l2_population'] = self.neutron_l2_population + ctxt['ml2_typedriver'] = config('neutron-ml2-typedriver') ctxt['external_network'] = config('neutron-external-network') ctxt['verbose'] = config('verbose') ctxt['debug'] = config('debug') diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 75927765..76539b0e 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -272,6 +272,7 @@ def neutron_plugin_api_relation_joined(rid=None): relation_data = { 'neutron-security-groups': config('neutron-security-groups'), 'l2-population': get_l2population(), + 'neutron-ml2-typedriver': config('neutron-ml2-typedriver'), } relation_set(relation_id=rid, **relation_data) diff --git a/templates/icehouse/ml2_conf.ini b/templates/icehouse/ml2_conf.ini index 1bbf45e6..0c9d7438 100644 --- a/templates/icehouse/ml2_conf.ini +++ b/templates/icehouse/ml2_conf.ini @@ -19,7 +19,7 @@ enable_tunneling = True local_ip = {{ local_ip }} [agent] -tunnel_types = gre +tunnel_types = {{ ml2_typedriver }} [securitygroup] {% if neutron_security_groups -%} From 0f36f8777df9bf55b2db2028a8935440a68cdc9b Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 1 Oct 2014 00:15:33 +0800 Subject: [PATCH 047/125] Refactor code. 
--- hooks/neutron_api_context.py | 11 ----------- hooks/neutron_api_hooks.py | 6 +++--- hooks/neutron_api_utils.py | 2 +- 3 files changed, 4 insertions(+), 15 deletions(-) diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index 5132869c..ca3317e1 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -111,14 +111,3 @@ class HAProxyContext(context.HAProxyContext): # for haproxy.conf ctxt['service_ports'] = port_mapping return ctxt - - -class NeutronCCIPv6Context(context.SharedDBContext): - def __call__(self): - ctxt = super(NeutronCCIPv6Context, self).__call__() - if config('prefer-ipv6'): - ctxt['bind_host'] = '::' - else: - ctxt['bind_host'] = '0.0.0.0' - - return ctxt diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 854894af..cca99071 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -24,8 +24,7 @@ from charmhelpers.core.host import ( ) from charmhelpers.fetch import ( - apt_install, - apt_update + apt_install, apt_update ) from charmhelpers.contrib.openstack.utils import ( @@ -343,7 +342,8 @@ def ha_joined(): ) vip_group.append(vip_key) - relation_set(groups={'grp_neutron_vips': ' '.join(vip_group)}) + if len(vip_group) >= 1: + relation_set(groups={'grp_neutron_vips': ' '.join(vip_group)}) init_services = { 'res_neutron_haproxy': 'haproxy' diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 88b283bf..8ed5772e 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -74,7 +74,7 @@ BASE_RESOURCE_MAP = OrderedDict([ neutron_api_context.IdentityServiceContext(), neutron_api_context.NeutronCCContext(), context.SyslogContext(), - neutron_api_context.NeutronCCIPv6Context()], + context.BindHostContext()], }), (NEUTRON_DEFAULT, { 'services': ['neutron-server'], From 18e08d951f35f97f27aa97a86adee5a793e9b2bd Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 1 Oct 2014 00:18:53 +0800 Subject: [PATCH 048/125] Revert unneccessary change. 
--- hooks/neutron_api_hooks.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index cca99071..ba93a524 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -37,15 +37,15 @@ from charmhelpers.contrib.openstack.neutron import ( ) from neutron_api_utils import ( + CLUSTER_RES, + NEUTRON_CONF, + api_port, determine_packages, determine_ports, + do_openstack_upgrade, migrate_neutron_database, register_configs, restart_map, - NEUTRON_CONF, - api_port, - CLUSTER_RES, - do_openstack_upgrade, setup_ipv6 ) from neutron_api_context import get_l2population From 34f623c63d45d924b301b0c482efed7384096ae5 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 1 Oct 2014 08:31:39 +0000 Subject: [PATCH 049/125] Don't run neutron db migrations pre-juno as neutron-server does it itself --- hooks/neutron_api_hooks.py | 4 ++++ unit_tests/test_neutron_api_hooks.py | 15 +++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 75927765..cf19f00d 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -30,6 +30,7 @@ from charmhelpers.fetch import ( from charmhelpers.contrib.openstack.utils import ( configure_installation_source, openstack_upgrade_available, + os_release, ) from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, @@ -144,6 +145,9 @@ def conditional_neutron_migration(): if not relation_ids('neutron-api'): log('Not running neutron database migration, no nova-cloud-controller' 'is present.') + elif os_release('nova-common') <= 'icehouse': + log('Not running neutron database migration as migrations are handled' + 'by the neutron-server process.') else: if clustered: if is_leader(CLUSTER_RES): diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 775f8395..fd046d60 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -39,6 +39,7 @@ TO_PATCH = [ 'neutron_plugin_attribute', 'open_port', 'openstack_upgrade_available', + 'os_release', 'relation_get', 'relation_ids', 'relation_set', @@ -379,11 +380,23 @@ class NeutronAPIHooksTests(CharmTestCase): 'is present.' ) + def test_conditional_neutron_migration_icehouse(self): + self.test_relation.set({ + 'clustered': 'false', + }) + self.os_release.return_value = 'icehouse' + hooks.conditional_neutron_migration() + self.log.assert_called_with( + 'Not running neutron database migration as migrations are handled' + 'by the neutron-server process.' 
+ ) + def test_conditional_neutron_migration_ncc_rel_leader(self): self.test_relation.set({ 'clustered': 'true', }) self.is_leader.return_value = True + self.os_release.return_value = 'juno' hooks.conditional_neutron_migration() self.migrate_neutron_database.assert_called_with() self.service_restart.assert_called_with('neutron-server') @@ -393,6 +406,7 @@ class NeutronAPIHooksTests(CharmTestCase): 'clustered': 'true', }) self.is_leader.return_value = False + self.os_release.return_value = 'juno' hooks.conditional_neutron_migration() self.assertFalse(self.migrate_neutron_database.called) self.assertFalse(self.service_restart.called) @@ -405,6 +419,7 @@ class NeutronAPIHooksTests(CharmTestCase): 'clustered': 'false', }) self.relation_ids.return_value = ['nova-cc/o'] + self.os_release.return_value = 'juno' hooks.conditional_neutron_migration() self.migrate_neutron_database.assert_called_with() self.service_restart.assert_called_with('neutron-server') From dfd225c696082dae97aac6c2ac1aa1cd38bd7d9a Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 1 Oct 2014 18:00:51 +0100 Subject: [PATCH 050/125] adding cluster-relation-joined symlink --- hooks/cluster-relation-joined | 1 + 1 file changed, 1 insertion(+) create mode 120000 hooks/cluster-relation-joined diff --git a/hooks/cluster-relation-joined b/hooks/cluster-relation-joined new file mode 120000 index 00000000..1fb10fd5 --- /dev/null +++ b/hooks/cluster-relation-joined @@ -0,0 +1 @@ +neutron_api_hooks.py \ No newline at end of file From a4c22f06fb8a561ac097bdc1323fd5b1d529a91e Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 2 Oct 2014 10:49:32 +0000 Subject: [PATCH 051/125] Switch to a more descriptive name for the overlay network type --- config.yaml | 4 ++-- hooks/neutron_api_context.py | 2 +- hooks/neutron_api_hooks.py | 2 +- templates/icehouse/ml2_conf.ini | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/config.yaml b/config.yaml index c3098f32..f38584fc 100644 --- a/config.yaml +++ b/config.yaml @@ -57,11 +57,11 @@ options: ovs - OpenvSwitch Plugin nvp - Nicira Network Virtualization Platform . - neutron-ml2-typedriver: + overlay-network-type: default: gre type: string description: | - ML2 network type driver to use; supports + Overlay network type to use chooese one of: . 
gre vxlan diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index f8ba49b6..58d812f8 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -70,7 +70,7 @@ class NeutronCCContext(context.NeutronContext): from neutron_api_utils import api_port ctxt = super(NeutronCCContext, self).__call__() ctxt['l2_population'] = self.neutron_l2_population - ctxt['ml2_typedriver'] = config('neutron-ml2-typedriver') + ctxt['overlay_network_type'] = config('overlay-network-type') ctxt['external_network'] = config('neutron-external-network') ctxt['verbose'] = config('verbose') ctxt['debug'] = config('debug') diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 76539b0e..88888dce 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -272,7 +272,7 @@ def neutron_plugin_api_relation_joined(rid=None): relation_data = { 'neutron-security-groups': config('neutron-security-groups'), 'l2-population': get_l2population(), - 'neutron-ml2-typedriver': config('neutron-ml2-typedriver'), + 'overlay-network-type': config('overlay-network-type'), } relation_set(relation_id=rid, **relation_data) diff --git a/templates/icehouse/ml2_conf.ini b/templates/icehouse/ml2_conf.ini index 0c9d7438..11ca3576 100644 --- a/templates/icehouse/ml2_conf.ini +++ b/templates/icehouse/ml2_conf.ini @@ -19,7 +19,7 @@ enable_tunneling = True local_ip = {{ local_ip }} [agent] -tunnel_types = {{ ml2_typedriver }} +tunnel_types = {{ overlay_network_type }} [securitygroup] {% if neutron_security_groups -%} From a9c58661d4db539c38f7a111cfc2953bfc543b2e Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 2 Oct 2014 14:02:53 +0000 Subject: [PATCH 052/125] Fix unit tests for vxlan --- unit_tests/test_neutron_api_context.py | 1 + unit_tests/test_neutron_api_hooks.py | 1 + 2 files changed, 2 insertions(+) diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index 0df6eda7..28d61a4e 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -130,6 +130,7 @@ class NeutronAPIContextsTest(CharmTestCase): 'neutron_bind_port': self.api_port, 'verbose': True, 'l2_population': True, + 'overlay_network_type': 'gre', } napi_ctxt = context.NeutronCCContext() with patch.object(napi_ctxt, '_ensure_packages'): diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 775f8395..e77de642 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -269,6 +269,7 @@ class NeutronAPIHooksTests(CharmTestCase): _relation_data = { 'neutron-security-groups': False, 'l2-population': False, + 'overlay-network-type': 'gre', } self.get_l2population.return_value = False self._call_hook('neutron-plugin-api-relation-joined') From 06675264ed68fcdc2a1a4dbcd67f743c5ffa5519 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 2 Oct 2014 14:50:53 +0000 Subject: [PATCH 053/125] Correct typo in config.yaml. Validate overlay-network-type setting. Additional nuit tests --- config.yaml | 2 +- hooks/neutron_api_context.py | 2 ++ unit_tests/test_neutron_api_context.py | 27 ++++++++++++++++++++++++++ 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/config.yaml b/config.yaml index f38584fc..fc512fc9 100644 --- a/config.yaml +++ b/config.yaml @@ -61,7 +61,7 @@ options: default: gre type: string description: | - Overlay network type to use chooese one of: + Overlay network type to use choose one of: . 
gre vxlan diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index 58d812f8..aabd48c6 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -71,6 +71,8 @@ class NeutronCCContext(context.NeutronContext): ctxt = super(NeutronCCContext, self).__call__() ctxt['l2_population'] = self.neutron_l2_population ctxt['overlay_network_type'] = config('overlay-network-type') + if ctxt['overlay_network_type'] not in [None, 'vxlan', 'gre']: + raise Exception('Unsupported overlay-network-type') ctxt['external_network'] = config('neutron-external-network') ctxt['verbose'] = config('verbose') ctxt['debug'] = config('debug') diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index 28d61a4e..029d5348 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -136,6 +136,33 @@ class NeutronAPIContextsTest(CharmTestCase): with patch.object(napi_ctxt, '_ensure_packages'): self.assertEquals(ctxt_data, napi_ctxt()) + @patch.object(context.NeutronCCContext, 'network_manager') + @patch.object(context.NeutronCCContext, 'plugin') + @patch('__builtin__.__import__') + def test_neutroncc_context_vxlan(self, _import, plugin, nm): + plugin.return_value = None + self.test_config.set('overlay-network-type', 'vxlan') + ctxt_data = { + 'debug': True, + 'external_network': 'bob', + 'neutron_bind_port': self.api_port, + 'verbose': True, + 'l2_population': True, + 'overlay_network_type': 'vxlan', + } + napi_ctxt = context.NeutronCCContext() + with patch.object(napi_ctxt, '_ensure_packages'): + self.assertEquals(ctxt_data, napi_ctxt()) + + @patch.object(context.NeutronCCContext, 'network_manager') + @patch.object(context.NeutronCCContext, 'plugin') + @patch('__builtin__.__import__') + def test_neutroncc_context_unsupported_overlay(self, _import, plugin, nm): + plugin.return_value = None + self.test_config.set('overlay-network-type', 'bobswitch') + with self.assertRaises(Exception) as context: + context.NeutronCCContext() + @patch.object(context.NeutronCCContext, 'network_manager') @patch.object(context.NeutronCCContext, 'plugin') @patch('__builtin__.__import__') From 21651d5e9020cef80d9da191f7dc705a13044c33 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 2 Oct 2014 14:53:16 +0000 Subject: [PATCH 054/125] None would not be a super-great overlay network settings --- hooks/neutron_api_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index aabd48c6..985e5eb1 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -71,7 +71,7 @@ class NeutronCCContext(context.NeutronContext): ctxt = super(NeutronCCContext, self).__call__() ctxt['l2_population'] = self.neutron_l2_population ctxt['overlay_network_type'] = config('overlay-network-type') - if ctxt['overlay_network_type'] not in [None, 'vxlan', 'gre']: + if ctxt['overlay_network_type'] not in ['vxlan', 'gre']: raise Exception('Unsupported overlay-network-type') ctxt['external_network'] = config('neutron-external-network') ctxt['verbose'] = config('verbose') From 315703a8a0f803869fc76036551bfc93ab377892 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 2 Oct 2014 20:27:45 +0100 Subject: [PATCH 055/125] Resync --- hooks/charmhelpers/contrib/network/ip.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 9a3c2bfa..17df06fc 100644 --- 
a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -57,6 +57,8 @@ def get_address_in_network(network, fallback=None, fatal=False): else: if fatal: not_found_error_out() + else: + return None _validate_cidr(network) network = netaddr.IPNetwork(network) From 022f187de1d5060a11e6ca7b41c660c96d5d3a5b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 3 Oct 2014 09:35:19 +0000 Subject: [PATCH 056/125] Centralise processing of config('overlay-network-type') to a single static method and tidy up lint --- config.yaml | 7 +++++++ hooks/neutron_api_context.py | 15 ++++++++++++--- hooks/neutron_api_hooks.py | 7 +++++-- unit_tests/test_neutron_api_hooks.py | 4 +++- 4 files changed, 27 insertions(+), 6 deletions(-) diff --git a/config.yaml b/config.yaml index fc512fc9..d16f5ba2 100644 --- a/config.yaml +++ b/config.yaml @@ -69,6 +69,7 @@ options: # HA configuration settings vip: type: string + default: description: | Virtual IP(s) to use to front API services in HA configuration. . @@ -98,6 +99,7 @@ options: # by default all access is over 'private-address' os-admin-network: type: string + default: description: | The IP address and netmask of the OpenStack Admin network (e.g., 192.168.0.0/24) @@ -105,6 +107,7 @@ options: This network will be used for admin endpoints. os-internal-network: type: string + default: description: | The IP address and netmask of the OpenStack Internal network (e.g., 192.168.0.0/24) @@ -112,6 +115,7 @@ options: This network will be used for internal endpoints. os-public-network: type: string + default: description: | The IP address and netmask of the OpenStack Public network (e.g., 192.168.0.0/24) @@ -119,6 +123,7 @@ options: This network will be used for public endpoints. ssl_cert: type: string + default: description: | SSL certificate to install and use for API ports. Setting this value and ssl_key will enable reverse proxying, point Neutron's entry in the @@ -126,9 +131,11 @@ options: issued by Keystone (if it is configured to do so). ssl_key: type: string + default: description: SSL key to use with certificate specified as ssl_cert. ssl_ca: type: string + default: description: | SSL CA to use with the certificate and key provided - this is only required if you are providing a privately signed ssl_cert and ssl_key. 
diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index 985e5eb1..ccee2511 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -16,6 +16,13 @@ def get_l2population(): return config('l2-population') if plugin == "ovs" else False +def get_overlay_network_type(): + overlay_net = config('overlay-network-type') + if overlay_net not in ['vxlan', 'gre']: + raise Exception('Unsupported overlay-network-type') + return overlay_net + + class ApacheSSLContext(context.ApacheSSLContext): interfaces = ['https'] @@ -58,6 +65,10 @@ class NeutronCCContext(context.NeutronContext): def neutron_l2_population(self): return get_l2population() + @property + def neutron_overlay_network_type(self): + return get_overlay_network_type() + # Do not need the plugin agent installed on the api server def _ensure_packages(self): pass @@ -70,9 +81,7 @@ class NeutronCCContext(context.NeutronContext): from neutron_api_utils import api_port ctxt = super(NeutronCCContext, self).__call__() ctxt['l2_population'] = self.neutron_l2_population - ctxt['overlay_network_type'] = config('overlay-network-type') - if ctxt['overlay_network_type'] not in ['vxlan', 'gre']: - raise Exception('Unsupported overlay-network-type') + ctxt['overlay_network_type'] = self.neutron_overlay_network_type ctxt['external_network'] = config('neutron-external-network') ctxt['verbose'] = config('verbose') ctxt['debug'] = config('debug') diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 88888dce..44e6cc1e 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -46,7 +46,10 @@ from neutron_api_utils import ( register_configs, restart_map, ) -from neutron_api_context import get_l2population +from neutron_api_context import ( + get_l2population, + get_overlay_network_type, +) from charmhelpers.contrib.hahelpers.cluster import ( get_hacluster_config, @@ -272,7 +275,7 @@ def neutron_plugin_api_relation_joined(rid=None): relation_data = { 'neutron-security-groups': config('neutron-security-groups'), 'l2-population': get_l2population(), - 'overlay-network-type': config('overlay-network-type'), + 'overlay-network-type': get_overlay_network_type(), } relation_set(relation_id=rid, **relation_data) diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index e77de642..105bcc6f 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -33,6 +33,7 @@ TO_PATCH = [ 'get_iface_for_address', 'get_l2population', 'get_netmask_for_address', + 'get_overlay_network_type', 'is_leader', 'is_relation_made', 'log', @@ -269,9 +270,10 @@ class NeutronAPIHooksTests(CharmTestCase): _relation_data = { 'neutron-security-groups': False, 'l2-population': False, - 'overlay-network-type': 'gre', + 'overlay-network-type': 'vxlan', } self.get_l2population.return_value = False + self.get_overlay_network_type.return_value = 'vxlan' self._call_hook('neutron-plugin-api-relation-joined') self.relation_set.assert_called_with( relation_id=None, From 7eb176b47fbcf7b113fce01e16777eb3b3e50c56 Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 6 Oct 2014 22:21:47 +0100 Subject: [PATCH 057/125] [trivial] resync helpers --- hooks/charmhelpers/contrib/openstack/utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 91b8c7b8..b0d1b03a 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ 
b/hooks/charmhelpers/contrib/openstack/utils.py @@ -78,6 +78,8 @@ SWIFT_CODENAMES = OrderedDict([ ('1.12.0', 'icehouse'), ('1.11.0', 'icehouse'), ('2.0.0', 'juno'), + ('2.1.0', 'juno'), + ('2.2.0', 'juno'), ]) DEFAULT_LOOPBACK_SIZE = '5G' From eea94f3ba760fab18f56ca3506b02dc5b984fc7f Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 7 Oct 2014 09:24:32 +0100 Subject: [PATCH 058/125] Add support for hyperv driver + vlan and flat network types --- templates/icehouse/ml2_conf.ini | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/templates/icehouse/ml2_conf.ini b/templates/icehouse/ml2_conf.ini index 11ca3576..fd1f80d2 100644 --- a/templates/icehouse/ml2_conf.ini +++ b/templates/icehouse/ml2_conf.ini @@ -4,9 +4,9 @@ # Configuration file maintained by Juju. Local changes may be overwritten. ############################################################################### [ml2] -type_drivers = gre,vxlan -tenant_network_types = gre,vxlan -mechanism_drivers = openvswitch,l2population +type_drivers = gre,vxlan,vlan,flat +tenant_network_types = gre,vxlan,vlan,flat +mechanism_drivers = openvswitch,hyperv,l2population [ml2_type_gre] tunnel_id_ranges = 1:1000 @@ -14,6 +14,12 @@ tunnel_id_ranges = 1:1000 [ml2_type_vxlan] vni_ranges = 1001:2000 +[ml2_type_vlan] +network_vlan_ranges = physnet1:1000:2000 + +[ml2_type_flat] +flat_networks = physnet1 + [ovs] enable_tunneling = True local_ip = {{ local_ip }} From 598d83a626bd46f44d7c8f555588cfce7e5dd828 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 7 Oct 2014 10:03:12 +0100 Subject: [PATCH 059/125] [trivial] Resync to fixup MAAS support --- hooks/charmhelpers/contrib/openstack/context.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 94f8bbe1..173089f4 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -57,6 +57,8 @@ from charmhelpers.contrib.network.ip import ( is_address_in_network ) +from charmhelpers.contrib.openstack.utils import get_host_ip + CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -429,7 +431,7 @@ class HAProxyContext(OSContextGenerator): if config('prefer-ipv6'): addr = get_ipv6_addr(exc_list=[config('vip')])[0] else: - addr = unit_get('private-address') + addr = get_host_ip(unit_get('private-address')) cluster_hosts = {} From d40d306229cb7322134f6169972a24ef96f95b4c Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 7 Oct 2014 13:29:50 +0100 Subject: [PATCH 060/125] Add support for worker process configuration --- config.yaml | 7 +++++++ .../charmhelpers/contrib/openstack/context.py | 19 +++++++++++++++++++ hooks/neutron_api_utils.py | 3 ++- templates/icehouse/neutron.conf | 1 + 4 files changed, 29 insertions(+), 1 deletion(-) diff --git a/config.yaml b/config.yaml index 84f1cf36..2fb3fffa 100644 --- a/config.yaml +++ b/config.yaml @@ -158,4 +158,11 @@ options: order for this charm to function correctly, the privacy extension must be disabled and a non-temporary address must be configured/available on your network interface. + worker-multiplier: + type: int + default: 2 + description: | + The CPU core multiplier to use when configuring worker processes for + Neutron. By default, the number of workers for each daemon is set to + twice the number of CPU cores a service unit has. 
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 173089f4..538dc913 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -903,3 +903,22 @@ class BindHostContext(OSContextGenerator): return { 'bind_host': '0.0.0.0' } + + +class WorkerConfigContext(OSContextGenerator): + + @property + def num_cpus(self): + try: + from psutil import NUM_CPUS + except ImportError: + apt_install('python-psutil', fatal=True) + from psutil import NUM_CPUS + return NUM_CPUS + + def __call__(self): + multiplier = config('worker-multiplier') or 1 + ctxt = { + "workers": self.num_cpus * multiplier + } + return ctxt diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 8ed5772e..a11f2d07 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -74,7 +74,8 @@ BASE_RESOURCE_MAP = OrderedDict([ neutron_api_context.IdentityServiceContext(), neutron_api_context.NeutronCCContext(), context.SyslogContext(), - context.BindHostContext()], + context.BindHostContext(), + context.WorkerConfigContext()], }), (NEUTRON_DEFAULT, { 'services': ['neutron-server'], diff --git a/templates/icehouse/neutron.conf b/templates/icehouse/neutron.conf index 014eaf14..34433ce8 100644 --- a/templates/icehouse/neutron.conf +++ b/templates/icehouse/neutron.conf @@ -12,6 +12,7 @@ lock_path = $state_path/lock bind_host = {{ bind_host }} auth_strategy = keystone notification_driver = neutron.openstack.common.notifier.rpc_notifier +api_workers = {{ workers }} {% if neutron_bind_port -%} bind_port = {{ neutron_bind_port }} From 136ef97fe489b122809bf36a37c9e98cc35c52a2 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 9 Oct 2014 11:34:27 +0100 Subject: [PATCH 061/125] [hopem] synced lp:charm-helpers --- hooks/charmhelpers/contrib/network/ip.py | 10 ++++--- hooks/charmhelpers/core/sysctl.py | 34 ++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 3 deletions(-) create mode 100644 hooks/charmhelpers/core/sysctl.py diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 17df06fc..e62e5655 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -140,7 +140,8 @@ def _get_for_address(address, key): if address.version == 4 and netifaces.AF_INET in addresses: addr = addresses[netifaces.AF_INET][0]['addr'] netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr if address in cidr: if key == 'iface': return iface @@ -149,11 +150,14 @@ def _get_for_address(address, key): if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) + network = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + cidr = network.cidr if address in cidr: if key == 'iface': return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] else: return addr[key] return None diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py new file mode 100644 index 00000000..0f299630 --- /dev/null +++ b/hooks/charmhelpers/core/sysctl.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +__author__ = 'Jorge Niedbalski R. 
' + +import yaml + +from subprocess import check_call + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) + + +def create(sysctl_dict, sysctl_file): + """Creates a sysctl.conf file from a YAML associative array + + :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } + :type sysctl_dict: dict + :param sysctl_file: path to the sysctl file to be saved + :type sysctl_file: str or unicode + :returns: None + """ + sysctl_dict = yaml.load(sysctl_dict) + + with open(sysctl_file, "w") as fd: + for key, value in sysctl_dict.items(): + fd.write("{}={}\n".format(key, value)) + + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + level=DEBUG) + + check_call(["sysctl", "-p", sysctl_file]) From e26ddcfff01038c327d5107660a92e2480cc5c9b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 13 Oct 2014 12:44:39 +0000 Subject: [PATCH 062/125] Fix race which causes neutron migrations not to be run --- hooks/neutron_api_hooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index ad0c63d2..732219f7 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -290,6 +290,7 @@ def neutron_api_relation_joined(rid=None): @restart_on_change(restart_map()) def neutron_api_relation_changed(): CONFIGS.write(NEUTRON_CONF) + conditional_neutron_migration() @hooks.hook('neutron-plugin-api-relation-joined') From 4e34b0abce6af7cf508ef9aaab0affbf8d7a2826 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 13 Oct 2014 14:00:48 +0000 Subject: [PATCH 063/125] Update unit test --- unit_tests/test_neutron_api_hooks.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 213ff349..e350f87a 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -267,9 +267,11 @@ class NeutronAPIHooksTests(CharmTestCase): **_relation_data ) - def test_neutron_api_relation_changed(self): + @patch.object(hooks, 'conditional_neutron_migration') + def test_neutron_api_relation_changed(self, cond_neutron_mig): self._call_hook('neutron-api-relation-changed') self.assertTrue(self.CONFIGS.write.called_with(NEUTRON_CONF)) + cond_neutron_mig.assert_called_with() def test_neutron_plugin_api_relation_joined_nol2(self): _relation_data = { From 4cfb2d42c2487653643e86aaec126db765805f80 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 13 Oct 2014 15:02:40 +0000 Subject: [PATCH 064/125] Add nsx support --- config.yaml | 31 ++++++++++++++++++++++++++++++- hooks/neutron_api_context.py | 12 ++++++++++++ templates/icehouse/nsx.ini | 11 +++++++++++ 3 files changed, 53 insertions(+), 1 deletion(-) create mode 100644 templates/icehouse/nsx.ini diff --git a/config.yaml b/config.yaml index 2fb3fffa..36104bc9 100644 --- a/config.yaml +++ b/config.yaml @@ -165,4 +165,33 @@ options: The CPU core multiplier to use when configuring worker processes for Neutron. By default, the number of workers for each daemon is set to twice the number of CPU cores a service unit has. 
- + # Neutron NVP and VMware NSX plugin configuration + nvp-controllers: + type: string + default: + description: Space delimited addresses of NVP/NSX controllers + nvp-username: + type: string + default: admin + description: Username to connect to NVP/NSX controllers with + nvp-password: + type: string + default: admin + description: Password to connect to NVP/NSX controllers with + nvp-cluster-name: + type: string + default: example + description: Name of the NVP cluster configuration to create (grizzly only) + nvp-tz-uuid: + type: string + default: + description: | + This is uuid of the default NVP/NSX Transport zone that will be used for + creating tunneled isolated Quantum networks. It needs to be created + in NVP before starting Quantum with the nvp plugin. + nvp-l3-uuid: + type: string + default: + description: | + This is uuid of the default NVP/NSX L3 Gateway Service. + # end of NVP/NSX configuration diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index ccee2511..29547e37 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -80,6 +80,18 @@ class NeutronCCContext(context.NeutronContext): def __call__(self): from neutron_api_utils import api_port ctxt = super(NeutronCCContext, self).__call__() + ctxt['external_network'] = config('neutron-external-network') + if config('neutron-plugin') == 'nsx': + ctxt['nvp_username'] = config('nvp-username') + ctxt['nvp_password'] = config('nvp-password') + ctxt['nvp_cluster_name'] = config('nvp-cluster-name') + ctxt['nvp_tz_uuid'] = config('nvp-tz-uuid') + ctxt['nvp_l3_uuid'] = config('nvp-l3-uuid') + if 'nvp-controllers' in config(): + ctxt['nvp_controllers'] = \ + ','.join(config('nvp-controllers').split()) + ctxt['nvp_controllers_list'] = \ + config('nvp-controllers').split() ctxt['l2_population'] = self.neutron_l2_population ctxt['overlay_network_type'] = self.neutron_overlay_network_type ctxt['external_network'] = config('neutron-external-network') diff --git a/templates/icehouse/nsx.ini b/templates/icehouse/nsx.ini new file mode 100644 index 00000000..805d28a1 --- /dev/null +++ b/templates/icehouse/nsx.ini @@ -0,0 +1,11 @@ +# icehouse +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. 
+############################################################################### +[DEFAULT] +nsx_user = {{ nvp_username }} +nsx_password = {{ nvp_password }} +nsx_controllers = {{ nvp_controllers }} +default_tz_uuid = {{ nvp_tz_uuid }} +default_l3_gw_service_uuid = {{ nvp_l3_uuid }} From 5b530dd34c7641698fd8e62aa297338dda2f1efd Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 13 Oct 2014 15:13:09 +0000 Subject: [PATCH 065/125] A switch in config may change the required packages so install as needed --- hooks/neutron_api_hooks.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 732219f7..5258b06a 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -24,7 +24,9 @@ from charmhelpers.core.host import ( ) from charmhelpers.fetch import ( - apt_install, apt_update + apt_install, + apt_update, + filter_installed_packages, ) from charmhelpers.contrib.openstack.utils import ( @@ -112,6 +114,8 @@ def install(): @hooks.hook('config-changed') @restart_on_change(restart_map(), stopstart=True) def config_changed(): + apt_install(filter_installed_packages(determine_packages()), + fatal=True) if config('prefer-ipv6'): setup_ipv6() sync_db_with_multi_ipv6_addresses(config('database'), From 993cf0ae494a206c608f65b51dfb5941de2c40ee Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 14 Oct 2014 06:14:47 +0000 Subject: [PATCH 066/125] Lint tidy up --- hooks/neutron_api_context.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index 29547e37..3a9d76ba 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -82,11 +82,11 @@ class NeutronCCContext(context.NeutronContext): ctxt = super(NeutronCCContext, self).__call__() ctxt['external_network'] = config('neutron-external-network') if config('neutron-plugin') == 'nsx': - ctxt['nvp_username'] = config('nvp-username') - ctxt['nvp_password'] = config('nvp-password') - ctxt['nvp_cluster_name'] = config('nvp-cluster-name') - ctxt['nvp_tz_uuid'] = config('nvp-tz-uuid') - ctxt['nvp_l3_uuid'] = config('nvp-l3-uuid') + ctxt['nvp_username'] = config('nvp-username') + ctxt['nvp_password'] = config('nvp-password') + ctxt['nvp_cluster_name'] = config('nvp-cluster-name') + ctxt['nvp_tz_uuid'] = config('nvp-tz-uuid') + ctxt['nvp_l3_uuid'] = config('nvp-l3-uuid') if 'nvp-controllers' in config(): ctxt['nvp_controllers'] = \ ','.join(config('nvp-controllers').split()) From 620f738ddb51cb14b3e01a33e185bbcde9b766f5 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 14 Oct 2014 06:48:48 +0000 Subject: [PATCH 067/125] Update unit tests and pass nsx settings to nsx plugin --- hooks/neutron_api_context.py | 1 - hooks/neutron_api_hooks.py | 20 ++++++++++++---- unit_tests/test_neutron_api_context.py | 32 +++++++++++++++++++++++--- unit_tests/test_neutron_api_hooks.py | 2 ++ 4 files changed, 46 insertions(+), 9 deletions(-) diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index 3a9d76ba..081c7814 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -80,7 +80,6 @@ class NeutronCCContext(context.NeutronContext): def __call__(self): from neutron_api_utils import api_port ctxt = super(NeutronCCContext, self).__call__() - ctxt['external_network'] = config('neutron-external-network') if config('neutron-plugin') == 'nsx': ctxt['nvp_username'] = config('nvp-username') ctxt['nvp_password'] = config('nvp-password') diff --git 
a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 5258b06a..acff240b 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -299,11 +299,21 @@ def neutron_api_relation_changed(): @hooks.hook('neutron-plugin-api-relation-joined') def neutron_plugin_api_relation_joined(rid=None): - relation_data = { - 'neutron-security-groups': config('neutron-security-groups'), - 'l2-population': get_l2population(), - 'overlay-network-type': get_overlay_network_type(), - } + if config('neutron-plugin') == 'nsx': + relation_data = { + 'nvp-username': config('nvp-username'), + 'nvp-password': config('nvp-password'), + 'nvp-cluster-name': config('nvp-cluster-name'), + 'nvp-tz-uuid': config('nvp-tz-uuid'), + 'nvp-l3-uuid': config('nvp-l3-uuid'), + 'nvp-controllers': config('nvp-controllers'), + } + else: + relation_data = { + 'neutron-security-groups': config('neutron-security-groups'), + 'l2-population': get_l2population(), + 'overlay-network-type': get_overlay_network_type(), + } relation_set(relation_id=rid, **relation_data) diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index 437395b5..9a09e6bd 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -122,10 +122,10 @@ class HAProxyContextTest(CharmTestCase): _open.assert_called_with('/etc/default/haproxy', 'w') -class NeutronAPIContextsTest(CharmTestCase): +class NeutronCCContextTest(CharmTestCase): def setUp(self): - super(NeutronAPIContextsTest, self).setUp(context, TO_PATCH) + super(NeutronCCContextTest, self).setUp(context, TO_PATCH) self.relation_get.side_effect = self.test_relation.get self.config.side_effect = self.test_config.get self.api_port = 9696 @@ -135,9 +135,15 @@ class NeutronAPIContextsTest(CharmTestCase): self.test_config.set('debug', True) self.test_config.set('verbose', True) self.test_config.set('neutron-external-network', 'bob') + self.test_config.set('nvp-username', 'bob') + self.test_config.set('nvp-password', 'hardpass') + self.test_config.set('nvp-cluster-name', 'nsxclus') + self.test_config.set('nvp-tz-uuid', 'tzuuid') + self.test_config.set('nvp-l3-uuid', 'l3uuid') + self.test_config.set('nvp-controllers', 'ctrl1 ctrl2') def tearDown(self): - super(NeutronAPIContextsTest, self).tearDown() + super(NeutronCCContextTest, self).tearDown() @patch.object(context.NeutronCCContext, 'network_manager') @patch.object(context.NeutronCCContext, 'plugin') @@ -209,3 +215,23 @@ class NeutronAPIContextsTest(CharmTestCase): with patch.object(napi_ctxt, '_ensure_packages') as ep: napi_ctxt._ensure_packages() ep.assert_has_calls([]) + + @patch.object(context.NeutronCCContext, 'network_manager') + @patch.object(context.NeutronCCContext, 'plugin') + @patch('__builtin__.__import__') + def test_neutroncc_context_nsx(self, _import, plugin, nm): + plugin.return_value = 'nsx' + self.related_units.return_value = [] + self.test_config.set('neutron-plugin', 'nsx') + napi_ctxt = context.NeutronCCContext()() + expect = { + 'nvp_cluster_name': 'nsxclus', + 'nvp_controllers': 'ctrl1,ctrl2', + 'nvp_controllers_list': ['ctrl1', 'ctrl2'], + 'nvp_l3_uuid': 'l3uuid', + 'nvp_password': 'hardpass', + 'nvp_tz_uuid': 'tzuuid', + 'nvp_username': 'bob', + } + for key in expect.iterkeys(): + self.assertEquals(napi_ctxt[key], expect[key]) diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index e350f87a..b4d40ce2 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -33,6 
+33,7 @@ TO_PATCH = [ 'determine_ports', 'do_openstack_upgrade', 'execd_preinstall', + 'filter_installed_packages', 'get_l2population', 'get_overlay_network_type', 'is_relation_made', @@ -111,6 +112,7 @@ class NeutronAPIHooksTests(CharmTestCase): self.assertTrue(_id_cluster_joined.called) self.assertTrue(self.CONFIGS.write_all.called) self.assertTrue(self.do_openstack_upgrade.called) + self.assertTrue(self.apt_install.called) def test_amqp_joined(self): self._call_hook('amqp-relation-joined') From 4df06762aa7df95bf86c5312028a48236b003365 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 14 Oct 2014 07:47:50 +0000 Subject: [PATCH 068/125] Switch from the old nvp name to the new nsx name --- config.yaml | 32 +++++++++++--------------- hooks/neutron_api_context.py | 19 ++++++++------- hooks/neutron_api_hooks.py | 12 +++++----- templates/icehouse/nsx.ini | 10 ++++---- unit_tests/test_neutron_api_context.py | 24 +++++++++---------- 5 files changed, 45 insertions(+), 52 deletions(-) diff --git a/config.yaml b/config.yaml index 36104bc9..b26759a5 100644 --- a/config.yaml +++ b/config.yaml @@ -55,7 +55,7 @@ options: Neutron plugin to use for network management; supports . ovs - OpenvSwitch Plugin - nvp - Nicira Network Virtualization Platform + nsx - VMWare NSX . overlay-network-type: default: gre @@ -165,33 +165,29 @@ options: The CPU core multiplier to use when configuring worker processes for Neutron. By default, the number of workers for each daemon is set to twice the number of CPU cores a service unit has. - # Neutron NVP and VMware NSX plugin configuration - nvp-controllers: + # VMware NSX plugin configuration + nsx-controllers: type: string default: - description: Space delimited addresses of NVP/NSX controllers - nvp-username: + description: Space delimited addresses of NSX controllers + nsx-username: type: string default: admin - description: Username to connect to NVP/NSX controllers with - nvp-password: + description: Username to connect to NSX controllers with + nsx-password: type: string default: admin - description: Password to connect to NVP/NSX controllers with - nvp-cluster-name: - type: string - default: example - description: Name of the NVP cluster configuration to create (grizzly only) - nvp-tz-uuid: + description: Password to connect to NSX controllers with + nsx-tz-uuid: type: string default: description: | - This is uuid of the default NVP/NSX Transport zone that will be used for + This is uuid of the default NSX Transport zone that will be used for creating tunneled isolated Quantum networks. It needs to be created - in NVP before starting Quantum with the nvp plugin. - nvp-l3-uuid: + in NSX before starting Quantum with the nsx plugin. + nsx-l3-uuid: type: string default: description: | - This is uuid of the default NVP/NSX L3 Gateway Service. - # end of NVP/NSX configuration + This is uuid of the default NSX L3 Gateway Service. 
+ # end of NSX configuration diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index 081c7814..e17c4f78 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -81,16 +81,15 @@ class NeutronCCContext(context.NeutronContext): from neutron_api_utils import api_port ctxt = super(NeutronCCContext, self).__call__() if config('neutron-plugin') == 'nsx': - ctxt['nvp_username'] = config('nvp-username') - ctxt['nvp_password'] = config('nvp-password') - ctxt['nvp_cluster_name'] = config('nvp-cluster-name') - ctxt['nvp_tz_uuid'] = config('nvp-tz-uuid') - ctxt['nvp_l3_uuid'] = config('nvp-l3-uuid') - if 'nvp-controllers' in config(): - ctxt['nvp_controllers'] = \ - ','.join(config('nvp-controllers').split()) - ctxt['nvp_controllers_list'] = \ - config('nvp-controllers').split() + ctxt['nsx_username'] = config('nsx-username') + ctxt['nsx_password'] = config('nsx-password') + ctxt['nsx_tz_uuid'] = config('nsx-tz-uuid') + ctxt['nsx_l3_uuid'] = config('nsx-l3-uuid') + if 'nsx-controllers' in config(): + ctxt['nsx_controllers'] = \ + ','.join(config('nsx-controllers').split()) + ctxt['nsx_controllers_list'] = \ + config('nsx-controllers').split() ctxt['l2_population'] = self.neutron_l2_population ctxt['overlay_network_type'] = self.neutron_overlay_network_type ctxt['external_network'] = config('neutron-external-network') diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index acff240b..c1f39c2f 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -301,12 +301,12 @@ def neutron_api_relation_changed(): def neutron_plugin_api_relation_joined(rid=None): if config('neutron-plugin') == 'nsx': relation_data = { - 'nvp-username': config('nvp-username'), - 'nvp-password': config('nvp-password'), - 'nvp-cluster-name': config('nvp-cluster-name'), - 'nvp-tz-uuid': config('nvp-tz-uuid'), - 'nvp-l3-uuid': config('nvp-l3-uuid'), - 'nvp-controllers': config('nvp-controllers'), + 'nsx-username': config('nsx-username'), + 'nsx-password': config('nsx-password'), + 'nsx-cluster-name': config('nsx-cluster-name'), + 'nsx-tz-uuid': config('nsx-tz-uuid'), + 'nsx-l3-uuid': config('nsx-l3-uuid'), + 'nsx-controllers': config('nsx-controllers'), } else: relation_data = { diff --git a/templates/icehouse/nsx.ini b/templates/icehouse/nsx.ini index 805d28a1..ffcaec2c 100644 --- a/templates/icehouse/nsx.ini +++ b/templates/icehouse/nsx.ini @@ -4,8 +4,8 @@ # Configuration file maintained by Juju. Local changes may be overwritten. 
############################################################################### [DEFAULT] -nsx_user = {{ nvp_username }} -nsx_password = {{ nvp_password }} -nsx_controllers = {{ nvp_controllers }} -default_tz_uuid = {{ nvp_tz_uuid }} -default_l3_gw_service_uuid = {{ nvp_l3_uuid }} +nsx_user = {{ nsx_username }} +nsx_password = {{ nsx_password }} +nsx_controllers = {{ nsx_controllers }} +default_tz_uuid = {{ nsx_tz_uuid }} +default_l3_gw_service_uuid = {{ nsx_l3_uuid }} diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index 9a09e6bd..3a43e5ee 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -135,12 +135,11 @@ class NeutronCCContextTest(CharmTestCase): self.test_config.set('debug', True) self.test_config.set('verbose', True) self.test_config.set('neutron-external-network', 'bob') - self.test_config.set('nvp-username', 'bob') - self.test_config.set('nvp-password', 'hardpass') - self.test_config.set('nvp-cluster-name', 'nsxclus') - self.test_config.set('nvp-tz-uuid', 'tzuuid') - self.test_config.set('nvp-l3-uuid', 'l3uuid') - self.test_config.set('nvp-controllers', 'ctrl1 ctrl2') + self.test_config.set('nsx-username', 'bob') + self.test_config.set('nsx-password', 'hardpass') + self.test_config.set('nsx-tz-uuid', 'tzuuid') + self.test_config.set('nsx-l3-uuid', 'l3uuid') + self.test_config.set('nsx-controllers', 'ctrl1 ctrl2') def tearDown(self): super(NeutronCCContextTest, self).tearDown() @@ -225,13 +224,12 @@ class NeutronCCContextTest(CharmTestCase): self.test_config.set('neutron-plugin', 'nsx') napi_ctxt = context.NeutronCCContext()() expect = { - 'nvp_cluster_name': 'nsxclus', - 'nvp_controllers': 'ctrl1,ctrl2', - 'nvp_controllers_list': ['ctrl1', 'ctrl2'], - 'nvp_l3_uuid': 'l3uuid', - 'nvp_password': 'hardpass', - 'nvp_tz_uuid': 'tzuuid', - 'nvp_username': 'bob', + 'nsx_controllers': 'ctrl1,ctrl2', + 'nsx_controllers_list': ['ctrl1', 'ctrl2'], + 'nsx_l3_uuid': 'l3uuid', + 'nsx_password': 'hardpass', + 'nsx_tz_uuid': 'tzuuid', + 'nsx_username': 'bob', } for key in expect.iterkeys(): self.assertEquals(napi_ctxt[key], expect[key]) From 30bbb399206dd2d4f7d177495d1eab40968b0d20 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 16 Oct 2014 12:54:45 +0000 Subject: [PATCH 069/125] Stamp neutron database before upgrade. 
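Before an upgrade away from Icehouse, the neutron database is first stamped with the icehouse revision so that neutron-db-manage knows the schema's starting point, and only then migrated to head. A minimal sketch of the sequence this patch wires into do_openstack_upgrade, using the ml2 plugin paths from the unit tests below:

    # Sketch only: the real helpers resolve the plugin config path via
    # neutron_plugin_attribute() rather than hard-coding it.
    from subprocess import check_output

    NEUTRON_CONF = '/etc/neutron/neutron.conf'
    ML2_CONF = '/etc/neutron/plugins/ml2/ml2_conf.ini'

    def upgrade_db_from_icehouse():
        # 'stamp' records the current revision without running migrations,
        # so the following 'upgrade head' starts from icehouse.
        check_output(['neutron-db-manage', '--config-file', NEUTRON_CONF,
                      '--config-file', ML2_CONF, 'stamp', 'icehouse'])
        check_output(['neutron-db-manage', '--config-file', NEUTRON_CONF,
                      '--config-file', ML2_CONF, 'upgrade', 'head'])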
--- hooks/neutron_api_hooks.py | 2 +- hooks/neutron_api_utils.py | 22 +++++++++++++-- unit_tests/test_neutron_api_utils.py | 40 +++++++++++++++++++--------- 3 files changed, 49 insertions(+), 15 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index c1f39c2f..1f113cb0 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -165,7 +165,7 @@ def conditional_neutron_migration(): if not relation_ids('neutron-api'): log('Not running neutron database migration, no nova-cloud-controller' 'is present.') - elif os_release('nova-common') <= 'icehouse': + elif os_release('neutron-server') <= 'icehouse': log('Not running neutron database migration as migrations are handled' 'by the neutron-server process.') else: diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index a11f2d07..e7f096e7 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -159,7 +159,7 @@ def resource_map(): def register_configs(release=None): - release = release or os_release('nova-common') + release = release or os_release('neutron-server') configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, openstack_release=release) for cfg, rscs in resource_map().iteritems(): @@ -189,6 +189,7 @@ def do_openstack_upgrade(configs): :param configs: The charms main OSConfigRenderer object. """ + cur_os_rel = os_release('neutron-server') new_src = config('openstack-origin') new_os_rel = get_os_codename_install_source(new_src) @@ -210,11 +211,28 @@ def do_openstack_upgrade(configs): # set CONFIGS to load templates from new release configs.set_release(openstack_release=new_os_rel) + + if cur_os_rel == 'icehouse': + stamp_neutron_database('icehouse') migrate_neutron_database() +def stamp_neutron_database(release): + '''Stamp the database with the current release before upgrade.''' + log('Stamping the neutron database with release %s.' 
% release) + plugin = config('neutron-plugin') + cmd = ['neutron-db-manage', + '--config-file', NEUTRON_CONF, + '--config-file', neutron_plugin_attribute(plugin, + 'config', + 'neutron'), + 'stamp', + release] + subprocess.check_output(cmd) + + def migrate_neutron_database(): - '''Runs neutron-db-manage to init a new database or migrate existing''' + '''Initializes a new database or upgrades an existing database.''' log('Migrating the neutron database.') plugin = config('neutron-plugin') cmd = ['neutron-db-manage', diff --git a/unit_tests/test_neutron_api_utils.py b/unit_tests/test_neutron_api_utils.py index 827d6a75..5f68a9b6 100644 --- a/unit_tests/test_neutron_api_utils.py +++ b/unit_tests/test_neutron_api_utils.py @@ -146,29 +146,45 @@ class TestNeutronAPIUtils(CharmTestCase): nutils.keystone_ca_cert_b64() self.assertTrue(self.b64encode.called) - def test_do_openstack_upgrade(self): + @patch.object(nutils, 'migrate_neutron_database') + @patch.object(nutils, 'stamp_neutron_database') + def test_do_openstack_upgrade(self, stamp_neutron_db, migrate_neutron_db): self.config.side_effect = self.test_config.get - self.test_config.set('openstack-origin', 'cloud:precise-havana') - self.get_os_codename_install_source.return_value = 'havana' + self.test_config.set('openstack-origin', 'cloud:trusty-juno') + self.os_release.side_effect = 'icehouse' + self.get_os_codename_install_source.return_value = 'juno' configs = MagicMock() nutils.do_openstack_upgrade(configs) - configs.set_release.assert_called_with(openstack_release='havana') + self.os_release.assert_called_with('neutron-server') self.log.assert_called() + self.configure_installation_source.assert_called_with( + 'cloud:trusty-juno' + ) self.apt_update.assert_called_with(fatal=True) dpkg_opts = [ '--option', 'Dpkg::Options::=--force-confnew', '--option', 'Dpkg::Options::=--force-confdef', ] + self.apt_upgrade.assert_called_with(options=dpkg_opts, + fatal=True, + dist=True) pkgs = nutils.BASE_PACKAGES pkgs.sort() - self.apt_install.assert_called_with( - options=dpkg_opts, - packages=pkgs, - fatal=True - ) - self.configure_installation_source.assert_called_with( - 'cloud:precise-havana' - ) + self.apt_install.assert_called_with(packages=pkgs, + options=dpkg_opts, + fatal=True) + configs.set_release.assert_called_with(openstack_release='juno') + stamp_neutron_db.assert_called() + migrate_neutron_db.assert_called() + + def test_stamp_neutron_database(self): + nutils.stamp_neutron_database('icehouse') + cmd = ['neutron-db-manage', + '--config-file', '/etc/neutron/neutron.conf', + '--config-file', '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'stamp', + 'icehouse'] + self.subprocess.check_output.assert_called_with(cmd) def test_migrate_neutron_database(self): nutils.migrate_neutron_database() From 73317cdf388805dd3f6690f5c6d1e91a434f0341 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 20 Oct 2014 07:43:06 +0000 Subject: [PATCH 070/125] Fix db migration running when db relation has not been setup --- hooks/neutron_api_hooks.py | 3 ++- unit_tests/test_neutron_api_hooks.py | 9 +++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index c1f39c2f..3ccc8d51 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -294,7 +294,8 @@ def neutron_api_relation_joined(rid=None): @restart_on_change(restart_map()) def neutron_api_relation_changed(): CONFIGS.write(NEUTRON_CONF) - conditional_neutron_migration() + if 'shared-db' in CONFIGS.complete_contexts(): + 
conditional_neutron_migration() @hooks.hook('neutron-plugin-api-relation-joined') diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index b4d40ce2..fe7258a1 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -271,10 +271,19 @@ class NeutronAPIHooksTests(CharmTestCase): @patch.object(hooks, 'conditional_neutron_migration') def test_neutron_api_relation_changed(self, cond_neutron_mig): + self.CONFIGS.complete_contexts.return_value = ['shared-db'] self._call_hook('neutron-api-relation-changed') self.assertTrue(self.CONFIGS.write.called_with(NEUTRON_CONF)) cond_neutron_mig.assert_called_with() + @patch.object(hooks, 'conditional_neutron_migration') + def test_neutron_api_relation_changed_incomplere_ctxt(self, + cond_neutron_mig): + self.CONFIGS.complete_contexts.return_value = [] + self._call_hook('neutron-api-relation-changed') + self.assertTrue(self.CONFIGS.write.called_with(NEUTRON_CONF)) + self.assertFalse(cond_neutron_mig.called) + def test_neutron_plugin_api_relation_joined_nol2(self): _relation_data = { 'neutron-security-groups': False, From f9012c76a056116b8316948fa126d37832aa02cc Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 20 Oct 2014 11:31:51 +0100 Subject: [PATCH 071/125] Optimize hook links --- hooks/neutron-plugin-api-relation-broken | 1 - hooks/neutron-plugin-api-relation-changed | 1 - hooks/neutron-plugin-api-relation-departed | 1 - 3 files changed, 3 deletions(-) delete mode 120000 hooks/neutron-plugin-api-relation-broken delete mode 120000 hooks/neutron-plugin-api-relation-changed delete mode 120000 hooks/neutron-plugin-api-relation-departed diff --git a/hooks/neutron-plugin-api-relation-broken b/hooks/neutron-plugin-api-relation-broken deleted file mode 120000 index 1fb10fd5..00000000 --- a/hooks/neutron-plugin-api-relation-broken +++ /dev/null @@ -1 +0,0 @@ -neutron_api_hooks.py \ No newline at end of file diff --git a/hooks/neutron-plugin-api-relation-changed b/hooks/neutron-plugin-api-relation-changed deleted file mode 120000 index 1fb10fd5..00000000 --- a/hooks/neutron-plugin-api-relation-changed +++ /dev/null @@ -1 +0,0 @@ -neutron_api_hooks.py \ No newline at end of file diff --git a/hooks/neutron-plugin-api-relation-departed b/hooks/neutron-plugin-api-relation-departed deleted file mode 120000 index 1fb10fd5..00000000 --- a/hooks/neutron-plugin-api-relation-departed +++ /dev/null @@ -1 +0,0 @@ -neutron_api_hooks.py \ No newline at end of file From f356970aebaa150de60a1f2f47f09b2d77478441 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 21 Oct 2014 13:07:03 +0000 Subject: [PATCH 072/125] Remove code to run neutron-db-manage from neutron-api as it's racey with the nova-cc --- hooks/neutron_api_hooks.py | 36 ------------- hooks/neutron_api_utils.py | 34 ------------- unit_tests/test_neutron_api_hooks.py | 76 ++-------------------------- unit_tests/test_neutron_api_utils.py | 26 +--------- 4 files changed, 5 insertions(+), 167 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 4a8effd5..8e3cc5ab 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -20,7 +20,6 @@ from charmhelpers.core.hookenv import ( from charmhelpers.core.host import ( restart_on_change, - service_restart, ) from charmhelpers.fetch import ( @@ -32,7 +31,6 @@ from charmhelpers.fetch import ( from charmhelpers.contrib.openstack.utils import ( configure_installation_source, openstack_upgrade_available, - os_release, sync_db_with_multi_ipv6_addresses ) 
from charmhelpers.contrib.openstack.neutron import ( @@ -40,13 +38,11 @@ from charmhelpers.contrib.openstack.neutron import ( ) from neutron_api_utils import ( - CLUSTER_RES, NEUTRON_CONF, api_port, determine_packages, determine_ports, do_openstack_upgrade, - migrate_neutron_database, register_configs, restart_map, setup_ipv6 @@ -58,7 +54,6 @@ from neutron_api_context import ( from charmhelpers.contrib.hahelpers.cluster import ( get_hacluster_config, - is_leader, ) from charmhelpers.payload.execd import execd_preinstall @@ -153,33 +148,6 @@ def amqp_changed(): CONFIGS.write(NEUTRON_CONF) -def conditional_neutron_migration(): - # This is an attempt to stop a race over the db migration between nova-cc - # and neutron-api by having the migration master decided by the presence - # of the neutron-api relation. In the long term this should only be done - # the neutron-api charm and nova-cc should play no hand in it - # * neutron-api refuses to run migrations until neutron-api relation is - # present - # * nova-cc refuses to run migration if neutron-api relations is present - clustered = relation_get('clustered') - if not relation_ids('neutron-api'): - log('Not running neutron database migration, no nova-cloud-controller' - 'is present.') - elif os_release('neutron-server') <= 'icehouse': - log('Not running neutron database migration as migrations are handled' - 'by the neutron-server process.') - else: - if clustered: - if is_leader(CLUSTER_RES): - migrate_neutron_database() - service_restart('neutron-server') - else: - log('Not running neutron database migration, not leader') - else: - migrate_neutron_database() - service_restart('neutron-server') - - @hooks.hook('shared-db-relation-joined') def db_joined(): if is_relation_made('pgsql-db'): @@ -218,7 +186,6 @@ def db_changed(): log('shared-db relation incomplete. Peer not ready?') return CONFIGS.write_all() - conditional_neutron_migration() @hooks.hook('pgsql-db-relation-changed') @@ -227,7 +194,6 @@ def postgresql_neutron_db_changed(): plugin = config('neutron-plugin') # DB config might have been moved to main neutron.conf in H? CONFIGS.write(neutron_plugin_attribute(plugin, 'config')) - conditional_neutron_migration() @hooks.hook('amqp-relation-broken', @@ -294,8 +260,6 @@ def neutron_api_relation_joined(rid=None): @restart_on_change(restart_map()) def neutron_api_relation_changed(): CONFIGS.write(NEUTRON_CONF) - if 'shared-db' in CONFIGS.complete_contexts(): - conditional_neutron_migration() @hooks.hook('neutron-plugin-api-relation-joined') diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index e7f096e7..4179440a 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -30,7 +30,6 @@ from charmhelpers.core.host import ( ) import neutron_api_context -import subprocess TEMPLATES = 'templates/' @@ -189,7 +188,6 @@ def do_openstack_upgrade(configs): :param configs: The charms main OSConfigRenderer object. """ - cur_os_rel = os_release('neutron-server') new_src = config('openstack-origin') new_os_rel = get_os_codename_install_source(new_src) @@ -212,38 +210,6 @@ def do_openstack_upgrade(configs): # set CONFIGS to load templates from new release configs.set_release(openstack_release=new_os_rel) - if cur_os_rel == 'icehouse': - stamp_neutron_database('icehouse') - migrate_neutron_database() - - -def stamp_neutron_database(release): - '''Stamp the database with the current release before upgrade.''' - log('Stamping the neutron database with release %s.' 
% release) - plugin = config('neutron-plugin') - cmd = ['neutron-db-manage', - '--config-file', NEUTRON_CONF, - '--config-file', neutron_plugin_attribute(plugin, - 'config', - 'neutron'), - 'stamp', - release] - subprocess.check_output(cmd) - - -def migrate_neutron_database(): - '''Initializes a new database or upgrades an existing database.''' - log('Migrating the neutron database.') - plugin = config('neutron-plugin') - cmd = ['neutron-db-manage', - '--config-file', NEUTRON_CONF, - '--config-file', neutron_plugin_attribute(plugin, - 'config', - 'neutron'), - 'upgrade', - 'head'] - subprocess.check_output(cmd) - def setup_ipv6(): ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower() diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index fe7258a1..747d66a0 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -41,7 +41,6 @@ TO_PATCH = [ 'neutron_plugin_attribute', 'open_port', 'openstack_upgrade_available', - 'os_release', 'relation_get', 'relation_ids', 'relation_set', @@ -49,9 +48,6 @@ TO_PATCH = [ 'get_iface_for_address', 'get_netmask_for_address', 'get_address_in_network', - 'migrate_neutron_database', - 'service_restart', - 'is_leader', ] NEUTRON_CONF_DIR = "/etc/neutron" @@ -167,23 +163,19 @@ class NeutronAPIHooksTests(CharmTestCase): 'Attempting to associate a postgresql database when' ' there is already associated a mysql one') - @patch.object(hooks, 'conditional_neutron_migration') - def test_shared_db_changed(self, cond_neutron_mig): + def test_shared_db_changed(self): self.CONFIGS.complete_contexts.return_value = ['shared-db'] self._call_hook('shared-db-relation-changed') self.assertTrue(self.CONFIGS.write_all.called) - cond_neutron_mig.assert_called_with() def test_shared_db_changed_partial_ctxt(self): self.CONFIGS.complete_contexts.return_value = [] self._call_hook('shared-db-relation-changed') self.assertFalse(self.CONFIGS.write_all.called) - @patch.object(hooks, 'conditional_neutron_migration') - def test_pgsql_db_changed(self, cond_neutron_mig): + def test_pgsql_db_changed(self): self._call_hook('pgsql-db-relation-changed') self.assertTrue(self.CONFIGS.write.called) - cond_neutron_mig.assert_called_with() def test_amqp_broken(self): self._call_hook('amqp-relation-broken') @@ -269,20 +261,15 @@ class NeutronAPIHooksTests(CharmTestCase): **_relation_data ) - @patch.object(hooks, 'conditional_neutron_migration') - def test_neutron_api_relation_changed(self, cond_neutron_mig): + def test_neutron_api_relation_changed(self): self.CONFIGS.complete_contexts.return_value = ['shared-db'] self._call_hook('neutron-api-relation-changed') self.assertTrue(self.CONFIGS.write.called_with(NEUTRON_CONF)) - cond_neutron_mig.assert_called_with() - @patch.object(hooks, 'conditional_neutron_migration') - def test_neutron_api_relation_changed_incomplere_ctxt(self, - cond_neutron_mig): + def test_neutron_api_relation_changed_incomplere_ctxt(self): self.CONFIGS.complete_contexts.return_value = [] self._call_hook('neutron-api-relation-changed') self.assertTrue(self.CONFIGS.write.called_with(NEUTRON_CONF)) - self.assertFalse(cond_neutron_mig.called) def test_neutron_plugin_api_relation_joined_nol2(self): _relation_data = { @@ -410,58 +397,3 @@ class NeutronAPIHooksTests(CharmTestCase): self.check_call.assert_called_with(['a2dissite', 'openstack_https_frontend']) self.assertTrue(_id_rel_joined.called) - - def test_conditional_neutron_migration_no_ncc_rel(self): - self.test_relation.set({ - 'clustered': 'false', - }) - 
self.relation_ids.return_value = [] - hooks.conditional_neutron_migration() - self.log.assert_called_with( - 'Not running neutron database migration, no nova-cloud-controller' - 'is present.' - ) - - def test_conditional_neutron_migration_icehouse(self): - self.test_relation.set({ - 'clustered': 'false', - }) - self.os_release.return_value = 'icehouse' - hooks.conditional_neutron_migration() - self.log.assert_called_with( - 'Not running neutron database migration as migrations are handled' - 'by the neutron-server process.' - ) - - def test_conditional_neutron_migration_ncc_rel_leader(self): - self.test_relation.set({ - 'clustered': 'true', - }) - self.is_leader.return_value = True - self.os_release.return_value = 'juno' - hooks.conditional_neutron_migration() - self.migrate_neutron_database.assert_called_with() - self.service_restart.assert_called_with('neutron-server') - - def test_conditional_neutron_migration_ncc_rel_notleader(self): - self.test_relation.set({ - 'clustered': 'true', - }) - self.is_leader.return_value = False - self.os_release.return_value = 'juno' - hooks.conditional_neutron_migration() - self.assertFalse(self.migrate_neutron_database.called) - self.assertFalse(self.service_restart.called) - self.log.assert_called_with( - 'Not running neutron database migration, not leader' - ) - - def test_conditional_neutron_migration_not_clustered(self): - self.test_relation.set({ - 'clustered': 'false', - }) - self.relation_ids.return_value = ['nova-cc/o'] - self.os_release.return_value = 'juno' - hooks.conditional_neutron_migration() - self.migrate_neutron_database.assert_called_with() - self.service_restart.assert_called_with('neutron-server') diff --git a/unit_tests/test_neutron_api_utils.py b/unit_tests/test_neutron_api_utils.py index 5f68a9b6..b3314862 100644 --- a/unit_tests/test_neutron_api_utils.py +++ b/unit_tests/test_neutron_api_utils.py @@ -28,7 +28,6 @@ TO_PATCH = [ 'log', 'neutron_plugin_attribute', 'os_release', - 'subprocess', ] @@ -146,16 +145,13 @@ class TestNeutronAPIUtils(CharmTestCase): nutils.keystone_ca_cert_b64() self.assertTrue(self.b64encode.called) - @patch.object(nutils, 'migrate_neutron_database') - @patch.object(nutils, 'stamp_neutron_database') - def test_do_openstack_upgrade(self, stamp_neutron_db, migrate_neutron_db): + def test_do_openstack_upgrade(self): self.config.side_effect = self.test_config.get self.test_config.set('openstack-origin', 'cloud:trusty-juno') self.os_release.side_effect = 'icehouse' self.get_os_codename_install_source.return_value = 'juno' configs = MagicMock() nutils.do_openstack_upgrade(configs) - self.os_release.assert_called_with('neutron-server') self.log.assert_called() self.configure_installation_source.assert_called_with( 'cloud:trusty-juno' @@ -174,23 +170,3 @@ class TestNeutronAPIUtils(CharmTestCase): options=dpkg_opts, fatal=True) configs.set_release.assert_called_with(openstack_release='juno') - stamp_neutron_db.assert_called() - migrate_neutron_db.assert_called() - - def test_stamp_neutron_database(self): - nutils.stamp_neutron_database('icehouse') - cmd = ['neutron-db-manage', - '--config-file', '/etc/neutron/neutron.conf', - '--config-file', '/etc/neutron/plugins/ml2/ml2_conf.ini', - 'stamp', - 'icehouse'] - self.subprocess.check_output.assert_called_with(cmd) - - def test_migrate_neutron_database(self): - nutils.migrate_neutron_database() - cmd = ['neutron-db-manage', - '--config-file', '/etc/neutron/neutron.conf', - '--config-file', '/etc/neutron/plugins/ml2/ml2_conf.ini', - 'upgrade', - 'head'] - 
self.subprocess.check_output.assert_called_with(cmd) From b6ff05ddfe2fd8a09348dee2ce3388aa3942900b Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 22 Oct 2014 17:31:29 +0100 Subject: [PATCH 073/125] Tweak rpc workers and max_pool_size for scaling --- templates/icehouse/neutron.conf | 1 + templates/parts/section-database | 1 + 2 files changed, 2 insertions(+) diff --git a/templates/icehouse/neutron.conf b/templates/icehouse/neutron.conf index 34433ce8..519a6278 100644 --- a/templates/icehouse/neutron.conf +++ b/templates/icehouse/neutron.conf @@ -13,6 +13,7 @@ bind_host = {{ bind_host }} auth_strategy = keystone notification_driver = neutron.openstack.common.notifier.rpc_notifier api_workers = {{ workers }} +rpc_workers = {{ workers }} {% if neutron_bind_port -%} bind_port = {{ neutron_bind_port }} diff --git a/templates/parts/section-database b/templates/parts/section-database index ae4a5ba0..be0071fa 100644 --- a/templates/parts/section-database +++ b/templates/parts/section-database @@ -1,4 +1,5 @@ {% if database_host -%} [database] connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %} +max_pool_size = {{ workers * 4 }} {% endif -%} From 6e03a8c2477124356f51397d184759ddded123c3 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 29 Oct 2014 22:30:36 -0500 Subject: [PATCH 074/125] [bradm] initial nrpe checks --- config.yaml | 10 + files/nrpe-external-master/check_upstart_job | 72 ++++++ .../contrib/charmsupport/__init__.py | 0 .../charmhelpers/contrib/charmsupport/nrpe.py | 218 ++++++++++++++++++ .../contrib/charmsupport/volumes.py | 156 +++++++++++++ hooks/neutron_api_hooks.py | 17 ++ hooks/nrpe-external-master-relation-changed | 1 + hooks/nrpe-external-master-relation-joined | 1 + metadata.yaml | 3 + 9 files changed, 478 insertions(+) create mode 100755 files/nrpe-external-master/check_upstart_job create mode 100644 hooks/charmhelpers/contrib/charmsupport/__init__.py create mode 100644 hooks/charmhelpers/contrib/charmsupport/nrpe.py create mode 100644 hooks/charmhelpers/contrib/charmsupport/volumes.py create mode 120000 hooks/nrpe-external-master-relation-changed create mode 120000 hooks/nrpe-external-master-relation-joined diff --git a/config.yaml b/config.yaml index b26759a5..cf19920c 100644 --- a/config.yaml +++ b/config.yaml @@ -191,3 +191,13 @@ options: description: | This is uuid of the default NSX L3 Gateway Service. # end of NSX configuration + nagios_context: + default: "juju" + type: string + description: | + Used by the nrpe-external-master subordinate charm. + A string that will be prepended to instance name to set the host name + in nagios. So for instance the hostname would be something like: + juju-myservice-0 + If you're running multiple environments with the same services in them + this allows you to differentiate between them. diff --git a/files/nrpe-external-master/check_upstart_job b/files/nrpe-external-master/check_upstart_job new file mode 100755 index 00000000..94efb95e --- /dev/null +++ b/files/nrpe-external-master/check_upstart_job @@ -0,0 +1,72 @@ +#!/usr/bin/python + +# +# Copyright 2012, 2013 Canonical Ltd. 
+# +# Author: Paul Collins +# +# Based on http://www.eurion.net/python-snippets/snippet/Upstart%20service%20status.html +# + +import sys + +import dbus + + +class Upstart(object): + def __init__(self): + self._bus = dbus.SystemBus() + self._upstart = self._bus.get_object('com.ubuntu.Upstart', + '/com/ubuntu/Upstart') + def get_job(self, job_name): + path = self._upstart.GetJobByName(job_name, + dbus_interface='com.ubuntu.Upstart0_6') + return self._bus.get_object('com.ubuntu.Upstart', path) + + def get_properties(self, job): + path = job.GetInstance([], dbus_interface='com.ubuntu.Upstart0_6.Job') + instance = self._bus.get_object('com.ubuntu.Upstart', path) + return instance.GetAll('com.ubuntu.Upstart0_6.Instance', + dbus_interface=dbus.PROPERTIES_IFACE) + + def get_job_instances(self, job_name): + job = self.get_job(job_name) + paths = job.GetAllInstances([], dbus_interface='com.ubuntu.Upstart0_6.Job') + return [self._bus.get_object('com.ubuntu.Upstart', path) for path in paths] + + def get_job_instance_properties(self, job): + return job.GetAll('com.ubuntu.Upstart0_6.Instance', + dbus_interface=dbus.PROPERTIES_IFACE) + +try: + upstart = Upstart() + try: + job = upstart.get_job(sys.argv[1]) + props = upstart.get_properties(job) + + if props['state'] == 'running': + print 'OK: %s is running' % sys.argv[1] + sys.exit(0) + else: + print 'CRITICAL: %s is not running' % sys.argv[1] + sys.exit(2) + + except dbus.DBusException as e: + instances = upstart.get_job_instances(sys.argv[1]) + propses = [upstart.get_job_instance_properties(instance) for instance in instances] + states = dict([(props['name'], props['state']) for props in propses]) + if len(states) != states.values().count('running'): + not_running = [] + for name in states.keys(): + if states[name] != 'running': + not_running.append(name) + print 'CRITICAL: %d instances of %s not running: %s' % \ + (len(not_running), sys.argv[1], not_running.join(', ')) + sys.exit(2) + else: + print 'OK: %d instances of %s running' % (len(states), sys.argv[1]) + +except dbus.DBusException as e: + print 'CRITICAL: failed to get properties of \'%s\' from upstart' % sys.argv[1] + sys.exit(2) + diff --git a/hooks/charmhelpers/contrib/charmsupport/__init__.py b/hooks/charmhelpers/contrib/charmsupport/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py new file mode 100644 index 00000000..f3bfe3f3 --- /dev/null +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -0,0 +1,218 @@ +"""Compatibility with the nrpe-external-master charm""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Matthew Wedgwood + +import subprocess +import pwd +import grp +import os +import re +import shlex +import yaml + +from charmhelpers.core.hookenv import ( + config, + local_unit, + log, + relation_ids, + relation_set, +) + +from charmhelpers.core.host import service + +# This module adds compatibility with the nrpe-external-master and plain nrpe +# subordinate charms. To use it in your charm: +# +# 1. Update metadata.yaml +# +# provides: +# (...) +# nrpe-external-master: +# interface: nrpe-external-master +# scope: container +# +# and/or +# +# provides: +# (...) +# local-monitors: +# interface: local-monitors +# scope: container + +# +# 2. Add the following to config.yaml +# +# nagios_context: +# default: "juju" +# type: string +# description: | +# Used by the nrpe subordinate charms. 
+# A string that will be prepended to instance name to set the host name +# in nagios. So for instance the hostname would be something like: +# juju-myservice-0 +# If you're running multiple environments with the same services in them +# this allows you to differentiate between them. +# +# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master +# +# 4. Update your hooks.py with something like this: +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE() +# nrpe_compat.add_check( +# shortname = "myservice", +# description = "Check MyService", +# check_cmd = "check_http -w 2 -c 10 http://localhost" +# ) +# nrpe_compat.add_check( +# "myservice_other", +# "Check for widget failures", +# check_cmd = "/srv/myapp/scripts/widget_check" +# ) +# nrpe_compat.write() +# +# def config_changed(): +# (...) +# update_nrpe_config() +# +# def nrpe_external_master_relation_changed(): +# update_nrpe_config() +# +# def local_monitors_relation_changed(): +# update_nrpe_config() +# +# 5. ln -s hooks.py nrpe-external-master-relation-changed +# ln -s hooks.py local-monitors-relation-changed + + +class CheckException(Exception): + pass + + +class Check(object): + shortname_re = '[A-Za-z0-9-_]+$' + service_template = (""" +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service {{ + use active-service + host_name {nagios_hostname} + service_description {nagios_hostname}[{shortname}] """ + """{description} + check_command check_nrpe!{command} + servicegroups {nagios_servicegroup} +}} +""") + + def __init__(self, shortname, description, check_cmd): + super(Check, self).__init__() + # XXX: could be better to calculate this from the service name + if not re.match(self.shortname_re, shortname): + raise CheckException("shortname must match {}".format( + Check.shortname_re)) + self.shortname = shortname + self.command = "check_{}".format(shortname) + # Note: a set of invalid characters is defined by the + # Nagios server config + # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= + self.description = description + self.check_cmd = self._locate_cmd(check_cmd) + + def _locate_cmd(self, check_cmd): + search_path = ( + '/', + os.path.join(os.environ['CHARM_DIR'], + 'files/nrpe-external-master'), + '/usr/lib/nagios/plugins', + ) + parts = shlex.split(check_cmd) + for path in search_path: + if os.path.exists(os.path.join(path, parts[0])): + command = os.path.join(path, parts[0]) + if len(parts) > 1: + command += " " + " ".join(parts[1:]) + return command + log('Check command not found: {}'.format(parts[0])) + return '' + + def write(self, nagios_context, hostname): + nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( + self.command) + with open(nrpe_check_file, 'w') as nrpe_check_config: + nrpe_check_config.write("# check {}\n".format(self.shortname)) + nrpe_check_config.write("command[{}]={}\n".format( + self.command, self.check_cmd)) + + if not os.path.exists(NRPE.nagios_exportdir): + log('Not writing service config as {} is not accessible'.format( + NRPE.nagios_exportdir)) + else: + self.write_service_config(nagios_context, hostname) + + def write_service_config(self, nagios_context, hostname): + for f in os.listdir(NRPE.nagios_exportdir): + if re.search('.*{}.cfg'.format(self.command), f): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + templ_vars = { + 'nagios_hostname': hostname, + 'nagios_servicegroup': nagios_context, + 'description': 
self.description, + 'shortname': self.shortname, + 'command': self.command, + } + nrpe_service_text = Check.service_template.format(**templ_vars) + nrpe_service_file = '{}/service__{}_{}.cfg'.format( + NRPE.nagios_exportdir, hostname, self.command) + with open(nrpe_service_file, 'w') as nrpe_service_config: + nrpe_service_config.write(str(nrpe_service_text)) + + def run(self): + subprocess.call(self.check_cmd) + + +class NRPE(object): + nagios_logdir = '/var/log/nagios' + nagios_exportdir = '/var/lib/nagios/export' + nrpe_confdir = '/etc/nagios/nrpe.d' + + def __init__(self): + super(NRPE, self).__init__() + self.config = config() + self.nagios_context = self.config['nagios_context'] + self.unit_name = local_unit().replace('/', '-') + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + self.checks = [] + + def add_check(self, *args, **kwargs): + self.checks.append(Check(*args, **kwargs)) + + def write(self): + try: + nagios_uid = pwd.getpwnam('nagios').pw_uid + nagios_gid = grp.getgrnam('nagios').gr_gid + except: + log("Nagios user not set up, nrpe checks not updated") + return + + if not os.path.exists(NRPE.nagios_logdir): + os.mkdir(NRPE.nagios_logdir) + os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) + + nrpe_monitors = {} + monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + for nrpecheck in self.checks: + nrpecheck.write(self.nagios_context, self.hostname) + nrpe_monitors[nrpecheck.shortname] = { + "command": nrpecheck.command, + } + + service('restart', 'nagios-nrpe-server') + + for rid in relation_ids("local-monitors"): + relation_set(relation_id=rid, monitors=yaml.dump(monitors)) diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py new file mode 100644 index 00000000..0f905dff --- /dev/null +++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -0,0 +1,156 @@ +''' +Functions for managing volumes in juju units. One volume is supported per unit. +Subordinates may have their own storage, provided it is on its own partition. + +Configuration stanzas: + volume-ephemeral: + type: boolean + default: true + description: > + If false, a volume is mounted as sepecified in "volume-map" + If true, ephemeral storage will be used, meaning that log data + will only exist as long as the machine. YOU HAVE BEEN WARNED. + volume-map: + type: string + default: {} + description: > + YAML map of units to device names, e.g: + "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" + Service units will raise a configure-error if volume-ephemeral + is 'true' and no volume-map value is set. Use 'juju set' to set a + value and 'juju resolved' to complete configuration. 
+ +Usage: + from charmsupport.volumes import configure_volume, VolumeConfigurationError + from charmsupport.hookenv import log, ERROR + def post_mount_hook(): + stop_service('myservice') + def post_mount_hook(): + start_service('myservice') + + if __name__ == '__main__': + try: + configure_volume(before_change=pre_mount_hook, + after_change=post_mount_hook) + except VolumeConfigurationError: + log('Storage could not be configured', ERROR) +''' + +# XXX: Known limitations +# - fstab is neither consulted nor updated + +import os +from charmhelpers.core import hookenv +from charmhelpers.core import host +import yaml + + +MOUNT_BASE = '/srv/juju/volumes' + + +class VolumeConfigurationError(Exception): + '''Volume configuration data is missing or invalid''' + pass + + +def get_config(): + '''Gather and sanity-check volume configuration data''' + volume_config = {} + config = hookenv.config() + + errors = False + + if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): + volume_config['ephemeral'] = True + else: + volume_config['ephemeral'] = False + + try: + volume_map = yaml.safe_load(config.get('volume-map', '{}')) + except yaml.YAMLError as e: + hookenv.log("Error parsing YAML volume-map: {}".format(e), + hookenv.ERROR) + errors = True + if volume_map is None: + # probably an empty string + volume_map = {} + elif not isinstance(volume_map, dict): + hookenv.log("Volume-map should be a dictionary, not {}".format( + type(volume_map))) + errors = True + + volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) + if volume_config['device'] and volume_config['ephemeral']: + # asked for ephemeral storage but also defined a volume ID + hookenv.log('A volume is defined for this unit, but ephemeral ' + 'storage was requested', hookenv.ERROR) + errors = True + elif not volume_config['device'] and not volume_config['ephemeral']: + # asked for permanent storage but did not define volume ID + hookenv.log('Ephemeral storage was requested, but there is no volume ' + 'defined for this unit.', hookenv.ERROR) + errors = True + + unit_mount_name = hookenv.local_unit().replace('/', '-') + volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) + + if errors: + return None + return volume_config + + +def mount_volume(config): + if os.path.exists(config['mountpoint']): + if not os.path.isdir(config['mountpoint']): + hookenv.log('Not a directory: {}'.format(config['mountpoint'])) + raise VolumeConfigurationError() + else: + host.mkdir(config['mountpoint']) + if os.path.ismount(config['mountpoint']): + unmount_volume(config) + if not host.mount(config['device'], config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def unmount_volume(config): + if os.path.ismount(config['mountpoint']): + if not host.umount(config['mountpoint'], persist=True): + raise VolumeConfigurationError() + + +def managed_mounts(): + '''List of all mounted managed volumes''' + return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) + + +def configure_volume(before_change=lambda: None, after_change=lambda: None): + '''Set up storage (or don't) according to the charm's volume configuration. + Returns the mount point or "ephemeral". before_change and after_change + are optional functions to be called if the volume configuration changes. 
+ ''' + + config = get_config() + if not config: + hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) + raise VolumeConfigurationError() + + if config['ephemeral']: + if os.path.ismount(config['mountpoint']): + before_change() + unmount_volume(config) + after_change() + return 'ephemeral' + else: + # persistent storage + if os.path.ismount(config['mountpoint']): + mounts = dict(managed_mounts()) + if mounts.get(config['mountpoint']) != config['device']: + before_change() + unmount_volume(config) + mount_volume(config) + after_change() + else: + before_change() + mount_volume(config) + after_change() + return config['mountpoint'] diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 8e3cc5ab..25818b3e 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -73,6 +73,8 @@ from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.openstack.context import ADDRESS_TYPES +from charmhelpers.contrib.charmsupport.nrpe import NRPE + hooks = Hooks() CONFIGS = register_configs() @@ -120,6 +122,7 @@ def config_changed(): if openstack_upgrade_available('neutron-server'): do_openstack_upgrade(CONFIGS) configure_https() + update_nrpe_config() CONFIGS.write_all() for r_id in relation_ids('neutron-api'): neutron_api_relation_joined(rid=r_id) @@ -369,6 +372,20 @@ def ha_changed(): neutron_api_relation_joined(rid=rid) +@hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') +def update_nrpe_config(): + nrpe = NRPE() + apt_install('python-dbus') + + nrpe.add_check( + shortname='neutron-api', + description='neutron-server process', + check_cmd = 'check_upstart_job neutron-server', + ) + + nrpe.write() + + def main(): try: hooks.execute(sys.argv) diff --git a/hooks/nrpe-external-master-relation-changed b/hooks/nrpe-external-master-relation-changed new file mode 120000 index 00000000..1fb10fd5 --- /dev/null +++ b/hooks/nrpe-external-master-relation-changed @@ -0,0 +1 @@ +neutron_api_hooks.py \ No newline at end of file diff --git a/hooks/nrpe-external-master-relation-joined b/hooks/nrpe-external-master-relation-joined new file mode 120000 index 00000000..1fb10fd5 --- /dev/null +++ b/hooks/nrpe-external-master-relation-joined @@ -0,0 +1 @@ +neutron_api_hooks.py \ No newline at end of file diff --git a/metadata.yaml b/metadata.yaml index 4e3c7bcf..773a75ad 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -15,6 +15,9 @@ description: | categories: - openstack provides: + nrpe-external-master: + interface: nrpe-external-master + scope: container neutron-api: interface: neutron-api neutron-plugin-api: From ae4948df1fc8d68739f0ae81b9cd19ec783c008a Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 30 Oct 2014 16:02:56 +1000 Subject: [PATCH 075/125] [bradm] Added charmsupport to charmhelpers --- charm-helpers-sync.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 8af0007c..077973b4 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -9,3 +9,4 @@ include: - contrib.storage.linux - payload.execd - contrib.network.ip + - contrib.charmsupport From dbe897e5b38b9f56dd412b381bc963f0ef4e6608 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Fri, 31 Oct 2014 14:51:44 +1000 Subject: [PATCH 076/125] [bradm] Added support to get nagios hostname from nrpe relation --- hooks/charmhelpers/contrib/charmsupport/nrpe.py | 8 ++++++-- hooks/neutron_api_hooks.py | 9 ++++++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git 
a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py index f3bfe3f3..51b62d39 100644 --- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -129,6 +129,7 @@ define service {{ os.path.join(os.environ['CHARM_DIR'], 'files/nrpe-external-master'), '/usr/lib/nagios/plugins', + '/usr/local/lib/nagios/plugins', ) parts = shlex.split(check_cmd) for path in search_path: @@ -181,12 +182,15 @@ class NRPE(object): nagios_exportdir = '/var/lib/nagios/export' nrpe_confdir = '/etc/nagios/nrpe.d' - def __init__(self): + def __init__(self, hostname=None): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] self.unit_name = local_unit().replace('/', '-') - self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + if hostname: + self.hostname = hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) self.checks = [] def add_check(self, *args, **kwargs): diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 25818b3e..8d091303 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -14,6 +14,7 @@ from charmhelpers.core.hookenv import ( relation_get, relation_ids, relation_set, + relations_of_type, open_port, unit_get, ) @@ -374,7 +375,13 @@ def ha_changed(): @hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') def update_nrpe_config(): - nrpe = NRPE() + # Find out if nrpe set nagios_hostname + hostname = None + for rel in relations_of_type('nrpe-external-master'): + if 'nagios_hostname' in rel: + hostname = rel['nagios_hostname'] + break + nrpe = NRPE(hostname=hostname) apt_install('python-dbus') nrpe.add_check( From a7ea314ea6bc0f38790e9c49f0b50a18b32eb8bf Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Tue, 4 Nov 2014 17:15:45 +1000 Subject: [PATCH 077/125] [bradm] Tweaked check to include host context and unit name --- hooks/neutron_api_hooks.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 8d091303..20a50cf9 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -17,6 +17,7 @@ from charmhelpers.core.hookenv import ( relations_of_type, open_port, unit_get, + local_unit, ) from charmhelpers.core.host import ( @@ -380,13 +381,16 @@ def update_nrpe_config(): for rel in relations_of_type('nrpe-external-master'): if 'nagios_hostname' in rel: hostname = rel['nagios_hostname'] + host_context = rel['nagios_host_context'] break nrpe = NRPE(hostname=hostname) apt_install('python-dbus') - + + current_unit = "%s:%s" % (host_context, local_unit()) + nrpe.add_check( - shortname='neutron-api', - description='neutron-server process', + shortname='neutron-server', + description='process check {%s}' % current_unit, check_cmd = 'check_upstart_job neutron-server', ) From 49fe0d4a6484d7be1606637ee9611c7ecf52f716 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 6 Nov 2014 17:31:51 +1000 Subject: [PATCH 078/125] [bradm] Check if host_context is defined before using it --- hooks/neutron_api_hooks.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 20a50cf9..a24150b3 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -378,6 +378,7 @@ def ha_changed(): def update_nrpe_config(): # Find out if nrpe set nagios_hostname hostname = 
None + host_context = None for rel in relations_of_type('nrpe-external-master'): if 'nagios_hostname' in rel: hostname = rel['nagios_hostname'] @@ -386,7 +387,10 @@ def update_nrpe_config(): nrpe = NRPE(hostname=hostname) apt_install('python-dbus') - current_unit = "%s:%s" % (host_context, local_unit()) + if host_context: + current_unit = "%s:%s" % (host_context, local_unit()) + else: + current_unit = local_unit() nrpe.add_check( shortname='neutron-server', From a134754fc531eaa43e3cb9326aa134b9ce9891a5 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 12 Nov 2014 09:32:16 +0000 Subject: [PATCH 079/125] Provide fallback options for HA VIP if iface and netmask cannot be automatically detected --- config.yaml | 12 +++++++++++ hooks/neutron_api_hooks.py | 8 +++++-- unit_tests/test_neutron_api_hooks.py | 32 ++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 2 deletions(-) diff --git a/config.yaml b/config.yaml index b26759a5..c709cb80 100644 --- a/config.yaml +++ b/config.yaml @@ -75,6 +75,18 @@ options: . If multiple networks are being used, a VIP should be provided for each network, separated by spaces. + vip_iface: + type: string + default: eth0 + description: | + Default network interface to use for HA vip when it cannot be automatically + determined. + vip_cidr: + type: int + default: 24 + description: | + Default CIDR netmask to use for HA vip when it cannot be automatically + determined. ha-bindiface: type: string default: eth0 diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 8e3cc5ab..ba73bbb6 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -324,7 +324,11 @@ def ha_joined(): res_neutron_vip = 'ocf:heartbeat:IPaddr2' vip_params = 'ip' - iface = get_iface_for_address(vip) + iface = (get_iface_for_address(vip) or + config('vip_iface')) + netmask = (get_netmask_for_address(vip) or + config('vip_cidr')) + if iface is not None: vip_key = 'res_neutron_{}_vip'.format(iface) resources[vip_key] = res_neutron_vip @@ -333,7 +337,7 @@ def ha_joined(): 'nic="{iface}"'.format(ip=vip_params, vip=vip, iface=iface, - netmask=get_netmask_for_address(vip)) + netmask=netmask) ) vip_group.append(vip_key) diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 747d66a0..9b565ecb 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -322,6 +322,38 @@ class NeutronAPIHooksTests(CharmTestCase): **_relation_data ) + @patch.object(hooks, 'get_hacluster_config') + def test_ha_joined_no_bound_ip(self, _get_ha_config): + _ha_config = { + 'vip': '10.0.0.1', + 'ha-bindiface': 'eth1', + 'ha-mcastport': '5405', + } + vip_params = 'params ip="10.0.0.1" cidr_netmask="21" nic="eth120"' + _get_ha_config.return_value = _ha_config + self.test_config.set('vip_iface', 'eth120') + self.test_config.set('vip_cidr', '21') + self.get_iface_for_address.return_value = None + self.get_netmask_for_address.return_value = None + _relation_data = { + 'init_services': {'res_neutron_haproxy': 'haproxy'}, + 'corosync_bindiface': _ha_config['ha-bindiface'], + 'corosync_mcastport': _ha_config['ha-mcastport'], + 'resources': { + 'res_neutron_eth120_vip': 'ocf:heartbeat:IPaddr2', + 'res_neutron_haproxy': 'lsb:haproxy' + }, + 'resource_params': { + 'res_neutron_eth120_vip': vip_params, + 'res_neutron_haproxy': 'op monitor interval="5s"' + }, + 'clones': {'cl_nova_haproxy': 'res_neutron_haproxy'} + } + self._call_hook('ha-relation-joined') + self.relation_set.assert_called_with( + **_relation_data + ) 
+ @patch.object(hooks, 'get_hacluster_config') def test_ha_joined_with_ipv6(self, _get_ha_config): self.test_config.set('prefer-ipv6', 'True') From 80c7588da54c0b8eb196c01ce1e4bafa93e51d56 Mon Sep 17 00:00:00 2001 From: James Page Date: Sun, 16 Nov 2014 08:38:32 -0600 Subject: [PATCH 080/125] Resync helpers, fixup unit test --- charm-helpers-sync.yaml | 2 +- hooks/charmhelpers/contrib/network/ip.py | 2 - .../charmhelpers/contrib/openstack/context.py | 192 +++++++++++++----- .../charmhelpers/contrib/openstack/neutron.py | 17 +- .../contrib/openstack/templates/haproxy.cfg | 3 +- hooks/charmhelpers/contrib/openstack/utils.py | 24 +++ .../contrib/storage/linux/ceph.py | 7 +- hooks/charmhelpers/core/hookenv.py | 6 + hooks/charmhelpers/core/host.py | 10 +- hooks/charmhelpers/core/services/__init__.py | 4 +- hooks/charmhelpers/fetch/__init__.py | 6 +- hooks/charmhelpers/fetch/giturl.py | 44 ++++ unit_tests/test_neutron_api_context.py | 1 + 13 files changed, 257 insertions(+), 61 deletions(-) create mode 100644 hooks/charmhelpers/fetch/giturl.py diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 8af0007c..d2fc0229 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~james-page/charm-helpers/lp.1391784 destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index e62e5655..c4bfeadb 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -8,7 +8,6 @@ from functools import partial from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - WARNING, ERROR, log ) @@ -175,7 +174,6 @@ def format_ipv6_addr(address): if is_ipv6(address): address = "[%s]" % address else: - log("Not a valid ipv6 address: %s" % address, level=WARNING) address = None return address diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 538dc913..aaadb790 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -15,6 +15,7 @@ from charmhelpers.fetch import ( from charmhelpers.core.hookenv import ( config, + is_relation_made, local_unit, log, relation_get, @@ -24,7 +25,7 @@ from charmhelpers.core.hookenv import ( unit_get, unit_private_ip, ERROR, - INFO + DEBUG ) from charmhelpers.core.host import ( @@ -57,8 +58,9 @@ from charmhelpers.contrib.network.ip import ( is_address_in_network ) -from charmhelpers.contrib.openstack.utils import get_host_ip - +from charmhelpers.contrib.openstack.utils import ( + get_host_ip, +) CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' @@ -456,26 +458,27 @@ class HAProxyContext(OSContextGenerator): if _laddr: cluster_hosts[laddr]['backends'][_unit] = _laddr - # NOTE(jamespage) no split configurations found, just use - # private addresses - if not cluster_hosts: - cluster_hosts[addr] = {} - cluster_hosts[addr]['network'] = "{}/{}".format( - addr, - get_netmask_for_address(addr) - ) - cluster_hosts[addr]['backends'] = {} - cluster_hosts[addr]['backends'][l_unit] = addr - for rid in relation_ids('cluster'): - for unit in related_units(rid): - _unit = unit.replace('/', '-') - _laddr = relation_get('private-address', - rid=rid, unit=unit) - if _laddr: - cluster_hosts[addr]['backends'][_unit] = _laddr + # NOTE(jamespage) add backend based on private address - this + # 
with either be the only backend or the fallback if no acls + # match in the frontend + cluster_hosts[addr] = {} + cluster_hosts[addr]['network'] = "{}/{}".format( + addr, + get_netmask_for_address(addr) + ) + cluster_hosts[addr]['backends'] = {} + cluster_hosts[addr]['backends'][l_unit] = addr + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _unit = unit.replace('/', '-') + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + cluster_hosts[addr]['backends'][_unit] = _laddr ctxt = { 'frontends': cluster_hosts, + 'default_backend': addr } if config('haproxy-server-timeout'): @@ -584,6 +587,49 @@ class ApacheSSLContext(OSContextGenerator): cns.append(k.lstrip('ssl_key_')) return list(set(cns)) + def get_network_addresses(self): + """For each network configured, return corresponding address and vip + (if available). + + Returns a list of tuples of the form: + + [(address_in_net_a, vip_in_net_a), + (address_in_net_b, vip_in_net_b), + ...] + + or, if no vip(s) available: + + [(address_in_net_a, address_in_net_a), + (address_in_net_b, address_in_net_b), + ...] + """ + addresses = [] + vips = [] + if config('vip'): + vips = config('vip').split() + + for net_type in ['os-internal-network', 'os-admin-network', + 'os-public-network']: + addr = get_address_in_network(config(net_type), + unit_get('private-address')) + if len(vips) > 1 and is_clustered(): + if not config(net_type): + log("Multiple networks configured but net_type " + "is None (%s)." % net_type, level='WARNING') + continue + + for vip in vips: + if is_address_in_network(config(net_type), vip): + addresses.append((addr, vip)) + break + + elif is_clustered() and config('vip'): + addresses.append((addr, config('vip'))) + else: + addresses.append((addr, addr)) + + return addresses + def __call__(self): if isinstance(self.external_ports, basestring): self.external_ports = [self.external_ports] @@ -602,27 +648,7 @@ class ApacheSSLContext(OSContextGenerator): for cn in self.canonical_names(): self.configure_cert(cn) - addresses = [] - vips = [] - if config('vip'): - vips = config('vip').split() - - for network_type in ['os-internal-network', - 'os-admin-network', - 'os-public-network']: - address = get_address_in_network(config(network_type), - unit_get('private-address')) - if len(vips) > 0 and is_clustered(): - for vip in vips: - if is_address_in_network(config(network_type), - vip): - addresses.append((address, vip)) - break - elif is_clustered(): - addresses.append((address, config('vip'))) - else: - addresses.append((address, address)) - + addresses = self.get_network_addresses() for address, endpoint in set(addresses): for api_port in self.external_ports: ext_port = determine_apache_port(api_port) @@ -700,6 +726,7 @@ class NeutronContext(OSContextGenerator): self.network_manager) n1kv_config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) + n1kv_user_config_flags = config('n1kv-config-flags') n1kv_ctxt = { 'core_plugin': driver, 'neutron_plugin': 'n1kv', @@ -710,11 +737,29 @@ class NeutronContext(OSContextGenerator): 'vsm_username': config('n1kv-vsm-username'), 'vsm_password': config('n1kv-vsm-password'), 'restrict_policy_profiles': config( - 'n1kv_restrict_policy_profiles'), + 'n1kv-restrict-policy-profiles'), } + if n1kv_user_config_flags: + flags = config_flags_parser(n1kv_user_config_flags) + n1kv_ctxt['user_config_flags'] = flags return n1kv_ctxt + def calico_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config 
= neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + calico_ctxt = { + 'core_plugin': driver, + 'neutron_plugin': 'Calico', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config + } + + return calico_ctxt + def neutron_ctxt(self): if https(): proto = 'https' @@ -748,6 +793,8 @@ class NeutronContext(OSContextGenerator): ctxt.update(self.nvp_ctxt()) elif self.plugin == 'n1kv': ctxt.update(self.n1kv_ctxt()) + elif self.plugin == 'Calico': + ctxt.update(self.calico_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: @@ -761,21 +808,39 @@ class NeutronContext(OSContextGenerator): class OSConfigFlagContext(OSContextGenerator): """ - Responsible for adding user-defined config-flags in charm config to a - template context. + Provides support for user-defined config flags. + + Users can define a comma-seperated list of key=value pairs + in the charm configuration and apply them at any point in + any file by using a template flag. + + Sometimes users might want config flags inserted within a + specific section so this class allows users to specify the + template flag name, allowing for multiple template flags + (sections) within the same context. NOTE: the value of config-flags may be a comma-separated list of key=value pairs and some Openstack config files support comma-separated lists as values. """ + def __init__(self, charm_flag='config-flags', + template_flag='user_config_flags'): + """ + charm_flag: config flags in charm configuration. + template_flag: insert point for user-defined flags template file. + """ + super(OSConfigFlagContext, self).__init__() + self._charm_flag = charm_flag + self._template_flag = template_flag + def __call__(self): - config_flags = config('config-flags') + config_flags = config(self._charm_flag) if not config_flags: return {} - flags = config_flags_parser(config_flags) - return {'user_config_flags': flags} + return {self._template_flag: + config_flags_parser(config_flags)} class SubordinateConfigContext(OSContextGenerator): @@ -867,7 +932,7 @@ class SubordinateConfigContext(OSContextGenerator): else: ctxt[k] = v - log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) return ctxt @@ -922,3 +987,34 @@ class WorkerConfigContext(OSContextGenerator): "workers": self.num_cpus * multiplier } return ctxt + + +class ZeroMQContext(OSContextGenerator): + interfaces = ['zeromq-configuration'] + + def __call__(self): + ctxt = {} + if is_relation_made('zeromq-configuration', 'host'): + for rid in relation_ids('zeromq-configuration'): + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + return ctxt + + +class NotificationDriverContext(OSContextGenerator): + + def __init__(self, zmq_relation='zeromq-configuration', amqp_relation='amqp'): + """ + :param zmq_relation : Name of Zeromq relation to check + """ + self.zmq_relation = zmq_relation + self.amqp_relation = amqp_relation + + def __call__(self): + ctxt = { + 'notifications': 'False', + } + if is_relation_made(self.amqp_relation): + ctxt['notifications'] = "True" + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 84d97bca..b2a2dfe0 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -138,10 +138,25 @@ def 
neutron_plugins(): relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], 'services': [], - 'packages': [['neutron-plugin-cisco']], + 'packages': [[headers_package()] + determine_dkms_package(), + ['neutron-plugin-cisco']], 'server_packages': ['neutron-server', 'neutron-plugin-cisco'], 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'], + 'packages': [[headers_package()] + determine_dkms_package(), + ['calico-compute', 'bird', 'neutron-dhcp-agent']], + 'server_packages': ['neutron-server', 'calico-control'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 19c9b856..daaee011 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -42,7 +42,8 @@ frontend tcp-in_{{ service }} {% for frontend in frontends -%} acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} - {% endfor %} + {% endfor -%} + default_backend {{ service }}_{{ default_backend }} {% for frontend in frontends -%} backend {{ service }}_{{ frontend }} balance leastconn diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index b0d1b03a..ae24fb91 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -2,6 +2,7 @@ # Common python helper functions used for OpenStack charms. 
from collections import OrderedDict +from functools import wraps import subprocess import json @@ -468,6 +469,14 @@ def get_hostname(address, fqdn=True): return result.split('.')[0] +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + def sync_db_with_multi_ipv6_addresses(database, database_user, relation_prefix=None): hosts = get_ipv6_addr(dynamic_only=False) @@ -484,3 +493,18 @@ def sync_db_with_multi_ipv6_addresses(database, database_user, for rid in relation_ids('shared-db'): relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 768438a4..598ec263 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -113,7 +113,7 @@ def get_osds(service): return None -def create_pool(service, name, replicas=2): +def create_pool(service, name, replicas=3): ''' Create a new RADOS pool ''' if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), @@ -300,7 +300,8 @@ def copy_files(src, dst, symlinks=False, ignore=None): def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, - blk_device, fstype, system_services=[]): + blk_device, fstype, system_services=[], + replicas=3): """ NOTE: This function must only be called from a single service unit for the same rbd_img otherwise data loss will occur. @@ -317,7 +318,7 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, # Ensure pool, RBD image, RBD mappings are in place. if not pool_exists(service, pool): log('ceph: Creating new pool {}.'.format(pool)) - create_pool(service, pool) + create_pool(service, pool, replicas=replicas) if not rbd_exists(service, pool, rbd_img): log('ceph: Creating RBD image ({}).'.format(rbd_img)) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index af8fe2db..083a7090 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -214,6 +214,12 @@ class Config(dict): except KeyError: return (self._prev_dict or {})[key] + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + dict.keys(self))) + def load_previous(self, path=None): """Load previous copy of config from disk. 
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index d7ce1e4c..0b8bdc50 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -6,13 +6,13 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager from collections import OrderedDict @@ -317,7 +317,13 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py index e8039a84..69dde79a 100644 --- a/hooks/charmhelpers/core/services/__init__.py +++ b/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,2 @@ -from .base import * -from .helpers import * +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 32a673d6..2398e8ed 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -72,6 +72,7 @@ CLOUD_ARCHIVE_POCKETS = { FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. @@ -218,6 +219,7 @@ def add_source(source, key=None): pocket for the release. 'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. 
Ideally, this should be an @@ -251,8 +253,10 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: - raise SourceConfigError("Unknown source: {!r}".format(source)) + log("Unknown source: {!r}".format(source)) if key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py new file mode 100644 index 00000000..7d672460 --- /dev/null +++ b/hooks/charmhelpers/fetch/giturl.py @@ -0,0 +1,44 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +try: + from git import Repo +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-git") + from git import Repo + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + #TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git'): + return False + else: + return True + + def clone(self, source, dest, branch): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + repo = Repo.clone_from(source, dest) + repo.git.checkout(branch) + + def install(self, source, branch="master"): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0755) + try: + self.clone(source, dest_dir, branch) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index 3a43e5ee..b0ad2907 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -113,6 +113,7 @@ class HAProxyContextTest(CharmTestCase): 'backends': unit_addresses, } }, + 'default_backend': '10.10.10.11', 'service_ports': service_ports, 'neutron_bind_port': 9686, } From 1fcf029ce7147ad9f5c07abfa089460f72dbc111 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Mon, 17 Nov 2014 13:42:03 +1000 Subject: [PATCH 081/125] [bradm] Added nrpe checks for sysvinit daemons, change to using services() instead of hard coded daemon list, pep8 fixes. 
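
For orientation before the diff below: this hook change registers one NRPE check per service returned by services(), picking between an upstart job check and a cron-driven status-file check for sysvinit daemons. A condensed sketch of that per-service decision follows (the helper name register_check is invented for illustration and the path to check_exit_status.pl is abbreviated; the authoritative logic is the update_nrpe_config() hunk further down):

# Condensed sketch of the per-service branch added to update_nrpe_config();
# the nrpe object is the NRPE class from charmhelpers.contrib.charmsupport.
import os

def register_check(nrpe, service, current_unit):
    upstart_init = '/etc/init/%s.conf' % service
    sysv_init = '/etc/init.d/%s' % service
    if os.path.exists(upstart_init):
        # Upstart-managed daemon: use the stock upstart job check.
        nrpe.add_check(
            shortname=service,
            description='process check {%s}' % current_unit,
            check_cmd='check_upstart_job %s' % service,
        )
    elif os.path.exists(sysv_init):
        # SysV init daemon: a cron job records the init script status to a
        # file every five minutes and the nagios check reads that file.
        status_file = '/var/lib/nagios/service-check-%s.txt' % service
        cron_line = ('*/5 * * * * root check_exit_status.pl -s %s status '
                     '> %s\n' % (sysv_init, status_file))
        with open('/etc/cron.d/nagios-service-check-%s' % service, 'w') as f:
            f.write(cron_line)
        nrpe.add_check(
            shortname=service,
            description='process check {%s}' % current_unit,
            check_cmd='check_status_file.py -f %s' % status_file,
        )
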
--- .../nrpe-external-master/check_exit_status.pl | 189 ++++++++++++++++++ .../nrpe-external-master/check_status_file.py | 60 ++++++ files/nrpe-external-master/nagios_plugin.py | 78 ++++++++ hooks/neutron_api_hooks.py | 39 +++- hooks/neutron_api_utils.py | 8 + 5 files changed, 368 insertions(+), 6 deletions(-) create mode 100755 files/nrpe-external-master/check_exit_status.pl create mode 100755 files/nrpe-external-master/check_status_file.py create mode 100755 files/nrpe-external-master/nagios_plugin.py diff --git a/files/nrpe-external-master/check_exit_status.pl b/files/nrpe-external-master/check_exit_status.pl new file mode 100755 index 00000000..49df22d8 --- /dev/null +++ b/files/nrpe-external-master/check_exit_status.pl @@ -0,0 +1,189 @@ +#!/usr/bin/perl +################################################################################ +# # +# Copyright (C) 2011 Chad Columbus # +# # +# This program is free software; you can redistribute it and/or modify # +# it under the terms of the GNU General Public License as published by # +# the Free Software Foundation; either version 2 of the License, or # +# (at your option) any later version. # +# # +# This program is distributed in the hope that it will be useful, # +# but WITHOUT ANY WARRANTY; without even the implied warranty of # +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # +# GNU General Public License for more details. # +# # +# You should have received a copy of the GNU General Public License # +# along with this program; if not, write to the Free Software # +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # +# # +################################################################################ + +use strict; +use Getopt::Std; +$| = 1; + +my %opts; +getopts('heronp:s:', \%opts); + +my $VERSION = "Version 1.0"; +my $AUTHOR = '(c) 2011 Chad Columbus '; + +# Default values: +my $script_to_check; +my $pattern = 'is running'; +my $cmd; +my $message; +my $error; + +# Exit codes +my $STATE_OK = 0; +my $STATE_WARNING = 1; +my $STATE_CRITICAL = 2; +my $STATE_UNKNOWN = 3; + +# Parse command line options +if ($opts{'h'} || scalar(%opts) == 0) { + &print_help(); + exit($STATE_OK); +} + +# Make sure scipt is provided: +if ($opts{'s'} eq '') { + # Script to run not provided + print "\nYou must provide a script to run. Example: -s /etc/init.d/httpd\n"; + exit($STATE_UNKNOWN); +} else { + $script_to_check = $opts{'s'}; +} + +# Make sure only a-z, 0-9, /, _, and - are used in the script. +if ($script_to_check =~ /[^a-z0-9\_\-\/\.]/) { + # Script contains illegal characters exit. + print "\nScript to check can only contain Letters, Numbers, Periods, Underscores, Hyphens, and/or Slashes\n"; + exit($STATE_UNKNOWN); +} + +# See if script is executable +if (! -x "$script_to_check") { + print "\nIt appears you can't execute $script_to_check, $!\n"; + exit($STATE_UNKNOWN); +} + +# If a pattern is provided use it: +if ($opts{'p'} ne '') { + $pattern = $opts{'p'}; +} + +# If -r run command via sudo as root: +if ($opts{'r'}) { + $cmd = "sudo -n $script_to_check status" . ' 2>&1'; +} else { + $cmd = "$script_to_check status" . ' 2>&1'; +} + +my $cmd_result = `$cmd`; +chomp($cmd_result); +if ($cmd_result =~ /sudo/i) { + # This means it could not run the sudo command + $message = "$script_to_check CRITICAL - Could not run: 'sudo -n $script_to_check status'. Result is $cmd_result"; + $error = $STATE_UNKNOWN; +} else { + # Check exitstatus instead of output: + if ($opts{'e'} == 1) { + if ($? 
!= 0) { + # error + $message = "$script_to_check CRITICAL - Exit code: $?\."; + if ($opts{'o'} == 0) { + $message .= " $cmd_result"; + } + $error = $STATE_CRITICAL; + } else { + # success + $message = "$script_to_check OK - Exit code: $?\."; + if ($opts{'o'} == 0) { + $message .= " $cmd_result"; + } + $error = $STATE_OK; + } + } else { + my $not_check = 1; + if ($opts{'n'} == 1) { + $not_check = 0; + } + if (($cmd_result =~ /$pattern/i) == $not_check) { + $message = "$script_to_check OK"; + if ($opts{'o'} == 0) { + $message .= " - $cmd_result"; + } + $error = $STATE_OK; + } else { + $message = "$script_to_check CRITICAL"; + if ($opts{'o'} == 0) { + $message .= " - $cmd_result"; + } + $error = $STATE_CRITICAL; + } + } +} + +if ($message eq '') { + print "Error: program failed in an unknown way\n"; + exit($STATE_UNKNOWN); +} + +if ($error) { + print "$message\n"; + exit($error); +} else { + # If we get here we are OK + print "$message\n"; + exit($STATE_OK); +} + +#################################### +# Start Subs: +#################################### +sub print_help() { + print << "EOF"; +Check the output or exit status of a script. +$VERSION +$AUTHOR + +Options: +-h + Print detailed help screen + +-s + 'FULL PATH TO SCRIPT' (required) + This is the script to run, the script is designed to run scripts in the + /etc/init.d dir (but can run any script) and will call the script with + a 'status' argument. So if you use another script make sure it will + work with /path/script status, example: /etc/init.d/httpd status + +-e + This is the "exitstaus" flag, it means check the exit status + code instead of looking for a pattern in the output of the script. + +-p 'REGEX' + This is a pattern to look for in the output of the script to confirm it + is running, default is 'is running', but not all init.d scripts output + (iptables), so you can specify an arbitrary pattern. + All patterns are case insensitive. + +-n + This is the "NOT" flag, it means not the -p pattern, so if you want to + make sure the output of the script does NOT contain -p 'REGEX' + +-r + This is the "ROOT" flag, it means run as root via sudo. You will need a + line in your /etc/sudoers file like: + nagios ALL=(root) NOPASSWD: /etc/init.d/* status + +-o + This is the "SUPPRESS OUTPUT" flag. Some programs have a long output + (like iptables), this flag suppresses that output so it is not printed + as a part of the nagios message. +EOF +} + diff --git a/files/nrpe-external-master/check_status_file.py b/files/nrpe-external-master/check_status_file.py new file mode 100755 index 00000000..ba828087 --- /dev/null +++ b/files/nrpe-external-master/check_status_file.py @@ -0,0 +1,60 @@ +#!/usr/bin/python + +# m +# mmmm m m mmmm mmmm mmm mm#mm +# #" "# # # #" "# #" "# #" # # +# # # # # # # # # #"""" # +# ##m#" "mm"# ##m#" ##m#" "#mm" "mm +# # # # +# " " " +# This file is managed by puppet. Do not make local changes. + +# +# Copyright 2014 Canonical Ltd. 
+# +# Author: Jacek Nykis +# + +import re +import nagios_plugin + + +def parse_args(): + import argparse + + parser = argparse.ArgumentParser( + description='Read file and return nagios status based on its content', + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('-f', '--status-file', required=True, + help='Status file path') + parser.add_argument('-c', '--critical-text', default='CRITICAL', + help='String indicating critical status') + parser.add_argument('-w', '--warning-text', default='WARNING', + help='String indicating warning status') + parser.add_argument('-o', '--ok-text', default='OK', + help='String indicating OK status') + parser.add_argument('-u', '--unknown-text', default='UNKNOWN', + help='String indicating unknown status') + return parser.parse_args() + + +def check_status(args): + nagios_plugin.check_file_freshness(args.status_file, 43200) + + with open(args.status_file, "r") as f: + content = [l.strip() for l in f.readlines()] + + for line in content: + if re.search(args.critical_text, line): + raise nagios_plugin.CriticalError(line) + elif re.search(args.warning_text, line): + raise nagios_plugin.WarnError(line) + elif re.search(args.unknown_text, line): + raise nagios_plugin.UnknownError(line) + else: + print line + + +if __name__ == '__main__': + args = parse_args() + nagios_plugin.try_check(check_status, args) diff --git a/files/nrpe-external-master/nagios_plugin.py b/files/nrpe-external-master/nagios_plugin.py new file mode 100755 index 00000000..f0f8e7b5 --- /dev/null +++ b/files/nrpe-external-master/nagios_plugin.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +# m +# mmmm m m mmmm mmmm mmm mm#mm +# #" "# # # #" "# #" "# #" # # +# # # # # # # # # #"""" # +# ##m#" "mm"# ##m#" ##m#" "#mm" "mm +# # # # +# " " " +# This file is managed by puppet. Do not make local changes. + +# Copyright (C) 2005, 2006, 2007, 2012 James Troup + +import os +import stat +import time +import traceback +import sys + + +################################################################################ + +class CriticalError(Exception): + """This indicates a critical error.""" + pass + + +class WarnError(Exception): + """This indicates a warning condition.""" + pass + + +class UnknownError(Exception): + """This indicates a unknown error was encountered.""" + pass + + +def try_check(function, *args, **kwargs): + """Perform a check with error/warn/unknown handling.""" + try: + function(*args, **kwargs) + except UnknownError, msg: + print msg + sys.exit(3) + except CriticalError, msg: + print msg + sys.exit(2) + except WarnError, msg: + print msg + sys.exit(1) + except: + print "%s raised unknown exception '%s'" % (function, sys.exc_info()[0]) + print '=' * 60 + traceback.print_exc(file=sys.stdout) + print '=' * 60 + sys.exit(3) + + +################################################################################ + +def check_file_freshness(filename, newer_than=600): + """Check a file exists, is readable and is newer than seconds (where defaults to 600).""" + # First check the file exists and is readable + if not os.path.exists(filename): + raise CriticalError("%s: does not exist." % (filename)) + if os.access(filename, os.R_OK) == 0: + raise CriticalError("%s: is not readable." % (filename)) + + # Then ensure the file is up-to-date enough + mtime = os.stat(filename)[stat.ST_MTIME] + last_modified = time.time() - mtime + if last_modified > newer_than: + raise CriticalError("%s: was last modified on %s and is too old (> %s seconds)." 
+ % (filename, time.ctime(mtime), newer_than)) + if last_modified < 0: + raise CriticalError("%s: was last modified on %s which is in the future." + % (filename, time.ctime(mtime))) + +################################################################################ diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index a24150b3..e9a560ef 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -1,6 +1,7 @@ #!/usr/bin/python import sys +import os import uuid from subprocess import check_call @@ -47,6 +48,7 @@ from neutron_api_utils import ( do_openstack_upgrade, register_configs, restart_map, + services, setup_ipv6 ) from neutron_api_context import ( @@ -374,7 +376,8 @@ def ha_changed(): neutron_api_relation_joined(rid=rid) -@hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') +@hooks.hook('nrpe-external-master-relation-joined', + 'nrpe-external-master-relation-changed') def update_nrpe_config(): # Find out if nrpe set nagios_hostname hostname = None @@ -392,11 +395,35 @@ def update_nrpe_config(): else: current_unit = local_unit() - nrpe.add_check( - shortname='neutron-server', - description='process check {%s}' % current_unit, - check_cmd = 'check_upstart_job neutron-server', - ) + services_to_monitor = services() + + for service in services_to_monitor: + upstart_init = '/etc/init/%s.conf' % service + sysv_init = '/etc/init.d/%s' % service + + if os.path.exists(upstart_init): + nrpe.add_check( + shortname=service, + description='process check {%s}' % current_unit, + check_cmd='check_upstart_job %s' % service, + ) + elif os.path.exists(sysv_init): + cronpath = '/etc/cron.d/nagios-service-check-%s' % service + checkpath = os.path.join(os.environ['CHARM_DIR'], + 'files/nrpe-external-master', + 'check_exit_status.pl'), + cron_template = '*/5 * * * * root %s -s \ +/etc/init.d/%s status > /var/lib/nagios/service-check-%s.txt\n' \ + % (checkpath[0], service, service) + f = open(cronpath, 'w') + f.write(cron_template) + f.close() + nrpe.add_check( + shortname=service, + description='process check {%s}' % current_unit, + check_cmd='check_status_file.py -f \ +/var/lib/nagios/service-check-%s.txt' % service, + ) nrpe.write() diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 4179440a..6581a9e9 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -172,6 +172,14 @@ def restart_map(): if v['services']]) +def services(): + ''' Returns a list of services associate with this charm ''' + _services = [] + for v in restart_map().values(): + _services = _services + v + return list(set(_services)) + + def keystone_ca_cert_b64(): '''Returns the local Keystone-provided CA cert if it exists, or None.''' if not os.path.isfile(CA_CERT_PATH): From 44dc16e027a9aa96f6ff82a24cd7dd7ddc1bd9d6 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Mon, 17 Nov 2014 15:08:32 +1000 Subject: [PATCH 082/125] [bradm] Removed puppet header from nagios_plugin module --- files/nrpe-external-master/nagios_plugin.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/files/nrpe-external-master/nagios_plugin.py b/files/nrpe-external-master/nagios_plugin.py index f0f8e7b5..fc0d7b7b 100755 --- a/files/nrpe-external-master/nagios_plugin.py +++ b/files/nrpe-external-master/nagios_plugin.py @@ -1,13 +1,4 @@ #!/usr/bin/env python -# m -# mmmm m m mmmm mmmm mmm mm#mm -# #" "# # # #" "# #" "# #" # # -# # # # # # # # # #"""" # -# ##m#" "mm"# ##m#" ##m#" "#mm" "mm -# # # # -# " " " -# This file is managed by puppet. 
Do not make local changes. - # Copyright (C) 2005, 2006, 2007, 2012 James Troup import os From 1413087bcdd57c1dfd153fd4535c1bdf1081895d Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Tue, 18 Nov 2014 11:16:28 +1000 Subject: [PATCH 083/125] [bradm] Removed nagios check files that were moved to nrpe-external-master charm --- .../nrpe-external-master/check_exit_status.pl | 189 ------------------ .../nrpe-external-master/check_status_file.py | 60 ------ files/nrpe-external-master/check_upstart_job | 72 ------- files/nrpe-external-master/nagios_plugin.py | 69 ------- hooks/neutron_api_hooks.py | 9 +- 5 files changed, 3 insertions(+), 396 deletions(-) delete mode 100755 files/nrpe-external-master/check_exit_status.pl delete mode 100755 files/nrpe-external-master/check_status_file.py delete mode 100755 files/nrpe-external-master/check_upstart_job delete mode 100755 files/nrpe-external-master/nagios_plugin.py diff --git a/files/nrpe-external-master/check_exit_status.pl b/files/nrpe-external-master/check_exit_status.pl deleted file mode 100755 index 49df22d8..00000000 --- a/files/nrpe-external-master/check_exit_status.pl +++ /dev/null @@ -1,189 +0,0 @@ -#!/usr/bin/perl -################################################################################ -# # -# Copyright (C) 2011 Chad Columbus # -# # -# This program is free software; you can redistribute it and/or modify # -# it under the terms of the GNU General Public License as published by # -# the Free Software Foundation; either version 2 of the License, or # -# (at your option) any later version. # -# # -# This program is distributed in the hope that it will be useful, # -# but WITHOUT ANY WARRANTY; without even the implied warranty of # -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # -# GNU General Public License for more details. # -# # -# You should have received a copy of the GNU General Public License # -# along with this program; if not, write to the Free Software # -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # -# # -################################################################################ - -use strict; -use Getopt::Std; -$| = 1; - -my %opts; -getopts('heronp:s:', \%opts); - -my $VERSION = "Version 1.0"; -my $AUTHOR = '(c) 2011 Chad Columbus '; - -# Default values: -my $script_to_check; -my $pattern = 'is running'; -my $cmd; -my $message; -my $error; - -# Exit codes -my $STATE_OK = 0; -my $STATE_WARNING = 1; -my $STATE_CRITICAL = 2; -my $STATE_UNKNOWN = 3; - -# Parse command line options -if ($opts{'h'} || scalar(%opts) == 0) { - &print_help(); - exit($STATE_OK); -} - -# Make sure scipt is provided: -if ($opts{'s'} eq '') { - # Script to run not provided - print "\nYou must provide a script to run. Example: -s /etc/init.d/httpd\n"; - exit($STATE_UNKNOWN); -} else { - $script_to_check = $opts{'s'}; -} - -# Make sure only a-z, 0-9, /, _, and - are used in the script. -if ($script_to_check =~ /[^a-z0-9\_\-\/\.]/) { - # Script contains illegal characters exit. - print "\nScript to check can only contain Letters, Numbers, Periods, Underscores, Hyphens, and/or Slashes\n"; - exit($STATE_UNKNOWN); -} - -# See if script is executable -if (! -x "$script_to_check") { - print "\nIt appears you can't execute $script_to_check, $!\n"; - exit($STATE_UNKNOWN); -} - -# If a pattern is provided use it: -if ($opts{'p'} ne '') { - $pattern = $opts{'p'}; -} - -# If -r run command via sudo as root: -if ($opts{'r'}) { - $cmd = "sudo -n $script_to_check status" . 
' 2>&1'; -} else { - $cmd = "$script_to_check status" . ' 2>&1'; -} - -my $cmd_result = `$cmd`; -chomp($cmd_result); -if ($cmd_result =~ /sudo/i) { - # This means it could not run the sudo command - $message = "$script_to_check CRITICAL - Could not run: 'sudo -n $script_to_check status'. Result is $cmd_result"; - $error = $STATE_UNKNOWN; -} else { - # Check exitstatus instead of output: - if ($opts{'e'} == 1) { - if ($? != 0) { - # error - $message = "$script_to_check CRITICAL - Exit code: $?\."; - if ($opts{'o'} == 0) { - $message .= " $cmd_result"; - } - $error = $STATE_CRITICAL; - } else { - # success - $message = "$script_to_check OK - Exit code: $?\."; - if ($opts{'o'} == 0) { - $message .= " $cmd_result"; - } - $error = $STATE_OK; - } - } else { - my $not_check = 1; - if ($opts{'n'} == 1) { - $not_check = 0; - } - if (($cmd_result =~ /$pattern/i) == $not_check) { - $message = "$script_to_check OK"; - if ($opts{'o'} == 0) { - $message .= " - $cmd_result"; - } - $error = $STATE_OK; - } else { - $message = "$script_to_check CRITICAL"; - if ($opts{'o'} == 0) { - $message .= " - $cmd_result"; - } - $error = $STATE_CRITICAL; - } - } -} - -if ($message eq '') { - print "Error: program failed in an unknown way\n"; - exit($STATE_UNKNOWN); -} - -if ($error) { - print "$message\n"; - exit($error); -} else { - # If we get here we are OK - print "$message\n"; - exit($STATE_OK); -} - -#################################### -# Start Subs: -#################################### -sub print_help() { - print << "EOF"; -Check the output or exit status of a script. -$VERSION -$AUTHOR - -Options: --h - Print detailed help screen - --s - 'FULL PATH TO SCRIPT' (required) - This is the script to run, the script is designed to run scripts in the - /etc/init.d dir (but can run any script) and will call the script with - a 'status' argument. So if you use another script make sure it will - work with /path/script status, example: /etc/init.d/httpd status - --e - This is the "exitstaus" flag, it means check the exit status - code instead of looking for a pattern in the output of the script. - --p 'REGEX' - This is a pattern to look for in the output of the script to confirm it - is running, default is 'is running', but not all init.d scripts output - (iptables), so you can specify an arbitrary pattern. - All patterns are case insensitive. - --n - This is the "NOT" flag, it means not the -p pattern, so if you want to - make sure the output of the script does NOT contain -p 'REGEX' - --r - This is the "ROOT" flag, it means run as root via sudo. You will need a - line in your /etc/sudoers file like: - nagios ALL=(root) NOPASSWD: /etc/init.d/* status - --o - This is the "SUPPRESS OUTPUT" flag. Some programs have a long output - (like iptables), this flag suppresses that output so it is not printed - as a part of the nagios message. -EOF -} - diff --git a/files/nrpe-external-master/check_status_file.py b/files/nrpe-external-master/check_status_file.py deleted file mode 100755 index ba828087..00000000 --- a/files/nrpe-external-master/check_status_file.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/python - -# m -# mmmm m m mmmm mmmm mmm mm#mm -# #" "# # # #" "# #" "# #" # # -# # # # # # # # # #"""" # -# ##m#" "mm"# ##m#" ##m#" "#mm" "mm -# # # # -# " " " -# This file is managed by puppet. Do not make local changes. - -# -# Copyright 2014 Canonical Ltd. 
-# -# Author: Jacek Nykis -# - -import re -import nagios_plugin - - -def parse_args(): - import argparse - - parser = argparse.ArgumentParser( - description='Read file and return nagios status based on its content', - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('-f', '--status-file', required=True, - help='Status file path') - parser.add_argument('-c', '--critical-text', default='CRITICAL', - help='String indicating critical status') - parser.add_argument('-w', '--warning-text', default='WARNING', - help='String indicating warning status') - parser.add_argument('-o', '--ok-text', default='OK', - help='String indicating OK status') - parser.add_argument('-u', '--unknown-text', default='UNKNOWN', - help='String indicating unknown status') - return parser.parse_args() - - -def check_status(args): - nagios_plugin.check_file_freshness(args.status_file, 43200) - - with open(args.status_file, "r") as f: - content = [l.strip() for l in f.readlines()] - - for line in content: - if re.search(args.critical_text, line): - raise nagios_plugin.CriticalError(line) - elif re.search(args.warning_text, line): - raise nagios_plugin.WarnError(line) - elif re.search(args.unknown_text, line): - raise nagios_plugin.UnknownError(line) - else: - print line - - -if __name__ == '__main__': - args = parse_args() - nagios_plugin.try_check(check_status, args) diff --git a/files/nrpe-external-master/check_upstart_job b/files/nrpe-external-master/check_upstart_job deleted file mode 100755 index 94efb95e..00000000 --- a/files/nrpe-external-master/check_upstart_job +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/python - -# -# Copyright 2012, 2013 Canonical Ltd. -# -# Author: Paul Collins -# -# Based on http://www.eurion.net/python-snippets/snippet/Upstart%20service%20status.html -# - -import sys - -import dbus - - -class Upstart(object): - def __init__(self): - self._bus = dbus.SystemBus() - self._upstart = self._bus.get_object('com.ubuntu.Upstart', - '/com/ubuntu/Upstart') - def get_job(self, job_name): - path = self._upstart.GetJobByName(job_name, - dbus_interface='com.ubuntu.Upstart0_6') - return self._bus.get_object('com.ubuntu.Upstart', path) - - def get_properties(self, job): - path = job.GetInstance([], dbus_interface='com.ubuntu.Upstart0_6.Job') - instance = self._bus.get_object('com.ubuntu.Upstart', path) - return instance.GetAll('com.ubuntu.Upstart0_6.Instance', - dbus_interface=dbus.PROPERTIES_IFACE) - - def get_job_instances(self, job_name): - job = self.get_job(job_name) - paths = job.GetAllInstances([], dbus_interface='com.ubuntu.Upstart0_6.Job') - return [self._bus.get_object('com.ubuntu.Upstart', path) for path in paths] - - def get_job_instance_properties(self, job): - return job.GetAll('com.ubuntu.Upstart0_6.Instance', - dbus_interface=dbus.PROPERTIES_IFACE) - -try: - upstart = Upstart() - try: - job = upstart.get_job(sys.argv[1]) - props = upstart.get_properties(job) - - if props['state'] == 'running': - print 'OK: %s is running' % sys.argv[1] - sys.exit(0) - else: - print 'CRITICAL: %s is not running' % sys.argv[1] - sys.exit(2) - - except dbus.DBusException as e: - instances = upstart.get_job_instances(sys.argv[1]) - propses = [upstart.get_job_instance_properties(instance) for instance in instances] - states = dict([(props['name'], props['state']) for props in propses]) - if len(states) != states.values().count('running'): - not_running = [] - for name in states.keys(): - if states[name] != 'running': - not_running.append(name) - print 'CRITICAL: %d instances of %s not 
running: %s' % \ - (len(not_running), sys.argv[1], not_running.join(', ')) - sys.exit(2) - else: - print 'OK: %d instances of %s running' % (len(states), sys.argv[1]) - -except dbus.DBusException as e: - print 'CRITICAL: failed to get properties of \'%s\' from upstart' % sys.argv[1] - sys.exit(2) - diff --git a/files/nrpe-external-master/nagios_plugin.py b/files/nrpe-external-master/nagios_plugin.py deleted file mode 100755 index fc0d7b7b..00000000 --- a/files/nrpe-external-master/nagios_plugin.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2005, 2006, 2007, 2012 James Troup - -import os -import stat -import time -import traceback -import sys - - -################################################################################ - -class CriticalError(Exception): - """This indicates a critical error.""" - pass - - -class WarnError(Exception): - """This indicates a warning condition.""" - pass - - -class UnknownError(Exception): - """This indicates a unknown error was encountered.""" - pass - - -def try_check(function, *args, **kwargs): - """Perform a check with error/warn/unknown handling.""" - try: - function(*args, **kwargs) - except UnknownError, msg: - print msg - sys.exit(3) - except CriticalError, msg: - print msg - sys.exit(2) - except WarnError, msg: - print msg - sys.exit(1) - except: - print "%s raised unknown exception '%s'" % (function, sys.exc_info()[0]) - print '=' * 60 - traceback.print_exc(file=sys.stdout) - print '=' * 60 - sys.exit(3) - - -################################################################################ - -def check_file_freshness(filename, newer_than=600): - """Check a file exists, is readable and is newer than seconds (where defaults to 600).""" - # First check the file exists and is readable - if not os.path.exists(filename): - raise CriticalError("%s: does not exist." % (filename)) - if os.access(filename, os.R_OK) == 0: - raise CriticalError("%s: is not readable." % (filename)) - - # Then ensure the file is up-to-date enough - mtime = os.stat(filename)[stat.ST_MTIME] - last_modified = time.time() - mtime - if last_modified > newer_than: - raise CriticalError("%s: was last modified on %s and is too old (> %s seconds)." - % (filename, time.ctime(mtime), newer_than)) - if last_modified < 0: - raise CriticalError("%s: was last modified on %s which is in the future." 
- % (filename, time.ctime(mtime))) - -################################################################################ diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index e9a560ef..c63fb06b 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -409,12 +409,9 @@ def update_nrpe_config(): ) elif os.path.exists(sysv_init): cronpath = '/etc/cron.d/nagios-service-check-%s' % service - checkpath = os.path.join(os.environ['CHARM_DIR'], - 'files/nrpe-external-master', - 'check_exit_status.pl'), - cron_template = '*/5 * * * * root %s -s \ -/etc/init.d/%s status > /var/lib/nagios/service-check-%s.txt\n' \ - % (checkpath[0], service, service) + cron_template = '*/5 * * * * root \ +/usr/local/lib/nagios/plugins/check_exit_status.pl -s /etc/init.d/%s \ +status > /var/lib/nagios/service-check-%s.txt\n' % (service, service) f = open(cronpath, 'w') f.write(cron_template) f.close() From 11f96d4d021a9ee0b4afa1d8c68ec6a0a3359ce9 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 18 Nov 2014 18:59:46 -0600 Subject: [PATCH 084/125] [hopem] fix unit tests --- unit_tests/test_neutron_api_utils.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/unit_tests/test_neutron_api_utils.py b/unit_tests/test_neutron_api_utils.py index b3314862..ba9a096e 100644 --- a/unit_tests/test_neutron_api_utils.py +++ b/unit_tests/test_neutron_api_utils.py @@ -91,7 +91,9 @@ class TestNeutronAPIUtils(CharmTestCase): [self.assertIn(q_conf, _map.keys()) for q_conf in confs] self.assertTrue(nutils.APACHE_CONF not in _map.keys()) - def test_restart_map(self): + @patch('os.path.exists') + def test_restart_map(self, mock_path_exists): + mock_path_exists.return_value = False _restart_map = nutils.restart_map() ML2CONF = "/etc/neutron/plugins/ml2/ml2_conf.ini" expect = OrderedDict([ @@ -104,7 +106,7 @@ class TestNeutronAPIUtils(CharmTestCase): (ML2CONF, { 'services': ['neutron-server'], }), - (nutils.APACHE_24_CONF, { + (nutils.APACHE_CONF, { 'services': ['apache2'], }), (nutils.HAPROXY_CONF, { @@ -113,7 +115,10 @@ class TestNeutronAPIUtils(CharmTestCase): ]) self.assertItemsEqual(_restart_map, expect) - def test_register_configs(self): + @patch('os.path.exists') + def test_register_configs(self, mock_path_exists): + mock_path_exists.return_value = False + class _mock_OSConfigRenderer(): def __init__(self, templates_dir=None, openstack_release=None): self.configs = [] @@ -128,7 +133,7 @@ class TestNeutronAPIUtils(CharmTestCase): confs = ['/etc/neutron/neutron.conf', '/etc/default/neutron-server', '/etc/neutron/plugins/ml2/ml2_conf.ini', - '/etc/apache2/sites-available/openstack_https_frontend.conf', + '/etc/apache2/sites-available/openstack_https_frontend', '/etc/haproxy/haproxy.cfg'] self.assertItemsEqual(_regconfs.configs, confs) From 15c3984cabbe4dc8a81b544a27788ba89c011470 Mon Sep 17 00:00:00 2001 From: James Page Date: Thu, 20 Nov 2014 10:24:44 -0600 Subject: [PATCH 085/125] Resync helpers --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index daaee011..c6a00df2 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -44,6 +44,7 @@ frontend tcp-in_{{ service }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} {% endfor -%} default_backend {{ service }}_{{ default_backend }} + 
{% for frontend in frontends -%} backend {{ service }}_{{ frontend }} balance leastconn From 4f4fad1119b652f8cd1ee768f1fef4cd32b2a076 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 25 Nov 2014 10:19:07 +0000 Subject: [PATCH 086/125] Enable haproxy for when there is a single unit in a deployment --- charm-helpers-sync.yaml | 2 +- .../charmhelpers/contrib/hahelpers/cluster.py | 21 +- hooks/charmhelpers/contrib/network/ip.py | 100 ++-- .../contrib/openstack/amulet/deployment.py | 3 +- .../contrib/openstack/amulet/utils.py | 4 +- .../charmhelpers/contrib/openstack/context.py | 544 ++++++++++-------- .../charmhelpers/contrib/openstack/neutron.py | 20 +- .../contrib/openstack/templates/haproxy.cfg | 4 +- .../contrib/openstack/templating.py | 10 +- hooks/charmhelpers/contrib/openstack/utils.py | 39 +- .../contrib/storage/linux/ceph.py | 176 +++--- .../contrib/storage/linux/loopback.py | 8 +- hooks/charmhelpers/core/fstab.py | 18 +- hooks/charmhelpers/core/hookenv.py | 20 +- hooks/charmhelpers/core/host.py | 33 +- hooks/charmhelpers/core/services/__init__.py | 4 +- hooks/charmhelpers/core/services/helpers.py | 12 +- hooks/charmhelpers/core/templating.py | 3 +- hooks/charmhelpers/fetch/__init__.py | 40 +- hooks/charmhelpers/fetch/archiveurl.py | 60 +- hooks/charmhelpers/fetch/bzrurl.py | 6 +- hooks/charmhelpers/fetch/giturl.py | 48 ++ hooks/neutron_api_context.py | 10 +- hooks/neutron_api_utils.py | 2 +- 24 files changed, 717 insertions(+), 470 deletions(-) create mode 100644 hooks/charmhelpers/fetch/giturl.py diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 8af0007c..c3bf4506 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:charm-helpers +branch: lp:~gnuoy/charm-helpers/haproxy-singlenode-mode destination: hooks/charmhelpers include: - core diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 6d972007..3e51986d 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -13,9 +13,10 @@ clustering-related helpers. import subprocess import os - from socket import gethostname as get_unit_hostname +import six + from charmhelpers.core.hookenv import ( log, relation_ids, @@ -150,34 +151,42 @@ def https(): return False -def determine_api_port(public_port): +def determine_api_port(public_port, singlenode_mode=False): ''' Determine correct API server listening port based on existence of HTTPS reverse proxy and/or haproxy. public_port: int: standard public port for given service + singlenode_mode: boolean: Shuffle ports when only a single unit is present + returns: int: the correct listening port for the API service ''' i = 0 - if len(peer_units()) > 0 or is_clustered(): + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): i += 1 if https(): i += 1 return public_port - (i * 10) -def determine_apache_port(public_port): +def determine_apache_port(public_port, singlenode_mode=False): ''' Description: Determine correct apache listening port based on public IP + state of the cluster. 
public_port: int: standard public port for given service + singlenode_mode: boolean: Shuffle ports when only a single unit is present + returns: int: the correct listening port for the HAProxy service ''' i = 0 - if len(peer_units()) > 0 or is_clustered(): + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): i += 1 return public_port - (i * 10) @@ -197,7 +206,7 @@ def get_hacluster_config(): for setting in settings: conf[setting] = config_get(setting) missing = [] - [missing.append(s) for s, v in conf.iteritems() if v is None] + [missing.append(s) for s, v in six.iteritems(conf) if v is None] if missing: log('Insufficient config data to configure hacluster.', level=ERROR) raise HAIncompleteConfig diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index e62e5655..b9a9815c 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -1,15 +1,12 @@ import glob import re import subprocess -import sys from functools import partial from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - WARNING, - ERROR, log ) @@ -34,31 +31,28 @@ def _validate_cidr(network): network) +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network: %s" % network) + raise ValueError(errmsg) + + def get_address_in_network(network, fallback=None, fatal=False): - """ - Get an IPv4 or IPv6 address within the network from the host. + """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). - """ - - def not_found_error_out(): - log("No IP address found in network: %s" % network, - level=ERROR) - sys.exit(1) - if network is None: if fallback is not None: return fallback + + if fatal: + no_ip_found_error_out(network) else: - if fatal: - not_found_error_out() - else: - return None + return None _validate_cidr(network) network = netaddr.IPNetwork(network) @@ -70,6 +64,7 @@ def get_address_in_network(network, fallback=None, fatal=False): cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) if cidr in network: return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): @@ -82,20 +77,20 @@ def get_address_in_network(network, fallback=None, fatal=False): return fallback if fatal: - not_found_error_out() + no_ip_found_error_out(network) return None def is_ipv6(address): - '''Determine whether provided address is IPv6 or not''' + """Determine whether provided address is IPv6 or not.""" try: address = netaddr.IPAddress(address) except netaddr.AddrFormatError: # probably a hostname - so not an address at all! 
return False - else: - return address.version == 6 + + return address.version == 6 def is_address_in_network(network, address): @@ -113,11 +108,13 @@ def is_address_in_network(network, address): except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Network (%s) is not in CIDR presentation format" % network) + try: address = netaddr.IPAddress(address) except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Address (%s) is not in correct presentation format" % address) + if address in network: return True else: @@ -147,6 +144,7 @@ def _get_for_address(address, key): return iface else: return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): @@ -160,41 +158,42 @@ def _get_for_address(address, key): return str(cidr).split('/')[1] else: return addr[key] + return None get_iface_for_address = partial(_get_for_address, key='iface') + get_netmask_for_address = partial(_get_for_address, key='netmask') def format_ipv6_addr(address): - """ - IPv6 needs to be wrapped with [] in url link to parse correctly. + """If address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. """ if is_ipv6(address): - address = "[%s]" % address - else: - log("Not a valid ipv6 address: %s" % address, level=WARNING) - address = None + return "[%s]" % address - return address + return None def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): - """ - Return the assigned IP address for a given interface, if any, or []. - """ + """Return the assigned IP address for a given interface, if any.""" # Extract nic if passed /dev/ethX if '/' in iface: iface = iface.split('/')[-1] + if not exc_list: exc_list = [] + try: inet_num = getattr(netifaces, inet_type) except AttributeError: - raise Exception('Unknown inet type ' + str(inet_type)) + raise Exception("Unknown inet type '%s'" % str(inet_type)) interfaces = netifaces.interfaces() if inc_aliases: @@ -202,15 +201,18 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for _iface in interfaces: if iface == _iface or _iface.split(':')[0] == iface: ifaces.append(_iface) + if fatal and not ifaces: raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() else: if iface not in interfaces: if fatal: - raise Exception("%s not found " % (iface)) + raise Exception("Interface '%s' not found " % (iface)) else: return [] + else: ifaces = [iface] @@ -221,10 +223,13 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for entry in net_info[inet_num]: if 'addr' in entry and entry['addr'] not in exc_list: addresses.append(entry['addr']) + if fatal and not addresses: raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) - return addresses + + return sorted(addresses) + get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') @@ -241,6 +246,7 @@ def get_iface_from_addr(addr): raw = re.match(ll_key, _addr) if raw: _addr = raw.group(1) + if _addr == addr: log("Address '%s' is configured on iface '%s'" % (addr, iface)) @@ -251,8 +257,9 @@ def get_iface_from_addr(addr): def sniff_iface(f): - """If no iface provided, inject net iface inferred from unit private - address. + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. 
""" def iface_sniffer(*args, **kwargs): if not kwargs.get('iface', None): @@ -317,33 +324,28 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, return addrs if fatal: - raise Exception("Interface '%s' doesn't have a scope global " + raise Exception("Interface '%s' does not have a scope global " "non-temporary ipv6 address." % iface) return [] def get_bridges(vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of bridges on the system or [] - """ - b_rgex = vnic_dir + '/*/bridge' - return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + """Return a list of bridges on the system.""" + b_regex = "%s/*/bridge" % vnic_dir + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of nics comprising a given bridge on the system or [] - """ - brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) - return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + """Return a list of nics comprising a given bridge on the system.""" + brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_regex)] def is_bridge_member(nic): - """ - Check if a given nic is a member of a bridge - """ + """Check if a given nic is a member of a bridge.""" for bridge in get_bridges(): if nic in get_bridge_nics(bridge): return True + return False diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 3c7f422a..f3fee074 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,4 @@ +import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): def _configure_services(self, configs): """Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _get_openstack_release(self): diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 0f312b99..3e0cc61c 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import six + from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils): expected service catalog endpoints. 
""" self.log.debug('actual: {}'.format(repr(actual))) - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: ret = self._validate_dict_data(expected[k][0], actual[k][0]) if ret: diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 538dc913..355e6e05 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -1,20 +1,19 @@ import json import os import time - from base64 import b64decode +from subprocess import check_call -from subprocess import ( - check_call -) +import six +from six.moves import xrange from charmhelpers.fetch import ( apt_install, filter_installed_packages, ) - from charmhelpers.core.hookenv import ( config, + is_relation_made, local_unit, log, relation_get, @@ -23,43 +22,40 @@ from charmhelpers.core.hookenv import ( relation_set, unit_get, unit_private_ip, + DEBUG, + INFO, + WARNING, ERROR, - INFO ) - from charmhelpers.core.host import ( mkdir, - write_file + write_file, ) - from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, determine_api_port, https, - is_clustered + is_clustered, ) - from charmhelpers.contrib.hahelpers.apache import ( get_cert, get_ca_cert, install_ca_cert, ) - from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, ) - from charmhelpers.contrib.network.ip import ( get_address_in_network, get_ipv6_addr, get_netmask_for_address, format_ipv6_addr, - is_address_in_network + is_address_in_network, ) - from charmhelpers.contrib.openstack.utils import get_host_ip CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' +ADDRESS_TYPES = ['admin', 'internal', 'public'] class OSContextError(Exception): @@ -67,7 +63,7 @@ class OSContextError(Exception): def ensure_packages(packages): - '''Install but do not upgrade required plugin packages''' + """Install but do not upgrade required plugin packages.""" required = filter_installed_packages(packages) if required: apt_install(required, fatal=True) @@ -75,20 +71,27 @@ def ensure_packages(packages): def context_complete(ctxt): _missing = [] - for k, v in ctxt.iteritems(): + for k, v in six.iteritems(ctxt): if v is None or v == '': _missing.append(k) + if _missing: - log('Missing required data: %s' % ' '.join(_missing), level='INFO') + log('Missing required data: %s' % ' '.join(_missing), level=INFO) return False + return True def config_flags_parser(config_flags): + """Parses config flags string into dict. + + The provided config_flags string may be a list of comma-separated values + which themselves may be comma-separated list of values. + """ if config_flags.find('==') >= 0: - log("config_flags is not in expected format (key=value)", - level=ERROR) + log("config_flags is not in expected format (key=value)", level=ERROR) raise OSContextError + # strip the following from each value. post_strippers = ' ,' # we strip any leading/trailing '=' or ' ' from the string then @@ -111,17 +114,18 @@ def config_flags_parser(config_flags): # if this not the first entry, expect an embedded key. index = current.rfind(',') if index < 0: - log("invalid config value(s) at index %s" % (i), - level=ERROR) + log("Invalid config value(s) at index %s" % (i), level=ERROR) raise OSContextError key = current[index + 1:] # Add to collection. 
flags[key.strip(post_strippers)] = value.rstrip(post_strippers) + return flags class OSContextGenerator(object): + """Base class for all context generators.""" interfaces = [] def __call__(self): @@ -133,11 +137,11 @@ class SharedDBContext(OSContextGenerator): def __init__(self, database=None, user=None, relation_prefix=None, ssl_dir=None): - ''' - Allows inspecting relation for settings prefixed with relation_prefix. - This is useful for parsing access for multiple databases returned via - the shared-db interface (eg, nova_password, quantum_password) - ''' + """Allows inspecting relation for settings prefixed with + relation_prefix. This is useful for parsing access for multiple + databases returned via the shared-db interface (eg, nova_password, + quantum_password) + """ self.relation_prefix = relation_prefix self.database = database self.user = user @@ -147,9 +151,8 @@ class SharedDBContext(OSContextGenerator): self.database = self.database or config('database') self.user = self.user or config('database-user') if None in [self.database, self.user]: - log('Could not generate shared_db context. ' - 'Missing required charm config options. ' - '(database name and user)') + log("Could not generate shared_db context. Missing required charm " + "config options. (database name and user)", level=ERROR) raise OSContextError ctxt = {} @@ -202,23 +205,24 @@ class PostgresqlDBContext(OSContextGenerator): def __call__(self): self.database = self.database or config('database') if self.database is None: - log('Could not generate postgresql_db context. ' - 'Missing required charm config options. ' - '(database name)') + log('Could not generate postgresql_db context. Missing required ' + 'charm config options. (database name)', level=ERROR) raise OSContextError - ctxt = {} + ctxt = {} for rid in relation_ids(self.interfaces[0]): for unit in related_units(rid): - ctxt = { - 'database_host': relation_get('host', rid=rid, unit=unit), - 'database': self.database, - 'database_user': relation_get('user', rid=rid, unit=unit), - 'database_password': relation_get('password', rid=rid, unit=unit), - 'database_type': 'postgresql', - } + rel_host = relation_get('host', rid=rid, unit=unit) + rel_user = relation_get('user', rid=rid, unit=unit) + rel_passwd = relation_get('password', rid=rid, unit=unit) + ctxt = {'database_host': rel_host, + 'database': self.database, + 'database_user': rel_user, + 'database_password': rel_passwd, + 'database_type': 'postgresql'} if context_complete(ctxt): return ctxt + return {} @@ -227,23 +231,29 @@ def db_ssl(rdata, ctxt, ssl_dir): ca_path = os.path.join(ssl_dir, 'db-client.ca') with open(ca_path, 'w') as fh: fh.write(b64decode(rdata['ssl_ca'])) + ctxt['database_ssl_ca'] = ca_path elif 'ssl_ca' in rdata: - log("Charm not setup for ssl support but ssl ca found") + log("Charm not setup for ssl support but ssl ca found", level=INFO) return ctxt + if 'ssl_cert' in rdata: cert_path = os.path.join( ssl_dir, 'db-client.cert') if not os.path.exists(cert_path): - log("Waiting 1m for ssl client cert validity") + log("Waiting 1m for ssl client cert validity", level=INFO) time.sleep(60) + with open(cert_path, 'w') as fh: fh.write(b64decode(rdata['ssl_cert'])) + ctxt['database_ssl_cert'] = cert_path key_path = os.path.join(ssl_dir, 'db-client.key') with open(key_path, 'w') as fh: fh.write(b64decode(rdata['ssl_key'])) + ctxt['database_ssl_key'] = key_path + return ctxt @@ -251,9 +261,8 @@ class IdentityServiceContext(OSContextGenerator): interfaces = ['identity-service'] def __call__(self): - 
log('Generating template context for identity-service') + log('Generating template context for identity-service', level=DEBUG) ctxt = {} - for rid in relation_ids('identity-service'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) @@ -261,26 +270,24 @@ class IdentityServiceContext(OSContextGenerator): serv_host = format_ipv6_addr(serv_host) or serv_host auth_host = rdata.get('auth_host') auth_host = format_ipv6_addr(auth_host) or auth_host - - ctxt = { - 'service_port': rdata.get('service_port'), - 'service_host': serv_host, - 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': rdata.get('service_password'), - 'service_protocol': - rdata.get('service_protocol') or 'http', - 'auth_protocol': - rdata.get('auth_protocol') or 'http', - } + svc_protocol = rdata.get('service_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + ctxt = {'service_port': rdata.get('service_port'), + 'service_host': serv_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('service_tenant'), + 'admin_user': rdata.get('service_username'), + 'admin_password': rdata.get('service_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol} if context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') return ctxt + return {} @@ -293,21 +300,23 @@ class AMQPContext(OSContextGenerator): self.interfaces = [rel_name] def __call__(self): - log('Generating template context for amqp') + log('Generating template context for amqp', level=DEBUG) conf = config() - user_setting = 'rabbit-user' - vhost_setting = 'rabbit-vhost' if self.relation_prefix: - user_setting = self.relation_prefix + '-rabbit-user' - vhost_setting = self.relation_prefix + '-rabbit-vhost' + user_setting = '%s-rabbit-user' % (self.relation_prefix) + vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) + else: + user_setting = 'rabbit-user' + vhost_setting = 'rabbit-vhost' try: username = conf[user_setting] vhost = conf[vhost_setting] except KeyError as e: - log('Could not generate shared_db context. ' - 'Missing required charm config options: %s.' % e) + log('Could not generate shared_db context. Missing required charm ' + 'config options: %s.' 
% e, level=ERROR) raise OSContextError + ctxt = {} for rid in relation_ids(self.rel_name): ha_vip_only = False @@ -321,6 +330,7 @@ class AMQPContext(OSContextGenerator): host = relation_get('private-address', rid=rid, unit=unit) host = format_ipv6_addr(host) or host ctxt['rabbitmq_host'] = host + ctxt.update({ 'rabbitmq_user': username, 'rabbitmq_password': relation_get('password', rid=rid, @@ -331,6 +341,7 @@ class AMQPContext(OSContextGenerator): ssl_port = relation_get('ssl_port', rid=rid, unit=unit) if ssl_port: ctxt['rabbit_ssl_port'] = ssl_port + ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) if ssl_ca: ctxt['rabbit_ssl_ca'] = ssl_ca @@ -344,41 +355,45 @@ class AMQPContext(OSContextGenerator): if context_complete(ctxt): if 'rabbit_ssl_ca' in ctxt: if not self.ssl_dir: - log(("Charm not setup for ssl support " - "but ssl ca found")) + log("Charm not setup for ssl support but ssl ca " + "found", level=INFO) break + ca_path = os.path.join( self.ssl_dir, 'rabbit-client-ca.pem') with open(ca_path, 'w') as fh: fh.write(b64decode(ctxt['rabbit_ssl_ca'])) ctxt['rabbit_ssl_ca'] = ca_path + # Sufficient information found = break out! break + # Used for active/active rabbitmq >= grizzly - if ('clustered' not in ctxt or ha_vip_only) \ - and len(related_units(rid)) > 1: + if (('clustered' not in ctxt or ha_vip_only) and + len(related_units(rid)) > 1): rabbitmq_hosts = [] for unit in related_units(rid): host = relation_get('private-address', rid=rid, unit=unit) host = format_ipv6_addr(host) or host rabbitmq_hosts.append(host) - ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) + + ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + if not context_complete(ctxt): return {} - else: - return ctxt + + return ctxt class CephContext(OSContextGenerator): + """Generates context for /etc/ceph/ceph.conf templates.""" interfaces = ['ceph'] def __call__(self): - '''This generates context for /etc/ceph/ceph.conf templates''' if not relation_ids('ceph'): return {} - log('Generating template context for ceph') - + log('Generating template context for ceph', level=DEBUG) mon_hosts = [] auth = None key = None @@ -387,18 +402,18 @@ class CephContext(OSContextGenerator): for unit in related_units(rid): auth = relation_get('auth', rid=rid, unit=unit) key = relation_get('key', rid=rid, unit=unit) - ceph_addr = \ - relation_get('ceph-public-address', rid=rid, unit=unit) or \ - relation_get('private-address', rid=rid, unit=unit) + ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + unit=unit) + unit_priv_addr = relation_get('private-address', rid=rid, + unit=unit) + ceph_addr = ceph_pub_addr or unit_priv_addr ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr mon_hosts.append(ceph_addr) - ctxt = { - 'mon_hosts': ' '.join(mon_hosts), - 'auth': auth, - 'key': key, - 'use_syslog': use_syslog - } + ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), + 'auth': auth, + 'key': key, + 'use_syslog': use_syslog} if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') @@ -407,79 +422,68 @@ class CephContext(OSContextGenerator): return {} ensure_packages(['ceph-common']) - return ctxt -ADDRESS_TYPES = ['admin', 'internal', 'public'] - - class HAProxyContext(OSContextGenerator): + """Provides half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. 
+ """ interfaces = ['cluster'] - def __call__(self): - ''' - Builds half a context for the haproxy template, which describes - all peers to be included in the cluster. Each charm needs to include - its own context generator that describes the port mapping. - ''' - if not relation_ids('cluster'): - return {} + def __init__(self, singlenode_mode=False): + self.singlenode_mode = singlenode_mode - l_unit = local_unit().replace('/', '-') + def __call__(self): + if not relation_ids('cluster') and not self.singlenode_mode: + return {} if config('prefer-ipv6'): addr = get_ipv6_addr(exc_list=[config('vip')])[0] else: addr = get_host_ip(unit_get('private-address')) + l_unit = local_unit().replace('/', '-') cluster_hosts = {} # NOTE(jamespage): build out map of configured network endpoints # and associated backends for addr_type in ADDRESS_TYPES: - laddr = get_address_in_network( - config('os-{}-network'.format(addr_type))) + cfg_opt = 'os-{}-network'.format(addr_type) + laddr = get_address_in_network(config(cfg_opt)) if laddr: - cluster_hosts[laddr] = {} - cluster_hosts[laddr]['network'] = "{}/{}".format( - laddr, - get_netmask_for_address(laddr) - ) - cluster_hosts[laddr]['backends'] = {} - cluster_hosts[laddr]['backends'][l_unit] = laddr + netmask = get_netmask_for_address(laddr) + cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, + netmask), + 'backends': {l_unit: laddr}} for rid in relation_ids('cluster'): for unit in related_units(rid): - _unit = unit.replace('/', '-') _laddr = relation_get('{}-address'.format(addr_type), rid=rid, unit=unit) if _laddr: + _unit = unit.replace('/', '-') cluster_hosts[laddr]['backends'][_unit] = _laddr # NOTE(jamespage) no split configurations found, just use # private addresses if not cluster_hosts: - cluster_hosts[addr] = {} - cluster_hosts[addr]['network'] = "{}/{}".format( - addr, - get_netmask_for_address(addr) - ) - cluster_hosts[addr]['backends'] = {} - cluster_hosts[addr]['backends'][l_unit] = addr + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), + 'backends': {l_unit: addr}} for rid in relation_ids('cluster'): for unit in related_units(rid): - _unit = unit.replace('/', '-') _laddr = relation_get('private-address', rid=rid, unit=unit) if _laddr: + _unit = unit.replace('/', '-') cluster_hosts[addr]['backends'][_unit] = _laddr - ctxt = { - 'frontends': cluster_hosts, - } + ctxt = {'frontends': cluster_hosts} if config('haproxy-server-timeout'): ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + if config('haproxy-client-timeout'): ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') @@ -493,13 +497,18 @@ class HAProxyContext(OSContextGenerator): ctxt['stat_port'] = ':8888' for frontend in cluster_hosts: - if len(cluster_hosts[frontend]['backends']) > 1: + if (len(cluster_hosts[frontend]['backends']) > 1 or + self.singlenode_mode): # Enable haproxy when we have enough peers. - log('Ensuring haproxy enabled in /etc/default/haproxy.') + log('Ensuring haproxy enabled in /etc/default/haproxy.', + level=DEBUG) with open('/etc/default/haproxy', 'w') as out: out.write('ENABLED=1\n') + return ctxt - log('HAProxy context is incomplete, this unit has no peers.') + + log('HAProxy context is incomplete, this unit has no peers.', + level=INFO) return {} @@ -507,29 +516,28 @@ class ImageServiceContext(OSContextGenerator): interfaces = ['image-service'] def __call__(self): - ''' - Obtains the glance API server from the image-service relation. 
Useful - in nova and cinder (currently). - ''' - log('Generating template context for image-service.') + """Obtains the glance API server from the image-service relation. + Useful in nova and cinder (currently). + """ + log('Generating template context for image-service.', level=DEBUG) rids = relation_ids('image-service') if not rids: return {} + for rid in rids: for unit in related_units(rid): api_server = relation_get('glance-api-server', rid=rid, unit=unit) if api_server: return {'glance_api_servers': api_server} - log('ImageService context is incomplete. ' - 'Missing required relation data.') + + log("ImageService context is incomplete. Missing required relation " + "data.", level=INFO) return {} class ApacheSSLContext(OSContextGenerator): - - """ - Generates a context for an apache vhost configuration that configures + """Generates a context for an apache vhost configuration that configures HTTPS reverse proxying for one or many endpoints. Generated context looks something like:: @@ -563,6 +571,7 @@ class ApacheSSLContext(OSContextGenerator): else: cert_filename = 'cert' key_filename = 'key' + write_file(path=os.path.join(ssl_dir, cert_filename), content=b64decode(cert)) write_file(path=os.path.join(ssl_dir, key_filename), @@ -574,7 +583,8 @@ class ApacheSSLContext(OSContextGenerator): install_ca_cert(b64decode(ca_cert)) def canonical_names(self): - '''Figure out which canonical names clients will access this service''' + """Figure out which canonical names clients will access this service. + """ cns = [] for r_id in relation_ids('identity-service'): for unit in related_units(r_id): @@ -582,55 +592,80 @@ class ApacheSSLContext(OSContextGenerator): for k in rdata: if k.startswith('ssl_key_'): cns.append(k.lstrip('ssl_key_')) - return list(set(cns)) + + return sorted(list(set(cns))) + + def get_network_addresses(self): + """For each network configured, return corresponding address and vip + (if available). + + Returns a list of tuples of the form: + + [(address_in_net_a, vip_in_net_a), + (address_in_net_b, vip_in_net_b), + ...] + + or, if no vip(s) available: + + [(address_in_net_a, address_in_net_a), + (address_in_net_b, address_in_net_b), + ...] + """ + addresses = [] + if config('vip'): + vips = config('vip').split() + else: + vips = [] + + for net_type in ['os-internal-network', 'os-admin-network', + 'os-public-network']: + addr = get_address_in_network(config(net_type), + unit_get('private-address')) + if len(vips) > 1 and is_clustered(): + if not config(net_type): + log("Multiple networks configured but net_type " + "is None (%s)." 
% net_type, level=WARNING) + continue + + for vip in vips: + if is_address_in_network(config(net_type), vip): + addresses.append((addr, vip)) + break + + elif is_clustered() and config('vip'): + addresses.append((addr, config('vip'))) + else: + addresses.append((addr, addr)) + + return sorted(addresses) def __call__(self): - if isinstance(self.external_ports, basestring): + if isinstance(self.external_ports, six.string_types): self.external_ports = [self.external_ports] - if (not self.external_ports or not https()): + + if not self.external_ports or not https(): return {} self.configure_ca() self.enable_modules() - ctxt = { - 'namespace': self.service_namespace, - 'endpoints': [], - 'ext_ports': [] - } + ctxt = {'namespace': self.service_namespace, + 'endpoints': [], + 'ext_ports': []} for cn in self.canonical_names(): self.configure_cert(cn) - addresses = [] - vips = [] - if config('vip'): - vips = config('vip').split() - - for network_type in ['os-internal-network', - 'os-admin-network', - 'os-public-network']: - address = get_address_in_network(config(network_type), - unit_get('private-address')) - if len(vips) > 0 and is_clustered(): - for vip in vips: - if is_address_in_network(config(network_type), - vip): - addresses.append((address, vip)) - break - elif is_clustered(): - addresses.append((address, config('vip'))) - else: - addresses.append((address, address)) - - for address, endpoint in set(addresses): + addresses = self.get_network_addresses() + for address, endpoint in sorted(set(addresses)): for api_port in self.external_ports: ext_port = determine_apache_port(api_port) int_port = determine_api_port(api_port) portmap = (address, endpoint, int(ext_port), int(int_port)) ctxt['endpoints'].append(portmap) ctxt['ext_ports'].append(int(ext_port)) - ctxt['ext_ports'] = list(set(ctxt['ext_ports'])) + + ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) return ctxt @@ -647,21 +682,23 @@ class NeutronContext(OSContextGenerator): @property def packages(self): - return neutron_plugin_attribute( - self.plugin, 'packages', self.network_manager) + return neutron_plugin_attribute(self.plugin, 'packages', + self.network_manager) @property def neutron_security_groups(self): return None def _ensure_packages(self): - [ensure_packages(pkgs) for pkgs in self.packages] + for pkgs in self.packages: + ensure_packages(pkgs) def _save_flag_file(self): if self.network_manager == 'quantum': _file = '/etc/nova/quantum_plugin.conf' else: _file = '/etc/nova/neutron_plugin.conf' + with open(_file, 'wb') as out: out.write(self.plugin + '\n') @@ -670,13 +707,11 @@ class NeutronContext(OSContextGenerator): self.network_manager) config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - ovs_ctxt = { - 'core_plugin': driver, - 'neutron_plugin': 'ovs', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config - } + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} return ovs_ctxt @@ -685,13 +720,11 @@ class NeutronContext(OSContextGenerator): self.network_manager) config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - nvp_ctxt = { - 'core_plugin': driver, - 'neutron_plugin': 'nvp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config - } + nvp_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'nvp', + 'neutron_security_groups': 
self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} return nvp_ctxt @@ -700,35 +733,50 @@ class NeutronContext(OSContextGenerator): self.network_manager) n1kv_config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - n1kv_ctxt = { - 'core_plugin': driver, - 'neutron_plugin': 'n1kv', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': n1kv_config, - 'vsm_ip': config('n1kv-vsm-ip'), - 'vsm_username': config('n1kv-vsm-username'), - 'vsm_password': config('n1kv-vsm-password'), - 'restrict_policy_profiles': config( - 'n1kv_restrict_policy_profiles'), - } + n1kv_user_config_flags = config('n1kv-config-flags') + restrict_policy_profiles = config('n1kv-restrict-policy-profiles') + n1kv_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': restrict_policy_profiles} + + if n1kv_user_config_flags: + flags = config_flags_parser(n1kv_user_config_flags) + n1kv_ctxt['user_config_flags'] = flags return n1kv_ctxt + def calico_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + calico_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'Calico', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return calico_ctxt + def neutron_ctxt(self): if https(): proto = 'https' else: proto = 'http' + if is_clustered(): host = config('vip') else: host = unit_get('private-address') - url = '%s://%s:%s' % (proto, host, '9696') - ctxt = { - 'network_manager': self.network_manager, - 'neutron_url': url, - } + + ctxt = {'network_manager': self.network_manager, + 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} return ctxt def __call__(self): @@ -748,6 +796,8 @@ class NeutronContext(OSContextGenerator): ctxt.update(self.nvp_ctxt()) elif self.plugin == 'n1kv': ctxt.update(self.n1kv_ctxt()) + elif self.plugin == 'Calico': + ctxt.update(self.calico_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: @@ -759,23 +809,40 @@ class NeutronContext(OSContextGenerator): class OSConfigFlagContext(OSContextGenerator): + """Provides support for user-defined config flags. - """ - Responsible for adding user-defined config-flags in charm config to a - template context. + Users can define a comma-seperated list of key=value pairs + in the charm configuration and apply them at any point in + any file by using a template flag. + + Sometimes users might want config flags inserted within a + specific section so this class allows users to specify the + template flag name, allowing for multiple template flags + (sections) within the same context. NOTE: the value of config-flags may be a comma-separated list of key=value pairs and some Openstack config files support comma-separated lists as values. """ + def __init__(self, charm_flag='config-flags', + template_flag='user_config_flags'): + """ + :param charm_flag: config flags in charm configuration. + :param template_flag: insert point for user-defined flags in template + file. 
+ """ + super(OSConfigFlagContext, self).__init__() + self._charm_flag = charm_flag + self._template_flag = template_flag + def __call__(self): - config_flags = config('config-flags') + config_flags = config(self._charm_flag) if not config_flags: return {} - flags = config_flags_parser(config_flags) - return {'user_config_flags': flags} + return {self._template_flag: + config_flags_parser(config_flags)} class SubordinateConfigContext(OSContextGenerator): @@ -819,7 +886,6 @@ class SubordinateConfigContext(OSContextGenerator): }, } } - """ def __init__(self, service, config_file, interface): @@ -849,26 +915,28 @@ class SubordinateConfigContext(OSContextGenerator): if self.service not in sub_config: log('Found subordinate_config on %s but it contained' - 'nothing for %s service' % (rid, self.service)) + 'nothing for %s service' % (rid, self.service), + level=INFO) continue sub_config = sub_config[self.service] if self.config_file not in sub_config: log('Found subordinate_config on %s but it contained' - 'nothing for %s' % (rid, self.config_file)) + 'nothing for %s' % (rid, self.config_file), + level=INFO) continue sub_config = sub_config[self.config_file] - for k, v in sub_config.iteritems(): + for k, v in six.iteritems(sub_config): if k == 'sections': - for section, config_dict in v.iteritems(): - log("adding section '%s'" % (section)) + for section, config_dict in six.iteritems(v): + log("adding section '%s'" % (section), + level=DEBUG) ctxt[k][section] = config_dict else: ctxt[k] = v - log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) - + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) return ctxt @@ -880,15 +948,14 @@ class LogLevelContext(OSContextGenerator): False if config('debug') is None else config('debug') ctxt['verbose'] = \ False if config('verbose') is None else config('verbose') + return ctxt class SyslogContext(OSContextGenerator): def __call__(self): - ctxt = { - 'use_syslog': config('use-syslog') - } + ctxt = {'use_syslog': config('use-syslog')} return ctxt @@ -896,13 +963,9 @@ class BindHostContext(OSContextGenerator): def __call__(self): if config('prefer-ipv6'): - return { - 'bind_host': '::' - } + return {'bind_host': '::'} else: - return { - 'bind_host': '0.0.0.0' - } + return {'bind_host': '0.0.0.0'} class WorkerConfigContext(OSContextGenerator): @@ -914,11 +977,42 @@ class WorkerConfigContext(OSContextGenerator): except ImportError: apt_install('python-psutil', fatal=True) from psutil import NUM_CPUS + return NUM_CPUS def __call__(self): - multiplier = config('worker-multiplier') or 1 - ctxt = { - "workers": self.num_cpus * multiplier - } + multiplier = config('worker-multiplier') or 0 + ctxt = {"workers": self.num_cpus * multiplier} + return ctxt + + +class ZeroMQContext(OSContextGenerator): + interfaces = ['zeromq-configuration'] + + def __call__(self): + ctxt = {} + if is_relation_made('zeromq-configuration', 'host'): + for rid in relation_ids('zeromq-configuration'): + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + + return ctxt + + +class NotificationDriverContext(OSContextGenerator): + + def __init__(self, zmq_relation='zeromq-configuration', + amqp_relation='amqp'): + """ + :param zmq_relation: Name of Zeromq relation to check + """ + self.zmq_relation = zmq_relation + self.amqp_relation = amqp_relation + + def __call__(self): + ctxt = {'notifications': 'False'} + if is_relation_made(self.amqp_relation): + ctxt['notifications'] = 
"True" + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 84d97bca..8390d135 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -138,10 +138,25 @@ def neutron_plugins(): relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], 'services': [], - 'packages': [['neutron-plugin-cisco']], + 'packages': [[headers_package()] + determine_dkms_package(), + ['neutron-plugin-cisco']], 'server_packages': ['neutron-server', 'neutron-plugin-cisco'], 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'], + 'packages': [[headers_package()] + determine_dkms_package(), + ['calico-compute', 'bird', 'neutron-dhcp-agent']], + 'server_packages': ['neutron-server', 'calico-control'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': @@ -162,7 +177,8 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None): elif manager == 'neutron': plugins = neutron_plugins() else: - log('Error: Network manager does not support plugins.') + log("Network manager '%s' does not support plugins." % (manager), + level=ERROR) raise Exception try: diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 19c9b856..0229f9d4 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -35,7 +35,7 @@ listen stats {{ stat_port }} stats auth admin:password {% if frontends -%} -{% for service, ports in service_ports.iteritems() -%} +{% for service, ports in service_ports.items() -%} frontend tcp-in_{{ service }} bind *:{{ ports[0] }} bind :::{{ ports[0] }} @@ -46,7 +46,7 @@ frontend tcp-in_{{ service }} {% for frontend in frontends -%} backend {{ service }}_{{ frontend }} balance leastconn - {% for unit, address in frontends[frontend]['backends'].iteritems() -%} + {% for unit, address in frontends[frontend]['backends'].items() -%} server {{ unit }} {{ address }}:{{ ports[1] }} check {% endfor %} {% endfor -%} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index f5442712..33df0675 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -1,13 +1,13 @@ import os -from charmhelpers.fetch import apt_install +import six +from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( log, ERROR, INFO ) - from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES try: @@ -43,7 +43,7 @@ def get_loader(templates_dir, os_release): order by OpenStack release. """ tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) - for rel in OPENSTACK_CODENAMES.itervalues()] + for rel in six.itervalues(OPENSTACK_CODENAMES)] if not os.path.isdir(templates_dir): log('Templates directory not found @ %s.' % templates_dir, @@ -258,7 +258,7 @@ class OSConfigRenderer(object): """ Write out all registered config files. 
""" - [self.write(k) for k in self.templates.iterkeys()] + [self.write(k) for k in six.iterkeys(self.templates)] def set_release(self, openstack_release): """ @@ -275,5 +275,5 @@ class OSConfigRenderer(object): ''' interfaces = [] [interfaces.extend(i.complete_contexts()) - for i in self.templates.itervalues()] + for i in six.itervalues(self.templates)] return interfaces diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index b0d1b03a..6447ce9a 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -2,6 +2,7 @@ # Common python helper functions used for OpenStack charms. from collections import OrderedDict +from functools import wraps import subprocess import json @@ -9,6 +10,8 @@ import os import socket import sys +import six + from charmhelpers.core.hookenv import ( config, log as juju_log, @@ -112,7 +115,7 @@ def get_os_codename_install_source(src): # Best guess match based on deb string provided if src.startswith('deb') or src.startswith('ppa'): - for k, v in OPENSTACK_CODENAMES.iteritems(): + for k, v in six.iteritems(OPENSTACK_CODENAMES): if v in src: return v @@ -133,7 +136,7 @@ def get_os_codename_version(vers): def get_os_version_codename(codename): '''Determine OpenStack version number from codename.''' - for k, v in OPENSTACK_CODENAMES.iteritems(): + for k, v in six.iteritems(OPENSTACK_CODENAMES): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -193,7 +196,7 @@ def get_os_version_package(pkg, fatal=True): else: vers_map = OPENSTACK_CODENAMES - for version, cname in vers_map.iteritems(): + for version, cname in six.iteritems(vers_map): if cname == codename: return version # e = "Could not determine OpenStack version for package: %s" % pkg @@ -317,7 +320,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): rc_script.write( "#!/bin/bash\n") [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in env_vars.iteritems() if u != "script_path"] + for u, p in six.iteritems(env_vars) if u != "script_path"] def openstack_upgrade_available(package): @@ -417,7 +420,7 @@ def ns_query(address): if isinstance(address, dns.name.Name): rtype = 'PTR' - elif isinstance(address, basestring): + elif isinstance(address, six.string_types): rtype = 'A' else: return None @@ -468,6 +471,14 @@ def get_hostname(address, fqdn=True): return result.split('.')[0] +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + def sync_db_with_multi_ipv6_addresses(database, database_user, relation_prefix=None): hosts = get_ipv6_addr(dynamic_only=False) @@ -477,10 +488,24 @@ def sync_db_with_multi_ipv6_addresses(database, database_user, 'hostname': json.dumps(hosts)} if relation_prefix: - keys = kwargs.keys() - for key in keys: + for key in list(kwargs.keys()): kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] del kwargs[key] for rid in relation_ids('shared-db'): relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py 
b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 768438a4..5d907c02 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -16,19 +16,18 @@ import time from subprocess import ( check_call, check_output, - CalledProcessError + CalledProcessError, ) - from charmhelpers.core.hookenv import ( relation_get, relation_ids, related_units, log, + DEBUG, INFO, WARNING, - ERROR + ERROR, ) - from charmhelpers.core.host import ( mount, mounts, @@ -37,7 +36,6 @@ from charmhelpers.core.host import ( service_running, umount, ) - from charmhelpers.fetch import ( apt_install, ) @@ -56,99 +54,82 @@ CEPH_CONF = """[global] def install(): - ''' Basic Ceph client installation ''' + """Basic Ceph client installation.""" ceph_dir = "/etc/ceph" if not os.path.exists(ceph_dir): os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) def rbd_exists(service, pool, rbd_img): - ''' Check to see if a RADOS block device exists ''' + """Check to see if a RADOS block device exists.""" try: - out = check_output(['rbd', 'list', '--id', service, - '--pool', pool]) + out = check_output(['rbd', 'list', '--id', service, '--pool', pool]) except CalledProcessError: return False - else: - return rbd_img in out + + return rbd_img in out def create_rbd_image(service, pool, image, sizemb): - ''' Create a new RADOS block device ''' - cmd = [ - 'rbd', - 'create', - image, - '--size', - str(sizemb), - '--id', - service, - '--pool', - pool - ] + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] check_call(cmd) def pool_exists(service, name): - ''' Check to see if a RADOS pool already exists ''' + """Check to see if a RADOS pool already exists.""" try: out = check_output(['rados', '--id', service, 'lspools']) except CalledProcessError: return False - else: - return name in out + + return name in out def get_osds(service): - ''' - Return a list of all Ceph Object Storage Daemons - currently in the cluster - ''' + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. + """ version = ceph_version() if version and version >= '0.56': return json.loads(check_output(['ceph', '--id', service, 'osd', 'ls', '--format=json'])) - else: - return None + + return None -def create_pool(service, name, replicas=2): - ''' Create a new RADOS pool ''' +def create_pool(service, name, replicas=3): + """Create a new RADOS pool.""" if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), level=WARNING) return + # Calculate the number of placement groups based # on upstream recommended best practices. 
osds = get_osds(service) if osds: - pgnum = (len(osds) * 100 / replicas) + pgnum = (len(osds) * 100 // replicas) else: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli pgnum = 200 - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'create', - name, str(pgnum) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] check_call(cmd) - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set', name, - 'size', str(replicas) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', + str(replicas)] check_call(cmd) def delete_pool(service, name): - ''' Delete a RADOS pool from ceph ''' - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'delete', - name, '--yes-i-really-really-mean-it' - ] + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] check_call(cmd) @@ -161,44 +142,43 @@ def _keyring_path(service): def create_keyring(service, key): - ''' Create a new Ceph keyring containing key''' + """Create a new Ceph keyring containing key.""" keyring = _keyring_path(service) if os.path.exists(keyring): - log('ceph: Keyring exists at %s.' % keyring, level=WARNING) + log('Ceph keyring exists at %s.' % keyring, level=WARNING) return - cmd = [ - 'ceph-authtool', - keyring, - '--create-keyring', - '--name=client.{}'.format(service), - '--add-key={}'.format(key) - ] + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] check_call(cmd) - log('ceph: Created new ring at %s.' % keyring, level=INFO) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) def create_key_file(service, key): - ''' Create a file containing key ''' + """Create a file containing key.""" keyfile = _keyfile_path(service) if os.path.exists(keyfile): - log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) + log('Keyfile exists at %s.' % keyfile, level=WARNING) return + with open(keyfile, 'w') as fd: fd.write(key) - log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) + + log('Created new keyfile at %s.' 
% keyfile, level=INFO) def get_ceph_nodes(): - ''' Query named relation 'ceph' to detemine current nodes ''' + """Query named relation 'ceph' to determine current nodes.""" hosts = [] for r_id in relation_ids('ceph'): for unit in related_units(r_id): hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts def configure(service, key, auth, use_syslog): - ''' Perform basic configuration of Ceph ''' + """Perform basic configuration of Ceph.""" create_keyring(service, key) create_key_file(service, key) hosts = get_ceph_nodes() @@ -211,17 +191,17 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): - ''' Determine whether a RADOS block device is mapped locally ''' + """Determine whether a RADOS block device is mapped locally.""" try: out = check_output(['rbd', 'showmapped']) except CalledProcessError: return False - else: - return name in out + + return name in out def map_block_storage(service, pool, image): - ''' Map a RADOS block device for local use ''' + """Map a RADOS block device for local use.""" cmd = [ 'rbd', 'map', @@ -235,31 +215,32 @@ def map_block_storage(service, pool, image): def filesystem_mounted(fs): - ''' Determine whether a filesytems is already mounted ''' + """Determine whether a filesytems is already mounted.""" return fs in [f for f, m in mounts()] def make_filesystem(blk_device, fstype='ext4', timeout=10): - ''' Make a new filesystem on the specified block device ''' + """Make a new filesystem on the specified block device.""" count = 0 e_noent = os.errno.ENOENT while not os.path.exists(blk_device): if count >= timeout: - log('ceph: gave up waiting on block device %s' % blk_device, + log('Gave up waiting on block device %s' % blk_device, level=ERROR) raise IOError(e_noent, os.strerror(e_noent), blk_device) - log('ceph: waiting for block device %s to appear' % blk_device, - level=INFO) + + log('Waiting for block device %s to appear' % blk_device, + level=DEBUG) count += 1 time.sleep(1) else: - log('ceph: Formatting block device %s as filesystem %s.' % + log('Formatting block device %s as filesystem %s.' 
% (blk_device, fstype), level=INFO) check_call(['mkfs', '-t', fstype, blk_device]) def place_data_on_block_device(blk_device, data_src_dst): - ''' Migrate data in data_src_dst to blk_device and then remount ''' + """Migrate data in data_src_dst to blk_device and then remount.""" # mount block device into /mnt mount(blk_device, '/mnt') # copy data to /mnt @@ -279,8 +260,8 @@ def place_data_on_block_device(blk_device, data_src_dst): # TODO: re-use def modprobe(module): - ''' Load a kernel module and configure for auto-load on reboot ''' - log('ceph: Loading kernel module', level=INFO) + """Load a kernel module and configure for auto-load on reboot.""" + log('Loading kernel module', level=INFO) cmd = ['modprobe', module] check_call(cmd) with open('/etc/modules', 'r+') as modules: @@ -289,7 +270,7 @@ def modprobe(module): def copy_files(src, dst, symlinks=False, ignore=None): - ''' Copy files from src to dst ''' + """Copy files from src to dst.""" for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) @@ -300,9 +281,9 @@ def copy_files(src, dst, symlinks=False, ignore=None): def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, - blk_device, fstype, system_services=[]): - """ - NOTE: This function must only be called from a single service unit for + blk_device, fstype, system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for the same rbd_img otherwise data loss will occur. Ensures given pool and RBD image exists, is mapped to a block device, @@ -316,15 +297,16 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, """ # Ensure pool, RBD image, RBD mappings are in place. if not pool_exists(service, pool): - log('ceph: Creating new pool {}.'.format(pool)) - create_pool(service, pool) + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) if not rbd_exists(service, pool, rbd_img): - log('ceph: Creating RBD image ({}).'.format(rbd_img)) + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) create_rbd_image(service, pool, rbd_img, sizemb) if not image_mapped(rbd_img): - log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) map_block_storage(service, pool, rbd_img) # make file system @@ -339,42 +321,44 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, for svc in system_services: if service_running(svc): - log('ceph: Stopping services {} prior to migrating data.' - .format(svc)) + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) service_stop(svc) place_data_on_block_device(blk_device, mount_point) for svc in system_services: - log('ceph: Starting service {} after migrating data.' - .format(svc)) + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) service_start(svc) def ensure_ceph_keyring(service, user=None, group=None): - ''' - Ensures a ceph keyring is created for a named service - and optionally ensures user and group ownership. + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. Returns False if no ceph key is available in relation state. 
- ''' + """ key = None for rid in relation_ids('ceph'): for unit in related_units(rid): key = relation_get('key', rid=rid, unit=unit) if key: break + if not key: return False + create_keyring(service=service, key=key) keyring = _keyring_path(service) if user and group: check_call(['chown', '%s.%s' % (user, group), keyring]) + return True def ceph_version(): - ''' Retrieve the local version of ceph ''' + """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] output = check_output(cmd) diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py index 38957ef0..a22c3d7b 100644 --- a/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -1,12 +1,12 @@ - import os import re - from subprocess import ( check_call, check_output, ) +import six + ################################################## # loopback device helpers. @@ -37,7 +37,7 @@ def create_loopback(file_path): ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) - for d, f in loopback_devices().iteritems(): + for d, f in six.iteritems(loopback_devices()): if f == file_path: return d @@ -51,7 +51,7 @@ def ensure_loopback_device(path, size): :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' - for d, f in loopback_devices().iteritems(): + for d, f in six.iteritems(loopback_devices()): if f == path: return d diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py index cfaf0a65..0adf0db3 100644 --- a/hooks/charmhelpers/core/fstab.py +++ b/hooks/charmhelpers/core/fstab.py @@ -3,10 +3,11 @@ __author__ = 'Jorge Niedbalski R. ' +import io import os -class Fstab(file): +class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer for file `/etc/fstab` """ @@ -24,8 +25,8 @@ class Fstab(file): options = "defaults" self.options = options - self.d = d - self.p = p + self.d = int(d) + self.p = int(p) def __eq__(self, o): return str(self) == str(o) @@ -45,7 +46,7 @@ class Fstab(file): self._path = path else: self._path = self.DEFAULT_PATH - file.__init__(self, self._path, 'r+') + super(Fstab, self).__init__(self._path, 'rb+') def _hydrate_entry(self, line): # NOTE: use split with no arguments to split on any @@ -58,8 +59,9 @@ class Fstab(file): def entries(self): self.seek(0) for line in self.readlines(): + line = line.decode('us-ascii') try: - if not line.startswith("#"): + if line.strip() and not line.startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -75,14 +77,14 @@ class Fstab(file): if self.get_entry_by_attr('device', entry.device): return False - self.write(str(entry) + '\n') + self.write((str(entry) + '\n').encode('us-ascii')) self.truncate() return entry def remove_entry(self, entry): self.seek(0) - lines = self.readlines() + lines = [l.decode('us-ascii') for l in self.readlines()] found = False for index, line in enumerate(lines): @@ -97,7 +99,7 @@ class Fstab(file): lines.remove(line) self.seek(0) - self.write(''.join(lines)) + self.write(''.join(lines).encode('us-ascii')) self.truncate() return True diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index af8fe2db..90623667 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -9,9 +9,11 @@ import json import yaml import subprocess import sys -import UserDict from subprocess import CalledProcessError +import six 
+from six.moves import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -67,12 +69,12 @@ def log(message, level=None): subprocess.call(command) -class Serializable(UserDict.IterableUserDict): +class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" def __init__(self, obj): # wrap the object - UserDict.IterableUserDict.__init__(self) + UserDict.__init__(self) self.data = obj def __getattr__(self, attr): @@ -214,6 +216,12 @@ class Config(dict): except KeyError: return (self._prev_dict or {})[key] + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + list(dict.keys(self)))) + def load_previous(self, path=None): """Load previous copy of config from disk. @@ -263,7 +271,7 @@ class Config(dict): """ if self._prev_dict: - for k, v in self._prev_dict.iteritems(): + for k, v in six.iteritems(self._prev_dict): if k not in self: self[k] = v with open(self.path, 'w') as f: @@ -300,7 +308,7 @@ def relation_get(attribute=None, unit=None, rid=None): return json.loads(subprocess.check_output(_args)) except ValueError: return None - except CalledProcessError, e: + except CalledProcessError as e: if e.returncode == 2: return None raise @@ -312,7 +320,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in (relation_settings.items() + kwargs.items()): + for k, v in (list(relation_settings.items()) + list(kwargs.items())): if v is None: relation_cmd_line.append('{}='.format(k)) else: diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index d7ce1e4c..a3cb996b 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -6,19 +6,20 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager - from collections import OrderedDict -from hookenv import log -from fstab import Fstab +import six + +from .hookenv import log +from .fstab import Fstab def service_start(service_name): @@ -130,7 +131,7 @@ def symlink(source, destination): subprocess.check_call(cmd) -def mkdir(path, owner='root', group='root', perms=0555, force=False): +def mkdir(path, owner='root', group='root', perms=0o555, force=False): """Create a directory""" log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) @@ -146,7 +147,7 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False): os.chown(realpath, uid, gid) -def write_file(path, content, owner='root', group='root', perms=0444): +def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a string""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid @@ -177,7 +178,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): cmd_args.extend([device, mountpoint]) try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False @@ -191,7 +192,7 @@ def umount(mountpoint, persist=False): cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: 
log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False @@ -218,8 +219,8 @@ def file_hash(path, hash_type='md5'): """ if os.path.exists(path): h = getattr(hashlib, hash_type)() - with open(path, 'r') as source: - h.update(source.read()) # IGNORE:E1101 - it does have update + with open(path, 'rb') as source: + h.update(source.read()) return h.hexdigest() else: return None @@ -297,7 +298,7 @@ def pwgen(length=None): if length is None: length = random.choice(range(35, 45)) alphanumeric_chars = [ - l for l in (string.letters + string.digits) + l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] @@ -306,7 +307,7 @@ def pwgen(length=None): def list_nics(nic_type): '''Return a list of nics of given type(s)''' - if isinstance(nic_type, basestring): + if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type @@ -317,7 +318,13 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py index e8039a84..69dde79a 100644 --- a/hooks/charmhelpers/core/services/__init__.py +++ b/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,2 @@ -from .base import * -from .helpers import * +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 7067b94b..163a7932 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -196,7 +196,7 @@ class StoredContext(dict): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0600) + os.fchmod(file_stream.fileno(), 0o600) yaml.dump(config_data, file_stream) def read_context(self, file_name): @@ -211,15 +211,19 @@ class StoredContext(dict): class TemplateCallback(ManagerCallback): """ - Callback class that will render a Jinja2 template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready + action. 
+ + :param str source: The template source file, relative to + `$CHARM_DIR/templates` - :param str source: The template source file, relative to `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file """ - def __init__(self, source, target, owner='root', group='root', perms=0444): + def __init__(self, source, target, + owner='root', group='root', perms=0o444): self.source = source self.target = target self.owner = owner diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py index 2c638853..83133fa4 100644 --- a/hooks/charmhelpers/core/templating.py +++ b/hooks/charmhelpers/core/templating.py @@ -4,7 +4,8 @@ from charmhelpers.core import host from charmhelpers.core import hookenv -def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None): """ Render a template. diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 32a673d6..4a27d2cc 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -5,10 +5,6 @@ from yaml import safe_load from charmhelpers.core.host import ( lsb_release ) -from urlparse import ( - urlparse, - urlunparse, -) import subprocess from charmhelpers.core.hookenv import ( config, @@ -16,6 +12,9 @@ from charmhelpers.core.hookenv import ( ) import os +import six +from six.moves.urllib.parse import urlparse, urlunparse + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -69,10 +68,16 @@ CLOUD_ARCHIVE_POCKETS = { # The order of this list is very important. Handlers should be listed in from # least- to most-specific URL matching. -FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', -) +if six.PY2: + FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', + ) +else: + FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. @@ -148,7 +153,7 @@ def apt_install(packages, options=None, fatal=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -181,7 +186,7 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): """Purge one or more packages""" cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -192,7 +197,7 @@ def apt_purge(packages, fatal=False): def apt_hold(packages, fatal=False): """Hold one or more packages""" cmd = ['apt-mark', 'hold'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -218,6 +223,7 @@ def add_source(source, key=None): pocket for the release. 
'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. Ideally, this should be an @@ -251,12 +257,14 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: - raise SourceConfigError("Unknown source: {!r}".format(source)) + log("Unknown source: {!r}".format(source)) if key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile() as key_file: + with NamedTemporaryFile('w+') as key_file: key_file.write(key) key_file.flush() key_file.seek(0) @@ -293,14 +301,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, basestring): + if isinstance(sources, six.string_types): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, basestring): + if isinstance(keys, six.string_types): keys = [keys] if len(sources) != len(keys): @@ -397,7 +405,7 @@ def _run_apt_command(cmd, fatal=False): while result is None or result == APT_NO_LOCK: try: result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > APT_NO_LOCK_RETRY_COUNT: raise diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 8c045650..613ea90f 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -1,8 +1,14 @@ import os -import urllib2 -from urllib import urlretrieve -import urlparse import hashlib +import re + +import six +from six.moves.urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, +) +from six.moves.urllib.parse import urlparse, urlunparse, parse_qs +from six.moves.urllib.error import URLError from charmhelpers.fetch import ( BaseFetchHandler, @@ -15,6 +21,24 @@ from charmhelpers.payload.archive import ( from charmhelpers.core.host import mkdir, check_hash +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None + + class ArchiveUrlFetchHandler(BaseFetchHandler): """ Handler to download archive files from arbitrary URLs. 
@@ -42,20 +66,20 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): """ # propogate all exceptions # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): - auth, barehost = urllib2.splituser(netloc) + auth, barehost = splituser(netloc) if auth is not None: - source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) - username, password = urllib2.splitpasswd(auth) - passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) - authhandler = urllib2.HTTPBasicAuthHandler(passman) - opener = urllib2.build_opener(authhandler) - urllib2.install_opener(opener) - response = urllib2.urlopen(source) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) try: with open(dest, 'w') as dest_file: dest_file.write(response.read()) @@ -91,17 +115,21 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) try: self.download(source, dld_file) - except urllib2.URLError as e: + except URLError as e: raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - options = urlparse.parse_qs(url_parts.fragment) + options = parse_qs(url_parts.fragment) for key, value in options.items(): - if key in hashlib.algorithms: + if six.PY2: + algorithms = hashlib.algorithms + else: + algorithms = hashlib.algorithms_available + if key in algorithms: check_hash(dld_file, value, key) if checksum: check_hash(dld_file, checksum, hash_type) diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py index 0e580e47..8ef48f30 100644 --- a/hooks/charmhelpers/fetch/bzrurl.py +++ b/hooks/charmhelpers/fetch/bzrurl.py @@ -5,6 +5,10 @@ from charmhelpers.fetch import ( ) from charmhelpers.core.host import mkdir +import six +if six.PY3: + raise ImportError('bzrlib does not support Python3') + try: from bzrlib.branch import Branch except ImportError: @@ -42,7 +46,7 @@ class BzrUrlFetchHandler(BaseFetchHandler): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) try: self.branch(source, dest_dir) except OSError as e: diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py new file mode 100644 index 00000000..61684cb6 --- /dev/null +++ b/hooks/charmhelpers/fetch/giturl.py @@ -0,0 +1,48 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +import six +if six.PY3: + raise ImportError('GitPython does not support Python 3') + +try: + from git import Repo +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-git") + from git import Repo + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github 
URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + # TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git'): + return False + else: + return True + + def clone(self, source, dest, branch): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + repo = Repo.clone_from(source, dest) + repo.git.checkout(branch) + + def install(self, source, branch="master"): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0o755) + try: + self.clone(source, dest_dir, branch) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/hooks/neutron_api_context.py b/hooks/neutron_api_context.py index b6c49287..38cf9330 100644 --- a/hooks/neutron_api_context.py +++ b/hooks/neutron_api_context.py @@ -96,7 +96,8 @@ class NeutronCCContext(context.NeutronContext): ctxt['verbose'] = config('verbose') ctxt['debug'] = config('debug') ctxt['neutron_bind_port'] = \ - determine_api_port(api_port('neutron-server')) + determine_api_port(api_port('neutron-server'), + singlenode_mode=True) for rid in relation_ids('neutron-api'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) @@ -126,7 +127,8 @@ class HAProxyContext(context.HAProxyContext): ctxt = super(HAProxyContext, self).__call__() # Apache ports - a_neutron_api = determine_apache_port(api_port('neutron-server')) + a_neutron_api = determine_apache_port(api_port('neutron-server'), + singlenode_mode=True) port_mapping = { 'neutron-server': [ @@ -134,7 +136,9 @@ class HAProxyContext(context.HAProxyContext): } ctxt['neutron_bind_port'] = determine_api_port( - api_port('neutron-server')) + api_port('neutron-server'), + singlenode_mode=True, + ) # for haproxy.conf ctxt['service_ports'] = port_mapping diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 4179440a..7d369d9f 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -89,7 +89,7 @@ BASE_RESOURCE_MAP = OrderedDict([ 'services': ['apache2'], }), (HAPROXY_CONF, { - 'contexts': [context.HAProxyContext(), + 'contexts': [context.HAProxyContext(singlenode_mode=True), neutron_api_context.HAProxyContext()], 'services': ['haproxy'], }), From 45b503b2df0b4f261da900b4f3316c1c0047badf Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 26 Nov 2014 08:52:09 +0000 Subject: [PATCH 087/125] Sync charmhelpers --- .../charmhelpers/contrib/hahelpers/cluster.py | 2 +- hooks/charmhelpers/contrib/network/ip.py | 2 +- .../charmhelpers/contrib/openstack/context.py | 3 +-- .../charmhelpers/contrib/openstack/neutron.py | 4 ++-- .../contrib/storage/linux/ceph.py | 13 +++++++---- .../charmhelpers/contrib/storage/linux/lvm.py | 1 + .../contrib/storage/linux/utils.py | 5 ++-- hooks/charmhelpers/core/hookenv.py | 18 ++++++++++----- hooks/charmhelpers/core/host.py | 16 ++++++++----- hooks/charmhelpers/fetch/__init__.py | 20 ++++++++-------- hooks/charmhelpers/fetch/archiveurl.py | 23 +++++++++++++------ 11 files changed, 64 insertions(+), 43 deletions(-) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 3e51986d..52ce4b7c 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -78,7 +78,7 @@ def is_crm_leader(resource): "show", resource ] 
try: - status = subprocess.check_output(cmd) + status = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError: return False else: diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index b9a9815c..8dc83165 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -302,7 +302,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, if global_addrs: # Make sure any found global addresses are not temporary cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd) + out = subprocess.check_output(cmd).decode('UTF-8') if dynamic_only: key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") else: diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 355e6e05..eebe8c03 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -5,7 +5,6 @@ from base64 import b64decode from subprocess import check_call import six -from six.moves import xrange from charmhelpers.fetch import ( apt_install, @@ -99,7 +98,7 @@ def config_flags_parser(config_flags): split = config_flags.strip(' =').split('=') limit = len(split) flags = {} - for i in xrange(0, limit - 1): + for i in range(0, limit - 1): current = split[i] next = split[i + 1] vindex = next.rfind(',') diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 8390d135..1446f637 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -14,7 +14,7 @@ from charmhelpers.contrib.openstack.utils import os_release def headers_package(): """Ensures correct linux-headers for running kernel are installed, for building DKMS package""" - kver = check_output(['uname', '-r']).strip() + kver = check_output(['uname', '-r']).decode('UTF-8').strip() return 'linux-headers-%s' % kver QUANTUM_CONF_DIR = '/etc/quantum' @@ -22,7 +22,7 @@ QUANTUM_CONF_DIR = '/etc/quantum' def kernel_version(): """ Retrieve the current major kernel version as a tuple e.g. 
(3, 13) """ - kver = check_output(['uname', '-r']).strip() + kver = check_output(['uname', '-r']).decode('UTF-8').strip() kver = kver.split('.') return (int(kver[0]), int(kver[1])) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 5d907c02..d47dc228 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -65,7 +65,8 @@ def install(): def rbd_exists(service, pool, rbd_img): """Check to see if a RADOS block device exists.""" try: - out = check_output(['rbd', 'list', '--id', service, '--pool', pool]) + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') except CalledProcessError: return False @@ -82,7 +83,8 @@ def create_rbd_image(service, pool, image, sizemb): def pool_exists(service, name): """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) + out = check_output(['rados', '--id', service, + 'lspools']).decode('UTF-8') except CalledProcessError: return False @@ -96,7 +98,8 @@ def get_osds(service): version = ceph_version() if version and version >= '0.56': return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', '--format=json'])) + 'osd', 'ls', + '--format=json']).decode('UTF-8')) return None @@ -193,7 +196,7 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']) + out = check_output(['rbd', 'showmapped']).decode('UTF-8') except CalledProcessError: return False @@ -361,7 +364,7 @@ def ceph_version(): """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd) + output = check_output(cmd).decode('US-ASCII') output = output.split() if len(output) > 3: return output[2] diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py index 8ac7fecc..0aa65f4f 100644 --- a/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -61,6 +61,7 @@ def list_lvm_volume_group(block_device): vg = None pvd = check_output(['pvdisplay', block_device]).splitlines() for l in pvd: + l = l.decode('UTF-8') if l.strip().startswith('VG Name'): vg = ' '.join(l.strip().split()[2:]) return vg diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index 1b958712..c6a15e14 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -30,7 +30,8 @@ def zap_disk(block_device): # sometimes sgdisk exits non-zero; this is OK, dd will clean up call(['sgdisk', '--zap-all', '--mbrtogpt', '--clear', block_device]) - dev_end = check_output(['blockdev', '--getsz', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) @@ -47,7 +48,7 @@ def is_device_mounted(device): it doesn't. 
''' is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']) + out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 90623667..99e5d208 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -12,7 +12,10 @@ import sys from subprocess import CalledProcessError import six -from six.moves import UserDict +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict CRITICAL = "CRITICAL" ERROR = "ERROR" @@ -286,7 +289,8 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - config_data = json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) if scope is not None: return config_data return Config(config_data) @@ -305,7 +309,7 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None except CalledProcessError as e: @@ -337,7 +341,8 @@ def relation_ids(reltype=None): relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: relid_cmd_line.append(reltype) - return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] return [] @@ -348,7 +353,8 @@ def related_units(relid=None): units_cmd_line = ['relation-list', '--format=json'] if relid is not None: units_cmd_line.extend(('-r', relid)) - return json.loads(subprocess.check_output(units_cmd_line)) or [] + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] @cached @@ -457,7 +463,7 @@ def unit_get(attribute): """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index a3cb996b..e6783d9b 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -55,7 +55,9 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) + output = subprocess.check_output( + ['service', service, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -68,7 +70,9 @@ def service_running(service): def service_available(service_name): """Determine whether a system service is available""" try: - subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError as e: return 'unrecognized service' not in e.output else: @@ -116,7 +120,7 @@ def rsync(from_path, to_path, flags='-r', options=None): cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() def 
symlink(source, destination): @@ -314,7 +318,7 @@ def list_nics(nic_type): interfaces = [] for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): @@ -336,7 +340,7 @@ def set_nic_mtu(nic, mtu): def get_nic_mtu(nic): cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" for line in ip_output: words = line.split() @@ -347,7 +351,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd) + ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" words = ip_output.split() if 'link/ether' in words: diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 4a27d2cc..0a126fc3 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -13,7 +13,10 @@ from charmhelpers.core.hookenv import ( import os import six -from six.moves.urllib.parse import urlparse, urlunparse +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse CLOUD_ARCHIVE = """# Ubuntu Cloud Archive @@ -68,16 +71,11 @@ CLOUD_ARCHIVE_POCKETS = { # The order of this list is very important. Handlers should be listed in from # least- to most-specific URL matching. -if six.PY2: - FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', - 'charmhelpers.fetch.giturl.GitUrlFetchHandler', - ) -else: - FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - ) +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', +) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. 
diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 613ea90f..8a4624b2 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -3,12 +3,21 @@ import hashlib import re import six -from six.moves.urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, -) -from six.moves.urllib.parse import urlparse, urlunparse, parse_qs -from six.moves.urllib.error import URLError +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs from charmhelpers.fetch import ( BaseFetchHandler, @@ -125,7 +134,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): raise UnhandledSource(e.strerror) options = parse_qs(url_parts.fragment) for key, value in options.items(): - if six.PY2: + if not six.PY3: algorithms = hashlib.algorithms else: algorithms = hashlib.algorithms_available From 588b3b7e3d29368034dffa56cbe8ef7d586d1073 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 26 Nov 2014 10:36:25 +0000 Subject: [PATCH 088/125] Fix charmhelper source and resync --- charm-helpers-sync.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index c3bf4506..8af0007c 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~gnuoy/charm-helpers/haproxy-singlenode-mode +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From ab8682667e62f71a02c1995aa0dce354a61ca603 Mon Sep 17 00:00:00 2001 From: Jasper Aikema Date: Tue, 2 Dec 2014 21:55:33 +0100 Subject: [PATCH 089/125] neutron-api charms breaks when used with postgresql because there is no network-manager config variable in the charm. This fixes it --- hooks/neutron_api_hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 8e3cc5ab..431a2da2 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -193,7 +193,7 @@ def db_changed(): def postgresql_neutron_db_changed(): plugin = config('neutron-plugin') # DB config might have been moved to main neutron.conf in H? - CONFIGS.write(neutron_plugin_attribute(plugin, 'config')) + CONFIGS.write(neutron_plugin_attribute(plugin, 'config', 'neutron')) @hooks.hook('amqp-relation-broken', From 0e724e9227a939bd6ac8ad0cba39b2bfa4cf25e1 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 10 Dec 2014 15:52:52 +0000 Subject: [PATCH 090/125] Add contrib.python.packages to charm-helpers sync. --- charm-helpers-sync.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 8af0007c..9b5e79e9 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -9,3 +9,4 @@ include: - contrib.storage.linux - payload.execd - contrib.network.ip + - contrib.python.packages From 879f7063f8e2551657812d1ca706bb43a07e27ac Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 10 Dec 2014 20:28:52 +0000 Subject: [PATCH 091/125] Sync charm-helpers. 
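Most of the changes in this sync follow a single Python 2/3 compatibility pattern that recurs throughout the hunks below: the bytes returned by subprocess.check_output() are decoded before any string matching, and string arguments are tested against six.string_types instead of the Python 2-only basestring. The following is a minimal, self-contained sketch of that pattern only; the helper names used here are illustrative and are not charmhelpers functions.

# Illustrative only: these helpers are hypothetical and simply demonstrate the
# Python 2/3 pattern applied across this charm-helpers sync.
import subprocess

import six


def command_output(cmd):
    # check_output() returns bytes under Python 3, so decode the result
    # before doing any string comparisons on it.
    return subprocess.check_output(cmd).decode('UTF-8')


def ensure_package_list(packages):
    # Accept either a single package name or an iterable of names;
    # six.string_types replaces the Python 2-only basestring check.
    if isinstance(packages, six.string_types):
        return [packages]
    return list(packages)


if __name__ == '__main__':
    print(command_output(['echo', 'hello']))
    print(ensure_package_list('ceph-common'))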
--- .../charmhelpers/contrib/hahelpers/cluster.py | 23 +- hooks/charmhelpers/contrib/network/ip.py | 102 ++-- .../contrib/openstack/amulet/deployment.py | 3 +- .../contrib/openstack/amulet/utils.py | 4 +- .../charmhelpers/contrib/openstack/context.py | 545 ++++++++++-------- hooks/charmhelpers/contrib/openstack/ip.py | 68 ++- .../charmhelpers/contrib/openstack/neutron.py | 24 +- .../contrib/openstack/templates/haproxy.cfg | 4 +- .../contrib/openstack/templating.py | 10 +- hooks/charmhelpers/contrib/openstack/utils.py | 159 ++++- hooks/charmhelpers/contrib/python/__init__.py | 0 hooks/charmhelpers/contrib/python/packages.py | 77 +++ .../contrib/storage/linux/ceph.py | 187 +++--- .../contrib/storage/linux/loopback.py | 8 +- .../charmhelpers/contrib/storage/linux/lvm.py | 1 + .../contrib/storage/linux/utils.py | 5 +- hooks/charmhelpers/core/fstab.py | 18 +- hooks/charmhelpers/core/hookenv.py | 38 +- hooks/charmhelpers/core/host.py | 71 ++- hooks/charmhelpers/core/services/__init__.py | 4 +- hooks/charmhelpers/core/services/helpers.py | 12 +- hooks/charmhelpers/core/templating.py | 3 +- hooks/charmhelpers/fetch/__init__.py | 30 +- hooks/charmhelpers/fetch/archiveurl.py | 69 ++- hooks/charmhelpers/fetch/bzrurl.py | 6 +- hooks/charmhelpers/fetch/giturl.py | 51 ++ 26 files changed, 1005 insertions(+), 517 deletions(-) create mode 100644 hooks/charmhelpers/contrib/python/__init__.py create mode 100644 hooks/charmhelpers/contrib/python/packages.py create mode 100644 hooks/charmhelpers/fetch/giturl.py diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 6d972007..52ce4b7c 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -13,9 +13,10 @@ clustering-related helpers. import subprocess import os - from socket import gethostname as get_unit_hostname +import six + from charmhelpers.core.hookenv import ( log, relation_ids, @@ -77,7 +78,7 @@ def is_crm_leader(resource): "show", resource ] try: - status = subprocess.check_output(cmd) + status = subprocess.check_output(cmd).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -150,34 +151,42 @@ def https(): return False -def determine_api_port(public_port): +def determine_api_port(public_port, singlenode_mode=False): ''' Determine correct API server listening port based on existence of HTTPS reverse proxy and/or haproxy. public_port: int: standard public port for given service + singlenode_mode: boolean: Shuffle ports when only a single unit is present + returns: int: the correct listening port for the API service ''' i = 0 - if len(peer_units()) > 0 or is_clustered(): + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): i += 1 if https(): i += 1 return public_port - (i * 10) -def determine_apache_port(public_port): +def determine_apache_port(public_port, singlenode_mode=False): ''' Description: Determine correct apache listening port based on public IP + state of the cluster. 
public_port: int: standard public port for given service + singlenode_mode: boolean: Shuffle ports when only a single unit is present + returns: int: the correct listening port for the HAProxy service ''' i = 0 - if len(peer_units()) > 0 or is_clustered(): + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): i += 1 return public_port - (i * 10) @@ -197,7 +206,7 @@ def get_hacluster_config(): for setting in settings: conf[setting] = config_get(setting) missing = [] - [missing.append(s) for s, v in conf.iteritems() if v is None] + [missing.append(s) for s, v in six.iteritems(conf) if v is None] if missing: log('Insufficient config data to configure hacluster.', level=ERROR) raise HAIncompleteConfig diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index e62e5655..8dc83165 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -1,15 +1,12 @@ import glob import re import subprocess -import sys from functools import partial from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - WARNING, - ERROR, log ) @@ -34,31 +31,28 @@ def _validate_cidr(network): network) +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network: %s" % network) + raise ValueError(errmsg) + + def get_address_in_network(network, fallback=None, fatal=False): - """ - Get an IPv4 or IPv6 address within the network from the host. + """Get an IPv4 or IPv6 address within the network from the host. :param network (str): CIDR presentation format. For example, '192.168.1.0/24'. :param fallback (str): If no address is found, return fallback. :param fatal (boolean): If no address is found, fallback is not set and fatal is True then exit(1). - """ - - def not_found_error_out(): - log("No IP address found in network: %s" % network, - level=ERROR) - sys.exit(1) - if network is None: if fallback is not None: return fallback + + if fatal: + no_ip_found_error_out(network) else: - if fatal: - not_found_error_out() - else: - return None + return None _validate_cidr(network) network = netaddr.IPNetwork(network) @@ -70,6 +64,7 @@ def get_address_in_network(network, fallback=None, fatal=False): cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) if cidr in network: return str(cidr.ip) + if network.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): @@ -82,20 +77,20 @@ def get_address_in_network(network, fallback=None, fatal=False): return fallback if fatal: - not_found_error_out() + no_ip_found_error_out(network) return None def is_ipv6(address): - '''Determine whether provided address is IPv6 or not''' + """Determine whether provided address is IPv6 or not.""" try: address = netaddr.IPAddress(address) except netaddr.AddrFormatError: # probably a hostname - so not an address at all! 
return False - else: - return address.version == 6 + + return address.version == 6 def is_address_in_network(network, address): @@ -113,11 +108,13 @@ def is_address_in_network(network, address): except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Network (%s) is not in CIDR presentation format" % network) + try: address = netaddr.IPAddress(address) except (netaddr.core.AddrFormatError, ValueError): raise ValueError("Address (%s) is not in correct presentation format" % address) + if address in network: return True else: @@ -147,6 +144,7 @@ def _get_for_address(address, key): return iface else: return addresses[netifaces.AF_INET][0][key] + if address.version == 6 and netifaces.AF_INET6 in addresses: for addr in addresses[netifaces.AF_INET6]: if not addr['addr'].startswith('fe80'): @@ -160,41 +158,42 @@ def _get_for_address(address, key): return str(cidr).split('/')[1] else: return addr[key] + return None get_iface_for_address = partial(_get_for_address, key='iface') + get_netmask_for_address = partial(_get_for_address, key='netmask') def format_ipv6_addr(address): - """ - IPv6 needs to be wrapped with [] in url link to parse correctly. + """If address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. """ if is_ipv6(address): - address = "[%s]" % address - else: - log("Not a valid ipv6 address: %s" % address, level=WARNING) - address = None + return "[%s]" % address - return address + return None def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): - """ - Return the assigned IP address for a given interface, if any, or []. - """ + """Return the assigned IP address for a given interface, if any.""" # Extract nic if passed /dev/ethX if '/' in iface: iface = iface.split('/')[-1] + if not exc_list: exc_list = [] + try: inet_num = getattr(netifaces, inet_type) except AttributeError: - raise Exception('Unknown inet type ' + str(inet_type)) + raise Exception("Unknown inet type '%s'" % str(inet_type)) interfaces = netifaces.interfaces() if inc_aliases: @@ -202,15 +201,18 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for _iface in interfaces: if iface == _iface or _iface.split(':')[0] == iface: ifaces.append(_iface) + if fatal and not ifaces: raise Exception("Invalid interface '%s'" % iface) + ifaces.sort() else: if iface not in interfaces: if fatal: - raise Exception("%s not found " % (iface)) + raise Exception("Interface '%s' not found " % (iface)) else: return [] + else: ifaces = [iface] @@ -221,10 +223,13 @@ def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, for entry in net_info[inet_num]: if 'addr' in entry and entry['addr'] not in exc_list: addresses.append(entry['addr']) + if fatal and not addresses: raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) - return addresses + + return sorted(addresses) + get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') @@ -241,6 +246,7 @@ def get_iface_from_addr(addr): raw = re.match(ll_key, _addr) if raw: _addr = raw.group(1) + if _addr == addr: log("Address '%s' is configured on iface '%s'" % (addr, iface)) @@ -251,8 +257,9 @@ def get_iface_from_addr(addr): def sniff_iface(f): - """If no iface provided, inject net iface inferred from unit private - address. + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. 
""" def iface_sniffer(*args, **kwargs): if not kwargs.get('iface', None): @@ -295,7 +302,7 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, if global_addrs: # Make sure any found global addresses are not temporary cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd) + out = subprocess.check_output(cmd).decode('UTF-8') if dynamic_only: key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") else: @@ -317,33 +324,28 @@ def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, return addrs if fatal: - raise Exception("Interface '%s' doesn't have a scope global " + raise Exception("Interface '%s' does not have a scope global " "non-temporary ipv6 address." % iface) return [] def get_bridges(vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of bridges on the system or [] - """ - b_rgex = vnic_dir + '/*/bridge' - return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] + """Return a list of bridges on the system.""" + b_regex = "%s/*/bridge" % vnic_dir + return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): - """ - Return a list of nics comprising a given bridge on the system or [] - """ - brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) - return [x.split('/')[-1] for x in glob.glob(brif_rgex)] + """Return a list of nics comprising a given bridge on the system.""" + brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) + return [x.split('/')[-1] for x in glob.glob(brif_regex)] def is_bridge_member(nic): - """ - Check if a given nic is a member of a bridge - """ + """Check if a given nic is a member of a bridge.""" for bridge in get_bridges(): if nic in get_bridge_nics(bridge): return True + return False diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 3c7f422a..f3fee074 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,4 @@ +import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment ) @@ -69,7 +70,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): def _configure_services(self, configs): """Configure all of the services.""" - for service, config in configs.iteritems(): + for service, config in six.iteritems(configs): self.d.configure(service, config) def _get_openstack_release(self): diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 0f312b99..3e0cc61c 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -7,6 +7,8 @@ import glanceclient.v1.client as glance_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import six + from charmhelpers.contrib.amulet.utils import ( AmuletUtils ) @@ -60,7 +62,7 @@ class OpenStackAmuletUtils(AmuletUtils): expected service catalog endpoints. 
""" self.log.debug('actual: {}'.format(repr(actual))) - for k, v in expected.iteritems(): + for k, v in six.iteritems(expected): if k in actual: ret = self._validate_dict_data(expected[k][0], actual[k][0]) if ret: diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 538dc913..eebe8c03 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -1,20 +1,18 @@ import json import os import time - from base64 import b64decode +from subprocess import check_call -from subprocess import ( - check_call -) +import six from charmhelpers.fetch import ( apt_install, filter_installed_packages, ) - from charmhelpers.core.hookenv import ( config, + is_relation_made, local_unit, log, relation_get, @@ -23,43 +21,40 @@ from charmhelpers.core.hookenv import ( relation_set, unit_get, unit_private_ip, + DEBUG, + INFO, + WARNING, ERROR, - INFO ) - from charmhelpers.core.host import ( mkdir, - write_file + write_file, ) - from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, determine_api_port, https, - is_clustered + is_clustered, ) - from charmhelpers.contrib.hahelpers.apache import ( get_cert, get_ca_cert, install_ca_cert, ) - from charmhelpers.contrib.openstack.neutron import ( neutron_plugin_attribute, ) - from charmhelpers.contrib.network.ip import ( get_address_in_network, get_ipv6_addr, get_netmask_for_address, format_ipv6_addr, - is_address_in_network + is_address_in_network, ) - from charmhelpers.contrib.openstack.utils import get_host_ip CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' +ADDRESS_TYPES = ['admin', 'internal', 'public'] class OSContextError(Exception): @@ -67,7 +62,7 @@ class OSContextError(Exception): def ensure_packages(packages): - '''Install but do not upgrade required plugin packages''' + """Install but do not upgrade required plugin packages.""" required = filter_installed_packages(packages) if required: apt_install(required, fatal=True) @@ -75,20 +70,27 @@ def ensure_packages(packages): def context_complete(ctxt): _missing = [] - for k, v in ctxt.iteritems(): + for k, v in six.iteritems(ctxt): if v is None or v == '': _missing.append(k) + if _missing: - log('Missing required data: %s' % ' '.join(_missing), level='INFO') + log('Missing required data: %s' % ' '.join(_missing), level=INFO) return False + return True def config_flags_parser(config_flags): + """Parses config flags string into dict. + + The provided config_flags string may be a list of comma-separated values + which themselves may be comma-separated list of values. + """ if config_flags.find('==') >= 0: - log("config_flags is not in expected format (key=value)", - level=ERROR) + log("config_flags is not in expected format (key=value)", level=ERROR) raise OSContextError + # strip the following from each value. post_strippers = ' ,' # we strip any leading/trailing '=' or ' ' from the string then @@ -96,7 +98,7 @@ def config_flags_parser(config_flags): split = config_flags.strip(' =').split('=') limit = len(split) flags = {} - for i in xrange(0, limit - 1): + for i in range(0, limit - 1): current = split[i] next = split[i + 1] vindex = next.rfind(',') @@ -111,17 +113,18 @@ def config_flags_parser(config_flags): # if this not the first entry, expect an embedded key. 
index = current.rfind(',') if index < 0: - log("invalid config value(s) at index %s" % (i), - level=ERROR) + log("Invalid config value(s) at index %s" % (i), level=ERROR) raise OSContextError key = current[index + 1:] # Add to collection. flags[key.strip(post_strippers)] = value.rstrip(post_strippers) + return flags class OSContextGenerator(object): + """Base class for all context generators.""" interfaces = [] def __call__(self): @@ -133,11 +136,11 @@ class SharedDBContext(OSContextGenerator): def __init__(self, database=None, user=None, relation_prefix=None, ssl_dir=None): - ''' - Allows inspecting relation for settings prefixed with relation_prefix. - This is useful for parsing access for multiple databases returned via - the shared-db interface (eg, nova_password, quantum_password) - ''' + """Allows inspecting relation for settings prefixed with + relation_prefix. This is useful for parsing access for multiple + databases returned via the shared-db interface (eg, nova_password, + quantum_password) + """ self.relation_prefix = relation_prefix self.database = database self.user = user @@ -147,9 +150,8 @@ class SharedDBContext(OSContextGenerator): self.database = self.database or config('database') self.user = self.user or config('database-user') if None in [self.database, self.user]: - log('Could not generate shared_db context. ' - 'Missing required charm config options. ' - '(database name and user)') + log("Could not generate shared_db context. Missing required charm " + "config options. (database name and user)", level=ERROR) raise OSContextError ctxt = {} @@ -202,23 +204,24 @@ class PostgresqlDBContext(OSContextGenerator): def __call__(self): self.database = self.database or config('database') if self.database is None: - log('Could not generate postgresql_db context. ' - 'Missing required charm config options. ' - '(database name)') + log('Could not generate postgresql_db context. Missing required ' + 'charm config options. 
(database name)', level=ERROR) raise OSContextError - ctxt = {} + ctxt = {} for rid in relation_ids(self.interfaces[0]): for unit in related_units(rid): - ctxt = { - 'database_host': relation_get('host', rid=rid, unit=unit), - 'database': self.database, - 'database_user': relation_get('user', rid=rid, unit=unit), - 'database_password': relation_get('password', rid=rid, unit=unit), - 'database_type': 'postgresql', - } + rel_host = relation_get('host', rid=rid, unit=unit) + rel_user = relation_get('user', rid=rid, unit=unit) + rel_passwd = relation_get('password', rid=rid, unit=unit) + ctxt = {'database_host': rel_host, + 'database': self.database, + 'database_user': rel_user, + 'database_password': rel_passwd, + 'database_type': 'postgresql'} if context_complete(ctxt): return ctxt + return {} @@ -227,23 +230,29 @@ def db_ssl(rdata, ctxt, ssl_dir): ca_path = os.path.join(ssl_dir, 'db-client.ca') with open(ca_path, 'w') as fh: fh.write(b64decode(rdata['ssl_ca'])) + ctxt['database_ssl_ca'] = ca_path elif 'ssl_ca' in rdata: - log("Charm not setup for ssl support but ssl ca found") + log("Charm not setup for ssl support but ssl ca found", level=INFO) return ctxt + if 'ssl_cert' in rdata: cert_path = os.path.join( ssl_dir, 'db-client.cert') if not os.path.exists(cert_path): - log("Waiting 1m for ssl client cert validity") + log("Waiting 1m for ssl client cert validity", level=INFO) time.sleep(60) + with open(cert_path, 'w') as fh: fh.write(b64decode(rdata['ssl_cert'])) + ctxt['database_ssl_cert'] = cert_path key_path = os.path.join(ssl_dir, 'db-client.key') with open(key_path, 'w') as fh: fh.write(b64decode(rdata['ssl_key'])) + ctxt['database_ssl_key'] = key_path + return ctxt @@ -251,9 +260,8 @@ class IdentityServiceContext(OSContextGenerator): interfaces = ['identity-service'] def __call__(self): - log('Generating template context for identity-service') + log('Generating template context for identity-service', level=DEBUG) ctxt = {} - for rid in relation_ids('identity-service'): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) @@ -261,26 +269,24 @@ class IdentityServiceContext(OSContextGenerator): serv_host = format_ipv6_addr(serv_host) or serv_host auth_host = rdata.get('auth_host') auth_host = format_ipv6_addr(auth_host) or auth_host - - ctxt = { - 'service_port': rdata.get('service_port'), - 'service_host': serv_host, - 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': rdata.get('service_password'), - 'service_protocol': - rdata.get('service_protocol') or 'http', - 'auth_protocol': - rdata.get('auth_protocol') or 'http', - } + svc_protocol = rdata.get('service_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + ctxt = {'service_port': rdata.get('service_port'), + 'service_host': serv_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('service_tenant'), + 'admin_user': rdata.get('service_username'), + 'admin_password': rdata.get('service_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol} if context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') return ctxt + return {} @@ -293,21 +299,23 @@ class AMQPContext(OSContextGenerator): self.interfaces = [rel_name] def __call__(self): - log('Generating 
template context for amqp') + log('Generating template context for amqp', level=DEBUG) conf = config() - user_setting = 'rabbit-user' - vhost_setting = 'rabbit-vhost' if self.relation_prefix: - user_setting = self.relation_prefix + '-rabbit-user' - vhost_setting = self.relation_prefix + '-rabbit-vhost' + user_setting = '%s-rabbit-user' % (self.relation_prefix) + vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) + else: + user_setting = 'rabbit-user' + vhost_setting = 'rabbit-vhost' try: username = conf[user_setting] vhost = conf[vhost_setting] except KeyError as e: - log('Could not generate shared_db context. ' - 'Missing required charm config options: %s.' % e) + log('Could not generate shared_db context. Missing required charm ' + 'config options: %s.' % e, level=ERROR) raise OSContextError + ctxt = {} for rid in relation_ids(self.rel_name): ha_vip_only = False @@ -321,6 +329,7 @@ class AMQPContext(OSContextGenerator): host = relation_get('private-address', rid=rid, unit=unit) host = format_ipv6_addr(host) or host ctxt['rabbitmq_host'] = host + ctxt.update({ 'rabbitmq_user': username, 'rabbitmq_password': relation_get('password', rid=rid, @@ -331,6 +340,7 @@ class AMQPContext(OSContextGenerator): ssl_port = relation_get('ssl_port', rid=rid, unit=unit) if ssl_port: ctxt['rabbit_ssl_port'] = ssl_port + ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) if ssl_ca: ctxt['rabbit_ssl_ca'] = ssl_ca @@ -344,41 +354,45 @@ class AMQPContext(OSContextGenerator): if context_complete(ctxt): if 'rabbit_ssl_ca' in ctxt: if not self.ssl_dir: - log(("Charm not setup for ssl support " - "but ssl ca found")) + log("Charm not setup for ssl support but ssl ca " + "found", level=INFO) break + ca_path = os.path.join( self.ssl_dir, 'rabbit-client-ca.pem') with open(ca_path, 'w') as fh: fh.write(b64decode(ctxt['rabbit_ssl_ca'])) ctxt['rabbit_ssl_ca'] = ca_path + # Sufficient information found = break out! 
break + # Used for active/active rabbitmq >= grizzly - if ('clustered' not in ctxt or ha_vip_only) \ - and len(related_units(rid)) > 1: + if (('clustered' not in ctxt or ha_vip_only) and + len(related_units(rid)) > 1): rabbitmq_hosts = [] for unit in related_units(rid): host = relation_get('private-address', rid=rid, unit=unit) host = format_ipv6_addr(host) or host rabbitmq_hosts.append(host) - ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) + + ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) + if not context_complete(ctxt): return {} - else: - return ctxt + + return ctxt class CephContext(OSContextGenerator): + """Generates context for /etc/ceph/ceph.conf templates.""" interfaces = ['ceph'] def __call__(self): - '''This generates context for /etc/ceph/ceph.conf templates''' if not relation_ids('ceph'): return {} - log('Generating template context for ceph') - + log('Generating template context for ceph', level=DEBUG) mon_hosts = [] auth = None key = None @@ -387,18 +401,18 @@ class CephContext(OSContextGenerator): for unit in related_units(rid): auth = relation_get('auth', rid=rid, unit=unit) key = relation_get('key', rid=rid, unit=unit) - ceph_addr = \ - relation_get('ceph-public-address', rid=rid, unit=unit) or \ - relation_get('private-address', rid=rid, unit=unit) + ceph_pub_addr = relation_get('ceph-public-address', rid=rid, + unit=unit) + unit_priv_addr = relation_get('private-address', rid=rid, + unit=unit) + ceph_addr = ceph_pub_addr or unit_priv_addr ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr mon_hosts.append(ceph_addr) - ctxt = { - 'mon_hosts': ' '.join(mon_hosts), - 'auth': auth, - 'key': key, - 'use_syslog': use_syslog - } + ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), + 'auth': auth, + 'key': key, + 'use_syslog': use_syslog} if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') @@ -407,79 +421,68 @@ class CephContext(OSContextGenerator): return {} ensure_packages(['ceph-common']) - return ctxt -ADDRESS_TYPES = ['admin', 'internal', 'public'] - - class HAProxyContext(OSContextGenerator): + """Provides half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. + """ interfaces = ['cluster'] - def __call__(self): - ''' - Builds half a context for the haproxy template, which describes - all peers to be included in the cluster. Each charm needs to include - its own context generator that describes the port mapping. 
- ''' - if not relation_ids('cluster'): - return {} + def __init__(self, singlenode_mode=False): + self.singlenode_mode = singlenode_mode - l_unit = local_unit().replace('/', '-') + def __call__(self): + if not relation_ids('cluster') and not self.singlenode_mode: + return {} if config('prefer-ipv6'): addr = get_ipv6_addr(exc_list=[config('vip')])[0] else: addr = get_host_ip(unit_get('private-address')) + l_unit = local_unit().replace('/', '-') cluster_hosts = {} # NOTE(jamespage): build out map of configured network endpoints # and associated backends for addr_type in ADDRESS_TYPES: - laddr = get_address_in_network( - config('os-{}-network'.format(addr_type))) + cfg_opt = 'os-{}-network'.format(addr_type) + laddr = get_address_in_network(config(cfg_opt)) if laddr: - cluster_hosts[laddr] = {} - cluster_hosts[laddr]['network'] = "{}/{}".format( - laddr, - get_netmask_for_address(laddr) - ) - cluster_hosts[laddr]['backends'] = {} - cluster_hosts[laddr]['backends'][l_unit] = laddr + netmask = get_netmask_for_address(laddr) + cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, + netmask), + 'backends': {l_unit: laddr}} for rid in relation_ids('cluster'): for unit in related_units(rid): - _unit = unit.replace('/', '-') _laddr = relation_get('{}-address'.format(addr_type), rid=rid, unit=unit) if _laddr: + _unit = unit.replace('/', '-') cluster_hosts[laddr]['backends'][_unit] = _laddr # NOTE(jamespage) no split configurations found, just use # private addresses if not cluster_hosts: - cluster_hosts[addr] = {} - cluster_hosts[addr]['network'] = "{}/{}".format( - addr, - get_netmask_for_address(addr) - ) - cluster_hosts[addr]['backends'] = {} - cluster_hosts[addr]['backends'][l_unit] = addr + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), + 'backends': {l_unit: addr}} for rid in relation_ids('cluster'): for unit in related_units(rid): - _unit = unit.replace('/', '-') _laddr = relation_get('private-address', rid=rid, unit=unit) if _laddr: + _unit = unit.replace('/', '-') cluster_hosts[addr]['backends'][_unit] = _laddr - ctxt = { - 'frontends': cluster_hosts, - } + ctxt = {'frontends': cluster_hosts} if config('haproxy-server-timeout'): ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + if config('haproxy-client-timeout'): ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') @@ -493,13 +496,18 @@ class HAProxyContext(OSContextGenerator): ctxt['stat_port'] = ':8888' for frontend in cluster_hosts: - if len(cluster_hosts[frontend]['backends']) > 1: + if (len(cluster_hosts[frontend]['backends']) > 1 or + self.singlenode_mode): # Enable haproxy when we have enough peers. - log('Ensuring haproxy enabled in /etc/default/haproxy.') + log('Ensuring haproxy enabled in /etc/default/haproxy.', + level=DEBUG) with open('/etc/default/haproxy', 'w') as out: out.write('ENABLED=1\n') + return ctxt - log('HAProxy context is incomplete, this unit has no peers.') + + log('HAProxy context is incomplete, this unit has no peers.', + level=INFO) return {} @@ -507,29 +515,28 @@ class ImageServiceContext(OSContextGenerator): interfaces = ['image-service'] def __call__(self): - ''' - Obtains the glance API server from the image-service relation. Useful - in nova and cinder (currently). - ''' - log('Generating template context for image-service.') + """Obtains the glance API server from the image-service relation. + Useful in nova and cinder (currently). 
+ """ + log('Generating template context for image-service.', level=DEBUG) rids = relation_ids('image-service') if not rids: return {} + for rid in rids: for unit in related_units(rid): api_server = relation_get('glance-api-server', rid=rid, unit=unit) if api_server: return {'glance_api_servers': api_server} - log('ImageService context is incomplete. ' - 'Missing required relation data.') + + log("ImageService context is incomplete. Missing required relation " + "data.", level=INFO) return {} class ApacheSSLContext(OSContextGenerator): - - """ - Generates a context for an apache vhost configuration that configures + """Generates a context for an apache vhost configuration that configures HTTPS reverse proxying for one or many endpoints. Generated context looks something like:: @@ -563,6 +570,7 @@ class ApacheSSLContext(OSContextGenerator): else: cert_filename = 'cert' key_filename = 'key' + write_file(path=os.path.join(ssl_dir, cert_filename), content=b64decode(cert)) write_file(path=os.path.join(ssl_dir, key_filename), @@ -574,7 +582,8 @@ class ApacheSSLContext(OSContextGenerator): install_ca_cert(b64decode(ca_cert)) def canonical_names(self): - '''Figure out which canonical names clients will access this service''' + """Figure out which canonical names clients will access this service. + """ cns = [] for r_id in relation_ids('identity-service'): for unit in related_units(r_id): @@ -582,55 +591,80 @@ class ApacheSSLContext(OSContextGenerator): for k in rdata: if k.startswith('ssl_key_'): cns.append(k.lstrip('ssl_key_')) - return list(set(cns)) + + return sorted(list(set(cns))) + + def get_network_addresses(self): + """For each network configured, return corresponding address and vip + (if available). + + Returns a list of tuples of the form: + + [(address_in_net_a, vip_in_net_a), + (address_in_net_b, vip_in_net_b), + ...] + + or, if no vip(s) available: + + [(address_in_net_a, address_in_net_a), + (address_in_net_b, address_in_net_b), + ...] + """ + addresses = [] + if config('vip'): + vips = config('vip').split() + else: + vips = [] + + for net_type in ['os-internal-network', 'os-admin-network', + 'os-public-network']: + addr = get_address_in_network(config(net_type), + unit_get('private-address')) + if len(vips) > 1 and is_clustered(): + if not config(net_type): + log("Multiple networks configured but net_type " + "is None (%s)." 
% net_type, level=WARNING) + continue + + for vip in vips: + if is_address_in_network(config(net_type), vip): + addresses.append((addr, vip)) + break + + elif is_clustered() and config('vip'): + addresses.append((addr, config('vip'))) + else: + addresses.append((addr, addr)) + + return sorted(addresses) def __call__(self): - if isinstance(self.external_ports, basestring): + if isinstance(self.external_ports, six.string_types): self.external_ports = [self.external_ports] - if (not self.external_ports or not https()): + + if not self.external_ports or not https(): return {} self.configure_ca() self.enable_modules() - ctxt = { - 'namespace': self.service_namespace, - 'endpoints': [], - 'ext_ports': [] - } + ctxt = {'namespace': self.service_namespace, + 'endpoints': [], + 'ext_ports': []} for cn in self.canonical_names(): self.configure_cert(cn) - addresses = [] - vips = [] - if config('vip'): - vips = config('vip').split() - - for network_type in ['os-internal-network', - 'os-admin-network', - 'os-public-network']: - address = get_address_in_network(config(network_type), - unit_get('private-address')) - if len(vips) > 0 and is_clustered(): - for vip in vips: - if is_address_in_network(config(network_type), - vip): - addresses.append((address, vip)) - break - elif is_clustered(): - addresses.append((address, config('vip'))) - else: - addresses.append((address, address)) - - for address, endpoint in set(addresses): + addresses = self.get_network_addresses() + for address, endpoint in sorted(set(addresses)): for api_port in self.external_ports: ext_port = determine_apache_port(api_port) int_port = determine_api_port(api_port) portmap = (address, endpoint, int(ext_port), int(int_port)) ctxt['endpoints'].append(portmap) ctxt['ext_ports'].append(int(ext_port)) - ctxt['ext_ports'] = list(set(ctxt['ext_ports'])) + + ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) return ctxt @@ -647,21 +681,23 @@ class NeutronContext(OSContextGenerator): @property def packages(self): - return neutron_plugin_attribute( - self.plugin, 'packages', self.network_manager) + return neutron_plugin_attribute(self.plugin, 'packages', + self.network_manager) @property def neutron_security_groups(self): return None def _ensure_packages(self): - [ensure_packages(pkgs) for pkgs in self.packages] + for pkgs in self.packages: + ensure_packages(pkgs) def _save_flag_file(self): if self.network_manager == 'quantum': _file = '/etc/nova/quantum_plugin.conf' else: _file = '/etc/nova/neutron_plugin.conf' + with open(_file, 'wb') as out: out.write(self.plugin + '\n') @@ -670,13 +706,11 @@ class NeutronContext(OSContextGenerator): self.network_manager) config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - ovs_ctxt = { - 'core_plugin': driver, - 'neutron_plugin': 'ovs', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config - } + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} return ovs_ctxt @@ -685,13 +719,11 @@ class NeutronContext(OSContextGenerator): self.network_manager) config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - nvp_ctxt = { - 'core_plugin': driver, - 'neutron_plugin': 'nvp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config - } + nvp_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'nvp', + 'neutron_security_groups': 
self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} return nvp_ctxt @@ -700,35 +732,50 @@ class NeutronContext(OSContextGenerator): self.network_manager) n1kv_config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - n1kv_ctxt = { - 'core_plugin': driver, - 'neutron_plugin': 'n1kv', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': n1kv_config, - 'vsm_ip': config('n1kv-vsm-ip'), - 'vsm_username': config('n1kv-vsm-username'), - 'vsm_password': config('n1kv-vsm-password'), - 'restrict_policy_profiles': config( - 'n1kv_restrict_policy_profiles'), - } + n1kv_user_config_flags = config('n1kv-config-flags') + restrict_policy_profiles = config('n1kv-restrict-policy-profiles') + n1kv_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': restrict_policy_profiles} + + if n1kv_user_config_flags: + flags = config_flags_parser(n1kv_user_config_flags) + n1kv_ctxt['user_config_flags'] = flags return n1kv_ctxt + def calico_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + calico_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'Calico', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return calico_ctxt + def neutron_ctxt(self): if https(): proto = 'https' else: proto = 'http' + if is_clustered(): host = config('vip') else: host = unit_get('private-address') - url = '%s://%s:%s' % (proto, host, '9696') - ctxt = { - 'network_manager': self.network_manager, - 'neutron_url': url, - } + + ctxt = {'network_manager': self.network_manager, + 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} return ctxt def __call__(self): @@ -748,6 +795,8 @@ class NeutronContext(OSContextGenerator): ctxt.update(self.nvp_ctxt()) elif self.plugin == 'n1kv': ctxt.update(self.n1kv_ctxt()) + elif self.plugin == 'Calico': + ctxt.update(self.calico_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: @@ -759,23 +808,40 @@ class NeutronContext(OSContextGenerator): class OSConfigFlagContext(OSContextGenerator): + """Provides support for user-defined config flags. - """ - Responsible for adding user-defined config-flags in charm config to a - template context. + Users can define a comma-seperated list of key=value pairs + in the charm configuration and apply them at any point in + any file by using a template flag. + + Sometimes users might want config flags inserted within a + specific section so this class allows users to specify the + template flag name, allowing for multiple template flags + (sections) within the same context. NOTE: the value of config-flags may be a comma-separated list of key=value pairs and some Openstack config files support comma-separated lists as values. """ + def __init__(self, charm_flag='config-flags', + template_flag='user_config_flags'): + """ + :param charm_flag: config flags in charm configuration. + :param template_flag: insert point for user-defined flags in template + file. 
+ """ + super(OSConfigFlagContext, self).__init__() + self._charm_flag = charm_flag + self._template_flag = template_flag + def __call__(self): - config_flags = config('config-flags') + config_flags = config(self._charm_flag) if not config_flags: return {} - flags = config_flags_parser(config_flags) - return {'user_config_flags': flags} + return {self._template_flag: + config_flags_parser(config_flags)} class SubordinateConfigContext(OSContextGenerator): @@ -819,7 +885,6 @@ class SubordinateConfigContext(OSContextGenerator): }, } } - """ def __init__(self, service, config_file, interface): @@ -849,26 +914,28 @@ class SubordinateConfigContext(OSContextGenerator): if self.service not in sub_config: log('Found subordinate_config on %s but it contained' - 'nothing for %s service' % (rid, self.service)) + 'nothing for %s service' % (rid, self.service), + level=INFO) continue sub_config = sub_config[self.service] if self.config_file not in sub_config: log('Found subordinate_config on %s but it contained' - 'nothing for %s' % (rid, self.config_file)) + 'nothing for %s' % (rid, self.config_file), + level=INFO) continue sub_config = sub_config[self.config_file] - for k, v in sub_config.iteritems(): + for k, v in six.iteritems(sub_config): if k == 'sections': - for section, config_dict in v.iteritems(): - log("adding section '%s'" % (section)) + for section, config_dict in six.iteritems(v): + log("adding section '%s'" % (section), + level=DEBUG) ctxt[k][section] = config_dict else: ctxt[k] = v - log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) - + log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) return ctxt @@ -880,15 +947,14 @@ class LogLevelContext(OSContextGenerator): False if config('debug') is None else config('debug') ctxt['verbose'] = \ False if config('verbose') is None else config('verbose') + return ctxt class SyslogContext(OSContextGenerator): def __call__(self): - ctxt = { - 'use_syslog': config('use-syslog') - } + ctxt = {'use_syslog': config('use-syslog')} return ctxt @@ -896,13 +962,9 @@ class BindHostContext(OSContextGenerator): def __call__(self): if config('prefer-ipv6'): - return { - 'bind_host': '::' - } + return {'bind_host': '::'} else: - return { - 'bind_host': '0.0.0.0' - } + return {'bind_host': '0.0.0.0'} class WorkerConfigContext(OSContextGenerator): @@ -914,11 +976,42 @@ class WorkerConfigContext(OSContextGenerator): except ImportError: apt_install('python-psutil', fatal=True) from psutil import NUM_CPUS + return NUM_CPUS def __call__(self): - multiplier = config('worker-multiplier') or 1 - ctxt = { - "workers": self.num_cpus * multiplier - } + multiplier = config('worker-multiplier') or 0 + ctxt = {"workers": self.num_cpus * multiplier} + return ctxt + + +class ZeroMQContext(OSContextGenerator): + interfaces = ['zeromq-configuration'] + + def __call__(self): + ctxt = {} + if is_relation_made('zeromq-configuration', 'host'): + for rid in relation_ids('zeromq-configuration'): + for unit in related_units(rid): + ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) + ctxt['zmq_host'] = relation_get('host', unit, rid) + + return ctxt + + +class NotificationDriverContext(OSContextGenerator): + + def __init__(self, zmq_relation='zeromq-configuration', + amqp_relation='amqp'): + """ + :param zmq_relation: Name of Zeromq relation to check + """ + self.zmq_relation = zmq_relation + self.amqp_relation = amqp_relation + + def __call__(self): + ctxt = {'notifications': 'False'} + if is_relation_made(self.amqp_relation): + ctxt['notifications'] = 
"True" + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py index bc84fc45..f062c807 100644 --- a/hooks/charmhelpers/contrib/openstack/ip.py +++ b/hooks/charmhelpers/contrib/openstack/ip.py @@ -2,21 +2,19 @@ from charmhelpers.core.hookenv import ( config, unit_get, ) - from charmhelpers.contrib.network.ip import ( get_address_in_network, is_address_in_network, is_ipv6, get_ipv6_addr, ) - from charmhelpers.contrib.hahelpers.cluster import is_clustered PUBLIC = 'public' INTERNAL = 'int' ADMIN = 'admin' -_address_map = { +ADDRESS_MAP = { PUBLIC: { 'config': 'os-public-network', 'fallback': 'public-address' @@ -33,16 +31,14 @@ _address_map = { def canonical_url(configs, endpoint_type=PUBLIC): - ''' - Returns the correct HTTP URL to this host given the state of HTTPS + """Returns the correct HTTP URL to this host given the state of HTTPS configuration, hacluster and charm configuration. - :configs OSTemplateRenderer: A config tempating object to inspect for - a complete https context. - :endpoint_type str: The endpoint type to resolve. - - :returns str: Base URL for services on the current service unit. - ''' + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param endpoint_type: str endpoint type to resolve. + :param returns: str base URL for services on the current service unit. + """ scheme = 'http' if 'https' in configs.complete_contexts(): scheme = 'https' @@ -53,27 +49,45 @@ def canonical_url(configs, endpoint_type=PUBLIC): def resolve_address(endpoint_type=PUBLIC): + """Return unit address depending on net config. + + If unit is clustered with vip(s) and has net splits defined, return vip on + correct network. If clustered with no nets defined, return primary vip. + + If not clustered, return unit address ensuring address is on configured net + split if one is configured. + + :param endpoint_type: Network endpoing type + """ resolved_address = None - if is_clustered(): - if config(_address_map[endpoint_type]['config']) is None: - # Assume vip is simple and pass back directly - resolved_address = config('vip') + vips = config('vip') + if vips: + vips = vips.split() + + net_type = ADDRESS_MAP[endpoint_type]['config'] + net_addr = config(net_type) + net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] + clustered = is_clustered() + if clustered: + if not net_addr: + # If no net-splits defined, we expect a single vip + resolved_address = vips[0] else: - for vip in config('vip').split(): - if is_address_in_network( - config(_address_map[endpoint_type]['config']), - vip): + for vip in vips: + if is_address_in_network(net_addr, vip): resolved_address = vip + break else: if config('prefer-ipv6'): - fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0] + fallback_addr = get_ipv6_addr(exc_list=vips)[0] else: - fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) - resolved_address = get_address_in_network( - config(_address_map[endpoint_type]['config']), fallback_addr) + fallback_addr = unit_get(net_fallback) + + resolved_address = get_address_in_network(net_addr, fallback_addr) if resolved_address is None: - raise ValueError('Unable to resolve a suitable IP address' - ' based on charm state and configuration') - else: - return resolved_address + raise ValueError("Unable to resolve a suitable IP address based on " + "charm state and configuration. 
(net_type=%s, " + "clustered=%s)" % (net_type, clustered)) + + return resolved_address diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 84d97bca..1446f637 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -14,7 +14,7 @@ from charmhelpers.contrib.openstack.utils import os_release def headers_package(): """Ensures correct linux-headers for running kernel are installed, for building DKMS package""" - kver = check_output(['uname', '-r']).strip() + kver = check_output(['uname', '-r']).decode('UTF-8').strip() return 'linux-headers-%s' % kver QUANTUM_CONF_DIR = '/etc/quantum' @@ -22,7 +22,7 @@ QUANTUM_CONF_DIR = '/etc/quantum' def kernel_version(): """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """ - kver = check_output(['uname', '-r']).strip() + kver = check_output(['uname', '-r']).decode('UTF-8').strip() kver = kver.split('.') return (int(kver[0]), int(kver[1])) @@ -138,10 +138,25 @@ def neutron_plugins(): relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], 'services': [], - 'packages': [['neutron-plugin-cisco']], + 'packages': [[headers_package()] + determine_dkms_package(), + ['neutron-plugin-cisco']], 'server_packages': ['neutron-server', 'neutron-plugin-cisco'], 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [ + context.SharedDBContext(user=config('neutron-database-user'), + database=config('neutron-database'), + relation_prefix='neutron', + ssl_dir=NEUTRON_CONF_DIR)], + 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'], + 'packages': [[headers_package()] + determine_dkms_package(), + ['calico-compute', 'bird', 'neutron-dhcp-agent']], + 'server_packages': ['neutron-server', 'calico-control'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': @@ -162,7 +177,8 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None): elif manager == 'neutron': plugins = neutron_plugins() else: - log('Error: Network manager does not support plugins.') + log("Network manager '%s' does not support plugins." 
% (manager), + level=ERROR) raise Exception try: diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 19c9b856..0229f9d4 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -35,7 +35,7 @@ listen stats {{ stat_port }} stats auth admin:password {% if frontends -%} -{% for service, ports in service_ports.iteritems() -%} +{% for service, ports in service_ports.items() -%} frontend tcp-in_{{ service }} bind *:{{ ports[0] }} bind :::{{ ports[0] }} @@ -46,7 +46,7 @@ frontend tcp-in_{{ service }} {% for frontend in frontends -%} backend {{ service }}_{{ frontend }} balance leastconn - {% for unit, address in frontends[frontend]['backends'].iteritems() -%} + {% for unit, address in frontends[frontend]['backends'].items() -%} server {{ unit }} {{ address }}:{{ ports[1] }} check {% endfor %} {% endfor -%} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index f5442712..33df0675 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -1,13 +1,13 @@ import os -from charmhelpers.fetch import apt_install +import six +from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( log, ERROR, INFO ) - from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES try: @@ -43,7 +43,7 @@ def get_loader(templates_dir, os_release): order by OpenStack release. """ tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) - for rel in OPENSTACK_CODENAMES.itervalues()] + for rel in six.itervalues(OPENSTACK_CODENAMES)] if not os.path.isdir(templates_dir): log('Templates directory not found @ %s.' % templates_dir, @@ -258,7 +258,7 @@ class OSConfigRenderer(object): """ Write out all registered config files. """ - [self.write(k) for k in self.templates.iterkeys()] + [self.write(k) for k in six.iterkeys(self.templates)] def set_release(self, openstack_release): """ @@ -275,5 +275,5 @@ class OSConfigRenderer(object): ''' interfaces = [] [interfaces.extend(i.complete_contexts()) - for i in self.templates.itervalues()] + for i in six.itervalues(self.templates)] return interfaces diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index b0d1b03a..44179679 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -2,6 +2,7 @@ # Common python helper functions used for OpenStack charms. 
from collections import OrderedDict +from functools import wraps import subprocess import json @@ -9,11 +10,13 @@ import os import socket import sys +import six +import yaml + from charmhelpers.core.hookenv import ( config, log as juju_log, charm_dir, - ERROR, INFO, relation_ids, relation_set @@ -30,7 +33,8 @@ from charmhelpers.contrib.network.ip import ( ) from charmhelpers.core.host import lsb_release, mounts, umount -from charmhelpers.fetch import apt_install, apt_cache +from charmhelpers.fetch import apt_install, apt_cache, install_remote +from charmhelpers.contrib.python.packages import pip_install from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device @@ -112,7 +116,7 @@ def get_os_codename_install_source(src): # Best guess match based on deb string provided if src.startswith('deb') or src.startswith('ppa'): - for k, v in OPENSTACK_CODENAMES.iteritems(): + for k, v in six.iteritems(OPENSTACK_CODENAMES): if v in src: return v @@ -133,7 +137,7 @@ def get_os_codename_version(vers): def get_os_version_codename(codename): '''Determine OpenStack version number from codename.''' - for k, v in OPENSTACK_CODENAMES.iteritems(): + for k, v in six.iteritems(OPENSTACK_CODENAMES): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -193,7 +197,7 @@ def get_os_version_package(pkg, fatal=True): else: vers_map = OPENSTACK_CODENAMES - for version, cname in vers_map.iteritems(): + for version, cname in six.iteritems(vers_map): if cname == codename: return version # e = "Could not determine OpenStack version for package: %s" % pkg @@ -317,7 +321,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars): rc_script.write( "#!/bin/bash\n") [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in env_vars.iteritems() if u != "script_path"] + for u, p in six.iteritems(env_vars) if u != "script_path"] def openstack_upgrade_available(package): @@ -350,8 +354,8 @@ def ensure_block_device(block_device): ''' _none = ['None', 'none', None] if (block_device in _none): - error_out('prepare_storage(): Missing required input: ' - 'block_device=%s.' % block_device, level=ERROR) + error_out('prepare_storage(): Missing required input: block_device=%s.' 
+ % block_device) if block_device.startswith('/dev/'): bdev = block_device @@ -367,8 +371,7 @@ def ensure_block_device(block_device): bdev = '/dev/%s' % block_device if not is_block_device(bdev): - error_out('Failed to locate valid block device at %s' % bdev, - level=ERROR) + error_out('Failed to locate valid block device at %s' % bdev) return bdev @@ -417,7 +420,7 @@ def ns_query(address): if isinstance(address, dns.name.Name): rtype = 'PTR' - elif isinstance(address, basestring): + elif isinstance(address, six.string_types): rtype = 'A' else: return None @@ -468,6 +471,14 @@ def get_hostname(address, fqdn=True): return result.split('.')[0] +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + def sync_db_with_multi_ipv6_addresses(database, database_user, relation_prefix=None): hosts = get_ipv6_addr(dynamic_only=False) @@ -477,10 +488,132 @@ def sync_db_with_multi_ipv6_addresses(database, database_user, 'hostname': json.dumps(hosts)} if relation_prefix: - keys = kwargs.keys() - for key in keys: + for key in list(kwargs.keys()): kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] del kwargs[key] for rid in relation_ids('shared-db'): relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap + + +def git_install_requested(): + """Returns true if openstack-origin-git is specified.""" + return config('openstack-origin-git') != "None" + + +requirements_dir = None + + +def git_clone_and_install(file_name, core_project): + """Clone/install all OpenStack repos specified in yaml config file.""" + global requirements_dir + + if file_name == "None": + return + + yaml_file = os.path.join(charm_dir(), file_name) + + # clone/install the requirements project first + installed = _git_clone_and_install_subset(yaml_file, + whitelist=['requirements']) + if 'requirements' not in installed: + error_out('requirements git repository must be specified') + + # clone/install all other projects except requirements and the core project + blacklist = ['requirements', core_project] + _git_clone_and_install_subset(yaml_file, blacklist=blacklist, + update_requirements=True) + + # clone/install the core project + whitelist = [core_project] + installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist, + update_requirements=True) + if core_project not in installed: + error_out('{} git repository must be specified'.format(core_project)) + + +def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[], + update_requirements=False): + """Clone/install subset of OpenStack repos specified in yaml config file.""" + global requirements_dir + installed = [] + + with open(yaml_file, 'r') as fd: + projects = yaml.load(fd) + for proj, val in projects.items(): + # The project subset is chosen based on the following 3 rules: + # 1) If project is in blacklist, we don't clone/install it, period. + # 2) If whitelist is empty, we clone/install everything else. + # 3) If whitelist is not empty, we clone/install everything in the + # whitelist. 
+ if proj in blacklist: + continue + if whitelist and proj not in whitelist: + continue + repo = val['repository'] + branch = val['branch'] + repo_dir = _git_clone_and_install_single(repo, branch, + update_requirements) + if proj == 'requirements': + requirements_dir = repo_dir + installed.append(proj) + return installed + + +def _git_clone_and_install_single(repo, branch, update_requirements=False): + """Clone and install a single git repository.""" + dest_parent_dir = "/mnt/openstack-git/" + dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo)) + + if not os.path.exists(dest_parent_dir): + juju_log('Host dir not mounted at {}. ' + 'Creating directory there instead.'.format(dest_parent_dir)) + os.mkdir(dest_parent_dir) + + if not os.path.exists(dest_dir): + juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) + repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch) + else: + repo_dir = dest_dir + + if update_requirements: + if not requirements_dir: + error_out('requirements repo must be cloned before ' + 'updating from global requirements.') + _git_update_requirements(repo_dir, requirements_dir) + + juju_log('Installing git repo from dir: {}'.format(repo_dir)) + pip_install(repo_dir) + + return repo_dir + + +def _git_update_requirements(package_dir, reqs_dir): + """Update from global requirements. + + Update an OpenStack git directory's requirements.txt and + test-requirements.txt from global-requirements.txt.""" + orig_dir = os.getcwd() + os.chdir(reqs_dir) + cmd = "python update.py {}".format(package_dir) + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + package = os.path.basename(package_dir) + error_out("Error updating {} from global-requirements.txt".format(package)) + os.chdir(orig_dir) diff --git a/hooks/charmhelpers/contrib/python/__init__.py b/hooks/charmhelpers/contrib/python/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/contrib/python/packages.py b/hooks/charmhelpers/contrib/python/packages.py new file mode 100644 index 00000000..78162b1b --- /dev/null +++ b/hooks/charmhelpers/contrib/python/packages.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# coding: utf-8 + +__author__ = "Jorge Niedbalski " + +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.hookenv import log + +try: + from pip import main as pip_execute +except ImportError: + apt_update() + apt_install('python-pip') + from pip import main as pip_execute + + +def parse_options(given, available): + """Given a set of options, check if available""" + for key, value in sorted(given.items()): + if key in available: + yield "--{0}={1}".format(key, value) + + +def pip_install_requirements(requirements, **options): + """Install a requirements file """ + command = ["install"] + + available_options = ('proxy', 'src', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + command.append("-r {0}".format(requirements)) + log("Installing from file: {} with options: {}".format(requirements, + command)) + pip_execute(command) + + +def pip_install(package, fatal=False, **options): + """Install a python package""" + command = ["install"] + + available_options = ('proxy', 'src', 'log', "index-url", ) + for option in parse_options(options, available_options): + command.append(option) + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Installing {} package with options: {}".format(package, + command)) + 
pip_execute(command) + + +def pip_uninstall(package, **options): + """Uninstall a python package""" + command = ["uninstall", "-q", "-y"] + + available_options = ('proxy', 'log', ) + for option in parse_options(options, available_options): + command.append(option) + + if isinstance(package, list): + command.extend(package) + else: + command.append(package) + + log("Uninstalling {} package with options: {}".format(package, + command)) + pip_execute(command) + + +def pip_list(): + """Returns the list of current python installed packages + """ + return pip_execute(["list"]) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 768438a4..d47dc228 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -16,19 +16,18 @@ import time from subprocess import ( check_call, check_output, - CalledProcessError + CalledProcessError, ) - from charmhelpers.core.hookenv import ( relation_get, relation_ids, related_units, log, + DEBUG, INFO, WARNING, - ERROR + ERROR, ) - from charmhelpers.core.host import ( mount, mounts, @@ -37,7 +36,6 @@ from charmhelpers.core.host import ( service_running, umount, ) - from charmhelpers.fetch import ( apt_install, ) @@ -56,99 +54,85 @@ CEPH_CONF = """[global] def install(): - ''' Basic Ceph client installation ''' + """Basic Ceph client installation.""" ceph_dir = "/etc/ceph" if not os.path.exists(ceph_dir): os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) def rbd_exists(service, pool, rbd_img): - ''' Check to see if a RADOS block device exists ''' + """Check to see if a RADOS block device exists.""" try: - out = check_output(['rbd', 'list', '--id', service, - '--pool', pool]) + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]).decode('UTF-8') except CalledProcessError: return False - else: - return rbd_img in out + + return rbd_img in out def create_rbd_image(service, pool, image, sizemb): - ''' Create a new RADOS block device ''' - cmd = [ - 'rbd', - 'create', - image, - '--size', - str(sizemb), - '--id', - service, - '--pool', - pool - ] + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] check_call(cmd) def pool_exists(service, name): - ''' Check to see if a RADOS pool already exists ''' + """Check to see if a RADOS pool already exists.""" try: - out = check_output(['rados', '--id', service, 'lspools']) + out = check_output(['rados', '--id', service, + 'lspools']).decode('UTF-8') except CalledProcessError: return False - else: - return name in out + + return name in out def get_osds(service): - ''' - Return a list of all Ceph Object Storage Daemons - currently in the cluster - ''' + """Return a list of all Ceph Object Storage Daemons currently in the + cluster. + """ version = ceph_version() if version and version >= '0.56': return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', '--format=json'])) - else: - return None + 'osd', 'ls', + '--format=json']).decode('UTF-8')) + + return None -def create_pool(service, name, replicas=2): - ''' Create a new RADOS pool ''' +def create_pool(service, name, replicas=3): + """Create a new RADOS pool.""" if pool_exists(service, name): log("Ceph pool {} already exists, skipping creation".format(name), level=WARNING) return + # Calculate the number of placement groups based # on upstream recommended best practices. 
osds = get_osds(service) if osds: - pgnum = (len(osds) * 100 / replicas) + pgnum = (len(osds) * 100 // replicas) else: # NOTE(james-page): Default to 200 for older ceph versions # which don't support OSD query from cli pgnum = 200 - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'create', - name, str(pgnum) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] check_call(cmd) - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'set', name, - 'size', str(replicas) - ] + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', + str(replicas)] check_call(cmd) def delete_pool(service, name): - ''' Delete a RADOS pool from ceph ''' - cmd = [ - 'ceph', '--id', service, - 'osd', 'pool', 'delete', - name, '--yes-i-really-really-mean-it' - ] + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] check_call(cmd) @@ -161,44 +145,43 @@ def _keyring_path(service): def create_keyring(service, key): - ''' Create a new Ceph keyring containing key''' + """Create a new Ceph keyring containing key.""" keyring = _keyring_path(service) if os.path.exists(keyring): - log('ceph: Keyring exists at %s.' % keyring, level=WARNING) + log('Ceph keyring exists at %s.' % keyring, level=WARNING) return - cmd = [ - 'ceph-authtool', - keyring, - '--create-keyring', - '--name=client.{}'.format(service), - '--add-key={}'.format(key) - ] + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] check_call(cmd) - log('ceph: Created new ring at %s.' % keyring, level=INFO) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) def create_key_file(service, key): - ''' Create a file containing key ''' + """Create a file containing key.""" keyfile = _keyfile_path(service) if os.path.exists(keyfile): - log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) + log('Keyfile exists at %s.' % keyfile, level=WARNING) return + with open(keyfile, 'w') as fd: fd.write(key) - log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) + + log('Created new keyfile at %s.' 
% keyfile, level=INFO) def get_ceph_nodes(): - ''' Query named relation 'ceph' to detemine current nodes ''' + """Query named relation 'ceph' to determine current nodes.""" hosts = [] for r_id in relation_ids('ceph'): for unit in related_units(r_id): hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts def configure(service, key, auth, use_syslog): - ''' Perform basic configuration of Ceph ''' + """Perform basic configuration of Ceph.""" create_keyring(service, key) create_key_file(service, key) hosts = get_ceph_nodes() @@ -211,17 +194,17 @@ def configure(service, key, auth, use_syslog): def image_mapped(name): - ''' Determine whether a RADOS block device is mapped locally ''' + """Determine whether a RADOS block device is mapped locally.""" try: - out = check_output(['rbd', 'showmapped']) + out = check_output(['rbd', 'showmapped']).decode('UTF-8') except CalledProcessError: return False - else: - return name in out + + return name in out def map_block_storage(service, pool, image): - ''' Map a RADOS block device for local use ''' + """Map a RADOS block device for local use.""" cmd = [ 'rbd', 'map', @@ -235,31 +218,32 @@ def map_block_storage(service, pool, image): def filesystem_mounted(fs): - ''' Determine whether a filesytems is already mounted ''' + """Determine whether a filesytems is already mounted.""" return fs in [f for f, m in mounts()] def make_filesystem(blk_device, fstype='ext4', timeout=10): - ''' Make a new filesystem on the specified block device ''' + """Make a new filesystem on the specified block device.""" count = 0 e_noent = os.errno.ENOENT while not os.path.exists(blk_device): if count >= timeout: - log('ceph: gave up waiting on block device %s' % blk_device, + log('Gave up waiting on block device %s' % blk_device, level=ERROR) raise IOError(e_noent, os.strerror(e_noent), blk_device) - log('ceph: waiting for block device %s to appear' % blk_device, - level=INFO) + + log('Waiting for block device %s to appear' % blk_device, + level=DEBUG) count += 1 time.sleep(1) else: - log('ceph: Formatting block device %s as filesystem %s.' % + log('Formatting block device %s as filesystem %s.' 
% (blk_device, fstype), level=INFO) check_call(['mkfs', '-t', fstype, blk_device]) def place_data_on_block_device(blk_device, data_src_dst): - ''' Migrate data in data_src_dst to blk_device and then remount ''' + """Migrate data in data_src_dst to blk_device and then remount.""" # mount block device into /mnt mount(blk_device, '/mnt') # copy data to /mnt @@ -279,8 +263,8 @@ def place_data_on_block_device(blk_device, data_src_dst): # TODO: re-use def modprobe(module): - ''' Load a kernel module and configure for auto-load on reboot ''' - log('ceph: Loading kernel module', level=INFO) + """Load a kernel module and configure for auto-load on reboot.""" + log('Loading kernel module', level=INFO) cmd = ['modprobe', module] check_call(cmd) with open('/etc/modules', 'r+') as modules: @@ -289,7 +273,7 @@ def modprobe(module): def copy_files(src, dst, symlinks=False, ignore=None): - ''' Copy files from src to dst ''' + """Copy files from src to dst.""" for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) @@ -300,9 +284,9 @@ def copy_files(src, dst, symlinks=False, ignore=None): def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, - blk_device, fstype, system_services=[]): - """ - NOTE: This function must only be called from a single service unit for + blk_device, fstype, system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for the same rbd_img otherwise data loss will occur. Ensures given pool and RBD image exists, is mapped to a block device, @@ -316,15 +300,16 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, """ # Ensure pool, RBD image, RBD mappings are in place. if not pool_exists(service, pool): - log('ceph: Creating new pool {}.'.format(pool)) - create_pool(service, pool) + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) if not rbd_exists(service, pool, rbd_img): - log('ceph: Creating RBD image ({}).'.format(rbd_img)) + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) create_rbd_image(service, pool, rbd_img, sizemb) if not image_mapped(rbd_img): - log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) map_block_storage(service, pool, rbd_img) # make file system @@ -339,45 +324,47 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, for svc in system_services: if service_running(svc): - log('ceph: Stopping services {} prior to migrating data.' - .format(svc)) + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) service_stop(svc) place_data_on_block_device(blk_device, mount_point) for svc in system_services: - log('ceph: Starting service {} after migrating data.' - .format(svc)) + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) service_start(svc) def ensure_ceph_keyring(service, user=None, group=None): - ''' - Ensures a ceph keyring is created for a named service - and optionally ensures user and group ownership. + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. Returns False if no ceph key is available in relation state. 
- ''' + """ key = None for rid in relation_ids('ceph'): for unit in related_units(rid): key = relation_get('key', rid=rid, unit=unit) if key: break + if not key: return False + create_keyring(service=service, key=key) keyring = _keyring_path(service) if user and group: check_call(['chown', '%s.%s' % (user, group), keyring]) + return True def ceph_version(): - ''' Retrieve the local version of ceph ''' + """Retrieve the local version of ceph.""" if os.path.exists('/usr/bin/ceph'): cmd = ['ceph', '-v'] - output = check_output(cmd) + output = check_output(cmd).decode('US-ASCII') output = output.split() if len(output) > 3: return output[2] diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py index 38957ef0..a22c3d7b 100644 --- a/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -1,12 +1,12 @@ - import os import re - from subprocess import ( check_call, check_output, ) +import six + ################################################## # loopback device helpers. @@ -37,7 +37,7 @@ def create_loopback(file_path): ''' file_path = os.path.abspath(file_path) check_call(['losetup', '--find', file_path]) - for d, f in loopback_devices().iteritems(): + for d, f in six.iteritems(loopback_devices()): if f == file_path: return d @@ -51,7 +51,7 @@ def ensure_loopback_device(path, size): :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) ''' - for d, f in loopback_devices().iteritems(): + for d, f in six.iteritems(loopback_devices()): if f == path: return d diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py index 8ac7fecc..0aa65f4f 100644 --- a/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -61,6 +61,7 @@ def list_lvm_volume_group(block_device): vg = None pvd = check_output(['pvdisplay', block_device]).splitlines() for l in pvd: + l = l.decode('UTF-8') if l.strip().startswith('VG Name'): vg = ' '.join(l.strip().split()[2:]) return vg diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index 1b958712..c6a15e14 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -30,7 +30,8 @@ def zap_disk(block_device): # sometimes sgdisk exits non-zero; this is OK, dd will clean up call(['sgdisk', '--zap-all', '--mbrtogpt', '--clear', block_device]) - dev_end = check_output(['blockdev', '--getsz', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), 'bs=1M', 'count=1']) @@ -47,7 +48,7 @@ def is_device_mounted(device): it doesn't. ''' is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']) + out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py index cfaf0a65..0adf0db3 100644 --- a/hooks/charmhelpers/core/fstab.py +++ b/hooks/charmhelpers/core/fstab.py @@ -3,10 +3,11 @@ __author__ = 'Jorge Niedbalski R. 
' +import io import os -class Fstab(file): +class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer for file `/etc/fstab` """ @@ -24,8 +25,8 @@ class Fstab(file): options = "defaults" self.options = options - self.d = d - self.p = p + self.d = int(d) + self.p = int(p) def __eq__(self, o): return str(self) == str(o) @@ -45,7 +46,7 @@ class Fstab(file): self._path = path else: self._path = self.DEFAULT_PATH - file.__init__(self, self._path, 'r+') + super(Fstab, self).__init__(self._path, 'rb+') def _hydrate_entry(self, line): # NOTE: use split with no arguments to split on any @@ -58,8 +59,9 @@ class Fstab(file): def entries(self): self.seek(0) for line in self.readlines(): + line = line.decode('us-ascii') try: - if not line.startswith("#"): + if line.strip() and not line.startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -75,14 +77,14 @@ class Fstab(file): if self.get_entry_by_attr('device', entry.device): return False - self.write(str(entry) + '\n') + self.write((str(entry) + '\n').encode('us-ascii')) self.truncate() return entry def remove_entry(self, entry): self.seek(0) - lines = self.readlines() + lines = [l.decode('us-ascii') for l in self.readlines()] found = False for index, line in enumerate(lines): @@ -97,7 +99,7 @@ class Fstab(file): lines.remove(line) self.seek(0) - self.write(''.join(lines)) + self.write(''.join(lines).encode('us-ascii')) self.truncate() return True diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index af8fe2db..07d1f690 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -9,9 +9,14 @@ import json import yaml import subprocess import sys -import UserDict from subprocess import CalledProcessError +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + CRITICAL = "CRITICAL" ERROR = "ERROR" WARNING = "WARNING" @@ -63,16 +68,18 @@ def log(message, level=None): command = ['juju-log'] if level: command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) command += [message] subprocess.call(command) -class Serializable(UserDict.IterableUserDict): +class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" def __init__(self, obj): # wrap the object - UserDict.IterableUserDict.__init__(self) + UserDict.__init__(self) self.data = obj def __getattr__(self, attr): @@ -214,6 +221,12 @@ class Config(dict): except KeyError: return (self._prev_dict or {})[key] + def keys(self): + prev_keys = [] + if self._prev_dict is not None: + prev_keys = self._prev_dict.keys() + return list(set(prev_keys + list(dict.keys(self)))) + def load_previous(self, path=None): """Load previous copy of config from disk. 
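A note on the recurring pattern in this sync: subprocess.check_output() returns bytes under Python 3, and dict iteration helpers differ between interpreters, which is why these hunks add .decode('UTF-8') before json.loads() and switch to six.iteritems(). A minimal standalone sketch of that pattern, assuming only the six library (the echo command and JSON payload are illustrative, not part of charm-helpers):

    import json
    import subprocess

    import six

    # check_output() returns bytes on Python 3; decode before parsing so the
    # same code runs unchanged on Python 2.
    raw = subprocess.check_output(['echo', '{"ping": "pong"}'])
    data = json.loads(raw.decode('UTF-8'))

    # six.iteritems() resolves to dict.iteritems() on Python 2 and
    # dict.items() on Python 3, matching the substitutions made in these files.
    for key, value in six.iteritems(data):
        print('{}={}'.format(key, value))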
@@ -263,7 +276,7 @@ class Config(dict): """ if self._prev_dict: - for k, v in self._prev_dict.iteritems(): + for k, v in six.iteritems(self._prev_dict): if k not in self: self[k] = v with open(self.path, 'w') as f: @@ -278,7 +291,8 @@ def config(scope=None): config_cmd_line.append(scope) config_cmd_line.append('--format=json') try: - config_data = json.loads(subprocess.check_output(config_cmd_line)) + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) if scope is not None: return config_data return Config(config_data) @@ -297,10 +311,10 @@ def relation_get(attribute=None, unit=None, rid=None): if unit: _args.append(unit) try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None - except CalledProcessError, e: + except CalledProcessError as e: if e.returncode == 2: return None raise @@ -312,7 +326,7 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): relation_cmd_line = ['relation-set'] if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) - for k, v in (relation_settings.items() + kwargs.items()): + for k, v in (list(relation_settings.items()) + list(kwargs.items())): if v is None: relation_cmd_line.append('{}='.format(k)) else: @@ -329,7 +343,8 @@ def relation_ids(reltype=None): relid_cmd_line = ['relation-ids', '--format=json'] if reltype is not None: relid_cmd_line.append(reltype) - return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return json.loads( + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] return [] @@ -340,7 +355,8 @@ def related_units(relid=None): units_cmd_line = ['relation-list', '--format=json'] if relid is not None: units_cmd_line.extend(('-r', relid)) - return json.loads(subprocess.check_output(units_cmd_line)) or [] + return json.loads( + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] @cached @@ -449,7 +465,7 @@ def unit_get(attribute): """Get the unit ID for the remote unit""" _args = ['unit-get', '--format=json', attribute] try: - return json.loads(subprocess.check_output(_args)) + return json.loads(subprocess.check_output(_args).decode('UTF-8')) except ValueError: return None diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index d7ce1e4c..c6f1680a 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -6,19 +6,20 @@ # Matthew Wedgwood import os +import re import pwd import grp import random import string import subprocess import hashlib -import shutil from contextlib import contextmanager - from collections import OrderedDict -from hookenv import log -from fstab import Fstab +import six + +from .hookenv import log +from .fstab import Fstab def service_start(service_name): @@ -54,7 +55,9 @@ def service(action, service_name): def service_running(service): """Determine whether a system service is running""" try: - output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) + output = subprocess.check_output( + ['service', service, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError: return False else: @@ -67,7 +70,9 @@ def service_running(service): def service_available(service_name): """Determine whether a system service is available""" try: - subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) + subprocess.check_output( + ['service', service_name, 'status'], + 
stderr=subprocess.STDOUT).decode('UTF-8') except subprocess.CalledProcessError as e: return 'unrecognized service' not in e.output else: @@ -96,6 +101,26 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def add_group(group_name, system_group=False): + """Add a group to the system""" + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + except KeyError: + log('creating group {0}'.format(group_name)) + cmd = ['addgroup'] + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + group_info = grp.getgrnam(group_name) + return group_info + + def add_user_to_group(username, group): """Add a user to a group""" cmd = [ @@ -115,7 +140,7 @@ def rsync(from_path, to_path, flags='-r', options=None): cmd.append(from_path) cmd.append(to_path) log(" ".join(cmd)) - return subprocess.check_output(cmd).strip() + return subprocess.check_output(cmd).decode('UTF-8').strip() def symlink(source, destination): @@ -130,7 +155,7 @@ def symlink(source, destination): subprocess.check_call(cmd) -def mkdir(path, owner='root', group='root', perms=0555, force=False): +def mkdir(path, owner='root', group='root', perms=0o555, force=False): """Create a directory""" log("Making dir {} {}:{} {:o}".format(path, owner, group, perms)) @@ -146,7 +171,7 @@ def mkdir(path, owner='root', group='root', perms=0555, force=False): os.chown(realpath, uid, gid) -def write_file(path, content, owner='root', group='root', perms=0444): +def write_file(path, content, owner='root', group='root', perms=0o444): """Create or overwrite a file with the contents of a string""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid @@ -177,7 +202,7 @@ def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): cmd_args.extend([device, mountpoint]) try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) return False @@ -191,7 +216,7 @@ def umount(mountpoint, persist=False): cmd_args = ['umount', mountpoint] try: subprocess.check_output(cmd_args) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: log('Error unmounting {}\n{}'.format(mountpoint, e.output)) return False @@ -218,8 +243,8 @@ def file_hash(path, hash_type='md5'): """ if os.path.exists(path): h = getattr(hashlib, hash_type)() - with open(path, 'r') as source: - h.update(source.read()) # IGNORE:E1101 - it does have update + with open(path, 'rb') as source: + h.update(source.read()) return h.hexdigest() else: return None @@ -297,7 +322,7 @@ def pwgen(length=None): if length is None: length = random.choice(range(35, 45)) alphanumeric_chars = [ - l for l in (string.letters + string.digits) + l for l in (string.ascii_letters + string.digits) if l not in 'l0QD1vAEIOUaeiou'] random_chars = [ random.choice(alphanumeric_chars) for _ in range(length)] @@ -306,18 +331,24 @@ def pwgen(length=None): def list_nics(nic_type): '''Return a list of nics of given type(s)''' - if isinstance(nic_type, basestring): + if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type interfaces = [] for int_type in int_types: cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = 
subprocess.check_output(cmd).decode('UTF-8').split('\n') ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - interfaces.append(line.split()[1].replace(":", "")) + matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + if matched: + interface = matched.groups()[0] + else: + interface = line.split()[1].replace(":", "") + interfaces.append(interface) + return interfaces @@ -329,7 +360,7 @@ def set_nic_mtu(nic, mtu): def get_nic_mtu(nic): cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).split('\n') + ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') mtu = "" for line in ip_output: words = line.split() @@ -340,7 +371,7 @@ def get_nic_mtu(nic): def get_nic_hwaddr(nic): cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd) + ip_output = subprocess.check_output(cmd).decode('UTF-8') hwaddr = "" words = ip_output.split() if 'link/ether' in words: @@ -357,8 +388,8 @@ def cmp_pkgrevno(package, revno, pkgcache=None): ''' import apt_pkg - from charmhelpers.fetch import apt_cache if not pkgcache: + from charmhelpers.fetch import apt_cache pkgcache = apt_cache() pkg = pkgcache[package] return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py index e8039a84..69dde79a 100644 --- a/hooks/charmhelpers/core/services/__init__.py +++ b/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,2 @@ -from .base import * -from .helpers import * +from .base import * # NOQA +from .helpers import * # NOQA diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 7067b94b..163a7932 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -196,7 +196,7 @@ class StoredContext(dict): if not os.path.isabs(file_name): file_name = os.path.join(hookenv.charm_dir(), file_name) with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0600) + os.fchmod(file_stream.fileno(), 0o600) yaml.dump(config_data, file_stream) def read_context(self, file_name): @@ -211,15 +211,19 @@ class StoredContext(dict): class TemplateCallback(ManagerCallback): """ - Callback class that will render a Jinja2 template, for use as a ready action. + Callback class that will render a Jinja2 template, for use as a ready + action. 
+ + :param str source: The template source file, relative to + `$CHARM_DIR/templates` - :param str source: The template source file, relative to `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file """ - def __init__(self, source, target, owner='root', group='root', perms=0444): + def __init__(self, source, target, + owner='root', group='root', perms=0o444): self.source = source self.target = target self.owner = owner diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py index 2c638853..83133fa4 100644 --- a/hooks/charmhelpers/core/templating.py +++ b/hooks/charmhelpers/core/templating.py @@ -4,7 +4,8 @@ from charmhelpers.core import host from charmhelpers.core import hookenv -def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): +def render(source, target, context, owner='root', group='root', + perms=0o444, templates_dir=None): """ Render a template. diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 32a673d6..0a126fc3 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -5,10 +5,6 @@ from yaml import safe_load from charmhelpers.core.host import ( lsb_release ) -from urlparse import ( - urlparse, - urlunparse, -) import subprocess from charmhelpers.core.hookenv import ( config, @@ -16,6 +12,12 @@ from charmhelpers.core.hookenv import ( ) import os +import six +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse + CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main @@ -72,6 +74,7 @@ CLOUD_ARCHIVE_POCKETS = { FETCH_HANDLERS = ( 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. @@ -148,7 +151,7 @@ def apt_install(packages, options=None, fatal=False): cmd = ['apt-get', '--assume-yes'] cmd.extend(options) cmd.append('install') - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -181,7 +184,7 @@ def apt_update(fatal=False): def apt_purge(packages, fatal=False): """Purge one or more packages""" cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -192,7 +195,7 @@ def apt_purge(packages, fatal=False): def apt_hold(packages, fatal=False): """Hold one or more packages""" cmd = ['apt-mark', 'hold'] - if isinstance(packages, basestring): + if isinstance(packages, six.string_types): cmd.append(packages) else: cmd.extend(packages) @@ -218,6 +221,7 @@ def add_source(source, key=None): pocket for the release. 'cloud:' may be used to activate official cloud archive pockets, such as 'cloud:icehouse' + 'distro' may be used as a noop @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. 
Ideally, this should be an @@ -251,12 +255,14 @@ def add_source(source, key=None): release = lsb_release()['DISTRIB_CODENAME'] with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass else: - raise SourceConfigError("Unknown source: {!r}".format(source)) + log("Unknown source: {!r}".format(source)) if key: if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile() as key_file: + with NamedTemporaryFile('w+') as key_file: key_file.write(key) key_file.flush() key_file.seek(0) @@ -293,14 +299,14 @@ def configure_sources(update=False, sources = safe_load((config(sources_var) or '').strip()) or [] keys = safe_load((config(keys_var) or '').strip()) or None - if isinstance(sources, basestring): + if isinstance(sources, six.string_types): sources = [sources] if keys is None: for source in sources: add_source(source, None) else: - if isinstance(keys, basestring): + if isinstance(keys, six.string_types): keys = [keys] if len(sources) != len(keys): @@ -397,7 +403,7 @@ def _run_apt_command(cmd, fatal=False): while result is None or result == APT_NO_LOCK: try: result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError, e: + except subprocess.CalledProcessError as e: retry_count = retry_count + 1 if retry_count > APT_NO_LOCK_RETRY_COUNT: raise diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 8c045650..8a4624b2 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -1,8 +1,23 @@ import os -import urllib2 -from urllib import urlretrieve -import urlparse import hashlib +import re + +import six +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs from charmhelpers.fetch import ( BaseFetchHandler, @@ -15,6 +30,24 @@ from charmhelpers.payload.archive import ( from charmhelpers.core.host import mkdir, check_hash +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None + + class ArchiveUrlFetchHandler(BaseFetchHandler): """ Handler to download archive files from arbitrary URLs. 
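For reference, the splituser() and splitpasswd() helpers added above reproduce the behaviour of the old urllib2 functions they replace: the first peels any credentials off the netloc, the second separates username from password. A small standalone sketch with made-up credentials (the regexes mirror the ones defined in this hunk):

    import re

    def splituser(host):
        # 'user:pass@host' -> ('user:pass', 'host'); no '@' -> (None, host)
        match = re.match('^(.*)@(.*)$', host)
        return match.group(1, 2) if match else (None, host)

    def splitpasswd(user):
        # 'user:pass' -> ('user', 'pass'); no ':' -> (user, None)
        match = re.match('^([^:]*):(.*)$', user, re.S)
        return match.group(1, 2) if match else (user, None)

    auth, barehost = splituser('joe:s3cret@archive.example.com')
    username, password = splitpasswd(auth)
    assert (barehost, username, password) == ('archive.example.com', 'joe', 's3cret')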
@@ -42,20 +75,20 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): """ # propogate all exceptions # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse.urlparse(source) + proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): - auth, barehost = urllib2.splituser(netloc) + auth, barehost = splituser(netloc) if auth is not None: - source = urlparse.urlunparse((proto, barehost, path, params, query, fragment)) - username, password = urllib2.splitpasswd(auth) - passman = urllib2.HTTPPasswordMgrWithDefaultRealm() + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() # Realm is set to None in add_password to force the username and password # to be used whatever the realm passman.add_password(None, source, username, password) - authhandler = urllib2.HTTPBasicAuthHandler(passman) - opener = urllib2.build_opener(authhandler) - urllib2.install_opener(opener) - response = urllib2.urlopen(source) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) try: with open(dest, 'w') as dest_file: dest_file.write(response.read()) @@ -91,17 +124,21 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): url_parts = self.parse_url(source) dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) try: self.download(source, dld_file) - except urllib2.URLError as e: + except URLError as e: raise UnhandledSource(e.reason) except OSError as e: raise UnhandledSource(e.strerror) - options = urlparse.parse_qs(url_parts.fragment) + options = parse_qs(url_parts.fragment) for key, value in options.items(): - if key in hashlib.algorithms: + if not six.PY3: + algorithms = hashlib.algorithms + else: + algorithms = hashlib.algorithms_available + if key in algorithms: check_hash(dld_file, value, key) if checksum: check_hash(dld_file, checksum, hash_type) diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py index 0e580e47..8ef48f30 100644 --- a/hooks/charmhelpers/fetch/bzrurl.py +++ b/hooks/charmhelpers/fetch/bzrurl.py @@ -5,6 +5,10 @@ from charmhelpers.fetch import ( ) from charmhelpers.core.host import mkdir +import six +if six.PY3: + raise ImportError('bzrlib does not support Python3') + try: from bzrlib.branch import Branch except ImportError: @@ -42,7 +46,7 @@ class BzrUrlFetchHandler(BaseFetchHandler): dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0755) + mkdir(dest_dir, perms=0o755) try: self.branch(source, dest_dir) except OSError as e: diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py new file mode 100644 index 00000000..f3aa2821 --- /dev/null +++ b/hooks/charmhelpers/fetch/giturl.py @@ -0,0 +1,51 @@ +import os +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.core.host import mkdir + +import six +if six.PY3: + raise ImportError('GitPython does not support Python 3') + +try: + from git import Repo +except ImportError: + from charmhelpers.fetch import apt_install + apt_install("python-git") + from git import Repo + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github 
URLs""" + def can_handle(self, source): + url_parts = self.parse_url(source) + # TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git'): + return False + else: + return True + + def clone(self, source, dest, branch): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + repo = Repo.clone_from(source, dest) + repo.git.checkout(branch) + + def install(self, source, branch="master", dest=None): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + if not os.path.exists(dest_dir): + mkdir(dest_dir, perms=0o755) + try: + self.clone(source, dest_dir, branch) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir From d2dbbab3f659ec177e447ea0dfe9b70c6e426ec1 Mon Sep 17 00:00:00 2001 From: Jasper Aikema Date: Wed, 10 Dec 2014 22:45:57 +0100 Subject: [PATCH 092/125] The db config is now in the neutron.conf file --- hooks/neutron_api_hooks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 431a2da2..28737654 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -192,8 +192,8 @@ def db_changed(): @restart_on_change(restart_map()) def postgresql_neutron_db_changed(): plugin = config('neutron-plugin') - # DB config might have been moved to main neutron.conf in H? - CONFIGS.write(neutron_plugin_attribute(plugin, 'config', 'neutron')) + CONFIGS.write(NEUTRON_CONF) + @hooks.hook('amqp-relation-broken', From 64279a32dddaeba455b321cbc02172477cedc44c Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 13:38:25 +0000 Subject: [PATCH 093/125] Add __init__ to charm-helpers sync. --- charm-helpers-sync.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 9b5e79e9..d8680f5f 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,6 +1,7 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: + - __init__ - core - fetch - contrib.openstack|inc=* From 08ff7d72a94b157e0221fddb0f6bfebbe3058b49 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 13:38:31 +0000 Subject: [PATCH 094/125] Sync charm-helpers. --- hooks/charmhelpers/__init__.py | 22 ++++++++++++++++++++++ hooks/charmhelpers/__init__.pyc | Bin 0 -> 117 bytes hooks/charmhelpers/core/hookenv.py | 18 ++++++++++++++---- 3 files changed, 36 insertions(+), 4 deletions(-) create mode 100644 hooks/charmhelpers/__init__.pyc diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/hooks/charmhelpers/__init__.py +++ b/hooks/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. 
+import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa diff --git a/hooks/charmhelpers/__init__.pyc b/hooks/charmhelpers/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8eb6a0f0ab30bc236a03ec212623c1cf972f263f GIT binary patch literal 117 zcmZSn%*$1h)De=*00oRd+5w1*S%5?e14FO|NW@PANHCxg#d1KgjQsrUV*TWd#G>4c o)SQCUqGJ8{_{_Y_lK6PNg31yOpc0$h{FKt1R6CGC#X!se07Hls@c;k- literal 0 HcmV?d00001 diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 07d1f690..69ae4564 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -395,21 +395,31 @@ def relations_of_type(reltype=None): return relation_data +@cached +def metadata(): + """Get the current charm metadata.yaml contents as a python object""" + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: + return yaml.safe_load(md) + + @cached def relation_types(): """Get a list of relation types supported by this charm""" - charmdir = os.environ.get('CHARM_DIR', '') - mdf = open(os.path.join(charmdir, 'metadata.yaml')) - md = yaml.safe_load(mdf) rel_types = [] + md = metadata() for key in ('provides', 'requires', 'peers'): section = md.get(key) if section: rel_types.extend(section.keys()) - mdf.close() return rel_types +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + @cached def relations(): """Get a nested dictionary of relation data for all related units""" From 6911be629af09355b9e7c52fe0b6a62f87b3a679 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 13:57:54 +0000 Subject: [PATCH 095/125] Remove .pyc files from charmhelpers/hooks. --- hooks/charmhelpers/__init__.pyc | Bin 117 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 hooks/charmhelpers/__init__.pyc diff --git a/hooks/charmhelpers/__init__.pyc b/hooks/charmhelpers/__init__.pyc deleted file mode 100644 index 8eb6a0f0ab30bc236a03ec212623c1cf972f263f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 117 zcmZSn%*$1h)De=*00oRd+5w1*S%5?e14FO|NW@PANHCxg#d1KgjQsrUV*TWd#G>4c o)SQCUqGJ8{_{_Y_lK6PNg31yOpc0$h{FKt1R6CGC#X!se07Hls@c;k- From 7da87994ab03552229e9dabf78b3679797d061f5 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 14:50:50 +0000 Subject: [PATCH 096/125] Remove __init__ from charm-helpers yaml. --- charm-helpers-sync.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index d8680f5f..9b5e79e9 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,7 +1,6 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: - - __init__ - core - fetch - contrib.openstack|inc=* From 8097cc8355e43a54e7255f88c9b16e78db3481f2 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 16:42:25 +0000 Subject: [PATCH 097/125] Remove hooks/charmhelpers/__init__.py and re-sync charm-helpers. 
--- hooks/charmhelpers/__init__.py | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py index b46e2e23..e69de29b 100644 --- a/hooks/charmhelpers/__init__.py +++ b/hooks/charmhelpers/__init__.py @@ -1,22 +0,0 @@ -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. -import subprocess -import sys - -try: - import six # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa - -try: - import yaml # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa From c7cf1830357d535efa1f03a0e6e5b63a544c703f Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Thu, 11 Dec 2014 17:48:01 +0000 Subject: [PATCH 098/125] Sync charm-helpers and actually pick up charmhelpers/__init__.py this time. --- hooks/charmhelpers/__init__.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py index e69de29b..b46e2e23 100644 --- a/hooks/charmhelpers/__init__.py +++ b/hooks/charmhelpers/__init__.py @@ -0,0 +1,22 @@ +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. +import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa From 3d2b2d9bc6a8320d7a5f68f7cd05ee53e13459ea Mon Sep 17 00:00:00 2001 From: James Page Date: Mon, 15 Dec 2014 09:28:52 +0000 Subject: [PATCH 099/125] [trivial] Resync charm-helpers --- hooks/charmhelpers/contrib/openstack/context.py | 15 +++++++++++++++ hooks/charmhelpers/core/templating.py | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index eebe8c03..eb108910 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -21,11 +21,15 @@ from charmhelpers.core.hookenv import ( relation_set, unit_get, unit_private_ip, + charm_name, DEBUG, INFO, WARNING, ERROR, ) + +from charmhelpers.core.sysctl import create as sysctl_create + from charmhelpers.core.host import ( mkdir, write_file, @@ -1015,3 +1019,14 @@ class NotificationDriverContext(OSContextGenerator): ctxt['notifications'] = "True" return ctxt + + +class SysctlContext(OSContextGenerator): + """This context check if the 'sysctl' option exists on configuration + then creates a file with the loaded contents""" + def __call__(self): + sysctl_dict = config('sysctl') + if sysctl_dict: + sysctl_create(sysctl_dict, + '/etc/sysctl.d/50-{0}.conf'.format(charm_name())) + return {'sysctl': sysctl_dict} diff --git a/hooks/charmhelpers/core/templating.py 
b/hooks/charmhelpers/core/templating.py index 83133fa4..569eaed6 100644 --- a/hooks/charmhelpers/core/templating.py +++ b/hooks/charmhelpers/core/templating.py @@ -48,5 +48,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target)) + host.mkdir(os.path.dirname(target), owner, group) host.write_file(target, content, owner, group, perms) From dbbb99ba071f494926feef87f5f145b744fce4e4 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 18 Dec 2014 11:27:01 +0000 Subject: [PATCH 100/125] charmhelpers sync to get fix for LP #1396246 --- .../charmhelpers/contrib/hahelpers/cluster.py | 38 ++++++++++------ .../contrib/storage/linux/ceph.py | 43 +++++++++++++++++++ hooks/charmhelpers/core/decorators.py | 41 ++++++++++++++++++ hooks/charmhelpers/core/host.py | 11 +++-- 4 files changed, 117 insertions(+), 16 deletions(-) create mode 100644 hooks/charmhelpers/core/decorators.py diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 52ce4b7c..912b2fe3 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -13,6 +13,7 @@ clustering-related helpers. import subprocess import os + from socket import gethostname as get_unit_hostname import six @@ -28,12 +29,19 @@ from charmhelpers.core.hookenv import ( WARNING, unit_get, ) +from charmhelpers.core.decorators import ( + retry_on_exception, +) class HAIncompleteConfig(Exception): pass +class CRMResourceNotFound(Exception): + pass + + def is_elected_leader(resource): """ Returns True if the charm executing this is the elected cluster leader. @@ -68,24 +76,30 @@ def is_clustered(): return False -def is_crm_leader(resource): +@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) +def is_crm_leader(resource, retry=False): """ Returns True if the charm calling this is the elected corosync leader, as returned by calling the external "crm" command. + + We allow this operation to be retried to avoid the possibility of getting a + false negative. See LP #1396246 for more info. """ - cmd = [ - "crm", "resource", - "show", resource - ] + cmd = ['crm', 'resource', 'show', resource] try: - status = subprocess.check_output(cmd).decode('UTF-8') + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") except subprocess.CalledProcessError: - return False - else: - if get_unit_hostname() in status: - return True - else: - return False + status = None + + if status and get_unit_hostname() in status: + return True + + if status and "resource %s is NOT running" % (resource) in status: + raise CRMResourceNotFound("CRM resource %s not found" % (resource)) + + return False def is_leader(resource): diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index d47dc228..1479f4f3 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -372,3 +372,46 @@ def ceph_version(): return None else: return None + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. 
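The two broker classes introduced in this hunk (their methods follow just below) give charms a small, versioned API for asking the ceph charm to create pools. A usage sketch, assuming the synced charm-helpers tree is on the import path; the pool name and the idea of publishing the payload under a 'broker_req' relation key are illustrative assumptions, not something this patch defines:

    from charmhelpers.contrib.storage.linux.ceph import (
        CephBrokerRq,
        CephBrokerRsp,
    )

    # Build a request asking the broker to create a pool with two replicas.
    rq = CephBrokerRq()
    rq.add_op_create_pool(name='neutron', replica_count=2)

    # rq.request is the JSON string a charm would publish on the ceph
    # relation, e.g. relation_set(broker_req=rq.request).
    print(rq.request)

    # Decoding a reply works the same way in reverse.
    rsp = CephBrokerRsp('{"exit-code": 0}')
    assert rsp.exit_code == 0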
+ """ + def __init__(self, api_version=1): + self.api_version = api_version + self.ops = [] + + def add_op_create_pool(self, name, replica_count=3): + self.ops.append({'op': 'create-pool', 'name': name, + 'replicas': replica_count}) + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. + """ + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') diff --git a/hooks/charmhelpers/core/decorators.py b/hooks/charmhelpers/core/decorators.py new file mode 100644 index 00000000..029a4ef4 --- /dev/null +++ b/hooks/charmhelpers/core/decorators.py @@ -0,0 +1,41 @@ +# +# Copyright 2014 Canonical Ltd. +# +# Authors: +# Edward Hope-Morley +# + +import time + +from charmhelpers.core.hookenv import ( + log, + INFO, +) + + +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): + """If the decorated function raises exception exc_type, allow num_retries + retry attempts before raise the exception. + """ + def _retry_on_exception_inner_1(f): + def _retry_on_exception_inner_2(*args, **kwargs): + retries = num_retries + multiplier = 1 + while True: + try: + return f(*args, **kwargs) + except exc_type: + if not retries: + raise + + delay = base_delay * multiplier + multiplier += 1 + log("Retrying '%s' %d more times (delay=%s)" % + (f.__name__, retries, delay), level=INFO) + retries -= 1 + if delay: + time.sleep(delay) + + return _retry_on_exception_inner_2 + + return _retry_on_exception_inner_1 diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index c6f1680a..5221120c 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -162,13 +162,16 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid realpath = os.path.abspath(path) - if os.path.exists(realpath): - if force and not os.path.isdir(realpath): + path_exists = os.path.exists(realpath) + if path_exists and force: + if not os.path.isdir(realpath): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) - else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) def write_file(path, content, owner='root', group='root', perms=0o444): From b939adb87812ea6089f6f50db6f0ff5420e167fe Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 19 Dec 2014 10:23:45 +0000 Subject: [PATCH 101/125] Sync charmhelpers --- hooks/charmhelpers/contrib/openstack/neutron.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 1446f637..095cc24b 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -152,9 +152,15 @@ def neutron_plugins(): database=config('neutron-database'), relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], - 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'], + 'services': ['calico-felix', + 'bird', + 'neutron-dhcp-agent', + 
'nova-api-metadata'], 'packages': [[headers_package()] + determine_dkms_package(), - ['calico-compute', 'bird', 'neutron-dhcp-agent']], + ['calico-compute', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata']], 'server_packages': ['neutron-server', 'calico-control'], 'server_services': ['neutron-server'] } From 686744ba38257a6200d91eb82c2dc7a7066eb614 Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Fri, 19 Dec 2014 17:11:28 +0000 Subject: [PATCH 102/125] charmhelpers sync to get fix for precise haproxy ipv6 --- hooks/charmhelpers/contrib/openstack/context.py | 1 + hooks/charmhelpers/contrib/openstack/neutron.py | 10 ++++++++-- .../contrib/openstack/templates/haproxy.cfg | 2 ++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index eb108910..180bfad2 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -491,6 +491,7 @@ class HAProxyContext(OSContextGenerator): ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') if config('prefer-ipv6'): + ctxt['ipv6'] = True ctxt['local_host'] = 'ip6-localhost' ctxt['haproxy_host'] = '::' ctxt['stat_port'] = ':::8888' diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 1446f637..095cc24b 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -152,9 +152,15 @@ def neutron_plugins(): database=config('neutron-database'), relation_prefix='neutron', ssl_dir=NEUTRON_CONF_DIR)], - 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'], + 'services': ['calico-felix', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata'], 'packages': [[headers_package()] + determine_dkms_package(), - ['calico-compute', 'bird', 'neutron-dhcp-agent']], + ['calico-compute', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata']], 'server_packages': ['neutron-server', 'calico-control'], 'server_services': ['neutron-server'] } diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 0229f9d4..9ae1efb9 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -38,7 +38,9 @@ listen stats {{ stat_port }} {% for service, ports in service_ports.items() -%} frontend tcp-in_{{ service }} bind *:{{ ports[0] }} + {% if ipv6 -%} bind :::{{ ports[0] }} + {% endif -%} {% for frontend in frontends -%} acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} From 77697efcc1d3e0200c1dc83d2971b8e84b1bbe41 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 5 Jan 2015 09:31:09 +0000 Subject: [PATCH 103/125] charmhelper sync --- hooks/charmhelpers/contrib/openstack/context.py | 1 + hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg | 2 ++ 2 files changed, 3 insertions(+) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index eb108910..180bfad2 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -491,6 +491,7 @@ class HAProxyContext(OSContextGenerator): ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') if config('prefer-ipv6'): + ctxt['ipv6'] = True ctxt['local_host'] = 'ip6-localhost' 
ctxt['haproxy_host'] = '::' ctxt['stat_port'] = ':::8888' diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 0229f9d4..9ae1efb9 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -38,7 +38,9 @@ listen stats {{ stat_port }} {% for service, ports in service_ports.items() -%} frontend tcp-in_{{ service }} bind *:{{ ports[0] }} + {% if ipv6 -%} bind :::{{ ports[0] }} + {% endif -%} {% for frontend in frontends -%} acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} From 267b4872b7ddaedfb0230eea8075a181a2bbc079 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 9 Jan 2015 16:43:11 +0000 Subject: [PATCH 104/125] Fixed unit_tests --- unit_tests/test_neutron_api_hooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 747d66a0..b296d8cd 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -48,6 +48,7 @@ TO_PATCH = [ 'get_iface_for_address', 'get_netmask_for_address', 'get_address_in_network', + 'update_nrpe_config', ] NEUTRON_CONF_DIR = "/etc/neutron" From fc27e18694b5917b5e6c19cccdf864f98ddfbb0d Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 12 Jan 2015 12:04:00 +0000 Subject: [PATCH 105/125] Use rnpe functions from charmhelpers --- .../charmhelpers/contrib/charmsupport/nrpe.py | 102 ++++++++++++++++-- .../contrib/charmsupport/volumes.py | 7 +- hooks/charmhelpers/contrib/openstack/utils.py | 6 ++ hooks/charmhelpers/fetch/__init__.py | 9 +- hooks/neutron_api_hooks.py | 54 ++-------- 5 files changed, 120 insertions(+), 58 deletions(-) diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 51b62d39..f3a936d0 100644 --- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -18,6 +18,7 @@ from charmhelpers.core.hookenv import ( log, relation_ids, relation_set, + relations_of_type, ) from charmhelpers.core.host import service @@ -54,6 +55,12 @@ from charmhelpers.core.host import service # juju-myservice-0 # If you're running multiple environments with the same services in them # this allows you to differentiate between them. +# nagios_servicegroups: +# default: "" +# type: string +# description: | +# A comma-separated list of nagios servicegroups. +# If left empty, the nagios_context will be used as the servicegroup # # 3. 
Add custom checks (Nagios plugins) to files/nrpe-external-master # @@ -125,9 +132,6 @@ define service {{ def _locate_cmd(self, check_cmd): search_path = ( - '/', - os.path.join(os.environ['CHARM_DIR'], - 'files/nrpe-external-master'), '/usr/lib/nagios/plugins', '/usr/local/lib/nagios/plugins', ) @@ -141,7 +145,7 @@ define service {{ log('Check command not found: {}'.format(parts[0])) return '' - def write(self, nagios_context, hostname): + def write(self, nagios_context, hostname, nagios_servicegroups=None): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: @@ -153,16 +157,21 @@ define service {{ log('Not writing service config as {} is not accessible'.format( NRPE.nagios_exportdir)) else: - self.write_service_config(nagios_context, hostname) + self.write_service_config(nagios_context, hostname, + nagios_servicegroups) - def write_service_config(self, nagios_context, hostname): + def write_service_config(self, nagios_context, hostname, + nagios_servicegroups=None): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) + if not nagios_servicegroups: + nagios_servicegroups = nagios_context + templ_vars = { 'nagios_hostname': hostname, - 'nagios_servicegroup': nagios_context, + 'nagios_servicegroup': nagios_servicegroups, 'description': self.description, 'shortname': self.shortname, 'command': self.command, @@ -186,6 +195,10 @@ class NRPE(object): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] + if 'nagios_servicegroups' in self.config: + self.nagios_servicegroups = self.config['nagios_servicegroups'] + else: + self.nagios_servicegroups = 'juju' self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname @@ -211,7 +224,8 @@ class NRPE(object): nrpe_monitors = {} monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} for nrpecheck in self.checks: - nrpecheck.write(self.nagios_context, self.hostname) + nrpecheck.write(self.nagios_context, self.hostname, + self.nagios_servicegroups) nrpe_monitors[nrpecheck.shortname] = { "command": nrpecheck.command, } @@ -220,3 +234,75 @@ class NRPE(object): for rid in relation_ids("local-monitors"): relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + + +def get_nagios_hostcontext(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_host_context + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_host_context'] + + +def get_nagios_hostname(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_hostname + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_hostname'] + + +def get_nagios_unit_name(relation_name='nrpe-external-master'): + """ + Return the nagios unit name prepended with host_context if needed + + :param str relation_name: Name of relation nrpe sub joined to + """ + host_context = get_nagios_hostcontext(relation_name) + if host_context: + unit = "%s:%s" % (host_context, local_unit()) + else: + unit = local_unit() + return unit + + +def add_init_service_checks(nrpe, services, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object 
to add check to + :param list services: List of services to check + :param str unit_name: Unit name to use in check description + """ + for svc in services: + upstart_init = '/etc/init/%s.conf' % svc + sysv_init = '/etc/init.d/%s' % svc + if os.path.exists(upstart_init): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) + elif os.path.exists(sysv_init): + cronpath = '/etc/cron.d/nagios-service-check-%s' % svc + cron_file = ('*/5 * * * * root ' + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status > ' + '/var/lib/nagios/service-check-%s.txt\n' % (svc, + svc) + ) + f = open(cronpath, 'w') + f.write(cron_file) + f.close() + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_status_file.py -f ' + '/var/lib/nagios/service-check-%s.txt' % svc, + ) diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py index 0f905dff..d61aa47f 100644 --- a/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -2,7 +2,8 @@ Functions for managing volumes in juju units. One volume is supported per unit. Subordinates may have their own storage, provided it is on its own partition. -Configuration stanzas: +Configuration stanzas:: + volume-ephemeral: type: boolean default: true @@ -20,7 +21,8 @@ Configuration stanzas: is 'true' and no volume-map value is set. Use 'juju set' to set a value and 'juju resolved' to complete configuration. -Usage: +Usage:: + from charmsupport.volumes import configure_volume, VolumeConfigurationError from charmsupport.hookenv import log, ERROR def post_mount_hook(): @@ -34,6 +36,7 @@ Usage: after_change=post_mount_hook) except VolumeConfigurationError: log('Storage could not be configured', ERROR) + ''' # XXX: Known limitations diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 44179679..ddd40ce5 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -53,6 +53,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('saucy', 'havana'), ('trusty', 'icehouse'), ('utopic', 'juno'), + ('vivid', 'kilo'), ]) @@ -64,6 +65,7 @@ OPENSTACK_CODENAMES = OrderedDict([ ('2013.2', 'havana'), ('2014.1', 'icehouse'), ('2014.2', 'juno'), + ('2015.1', 'kilo'), ]) # The ugly duckling @@ -84,6 +86,7 @@ SWIFT_CODENAMES = OrderedDict([ ('2.0.0', 'juno'), ('2.1.0', 'juno'), ('2.2.0', 'juno'), + ('2.2.1', 'kilo'), ]) DEFAULT_LOOPBACK_SIZE = '5G' @@ -289,6 +292,9 @@ def configure_installation_source(rel): 'juno': 'trusty-updates/juno', 'juno/updates': 'trusty-updates/juno', 'juno/proposed': 'trusty-proposed/juno', + 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', } try: diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 0a126fc3..aceadea4 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -64,9 +64,16 @@ CLOUD_ARCHIVE_POCKETS = { 'trusty-juno/updates': 'trusty-updates/juno', 'trusty-updates/juno': 'trusty-updates/juno', 'juno/proposed': 'trusty-proposed/juno', - 'juno/proposed': 'trusty-proposed/juno', 'trusty-juno/proposed': 'trusty-proposed/juno', 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 
'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', } # The order of this list is very important. Handlers should be listed in from diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index c63fb06b..7450c2ce 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -1,7 +1,6 @@ #!/usr/bin/python import sys -import os import uuid from subprocess import check_call @@ -15,10 +14,8 @@ from charmhelpers.core.hookenv import ( relation_get, relation_ids, relation_set, - relations_of_type, open_port, unit_get, - local_unit, ) from charmhelpers.core.host import ( @@ -77,7 +74,7 @@ from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.openstack.context import ADDRESS_TYPES -from charmhelpers.contrib.charmsupport.nrpe import NRPE +from charmhelpers.contrib.charmsupport import nrpe hooks = Hooks() CONFIGS = register_configs() @@ -379,50 +376,13 @@ def ha_changed(): @hooks.hook('nrpe-external-master-relation-joined', 'nrpe-external-master-relation-changed') def update_nrpe_config(): - # Find out if nrpe set nagios_hostname - hostname = None - host_context = None - for rel in relations_of_type('nrpe-external-master'): - if 'nagios_hostname' in rel: - hostname = rel['nagios_hostname'] - host_context = rel['nagios_host_context'] - break - nrpe = NRPE(hostname=hostname) + # python-dbus is used by check_upstart_job apt_install('python-dbus') - - if host_context: - current_unit = "%s:%s" % (host_context, local_unit()) - else: - current_unit = local_unit() - - services_to_monitor = services() - - for service in services_to_monitor: - upstart_init = '/etc/init/%s.conf' % service - sysv_init = '/etc/init.d/%s' % service - - if os.path.exists(upstart_init): - nrpe.add_check( - shortname=service, - description='process check {%s}' % current_unit, - check_cmd='check_upstart_job %s' % service, - ) - elif os.path.exists(sysv_init): - cronpath = '/etc/cron.d/nagios-service-check-%s' % service - cron_template = '*/5 * * * * root \ -/usr/local/lib/nagios/plugins/check_exit_status.pl -s /etc/init.d/%s \ -status > /var/lib/nagios/service-check-%s.txt\n' % (service, service) - f = open(cronpath, 'w') - f.write(cron_template) - f.close() - nrpe.add_check( - shortname=service, - description='process check {%s}' % current_unit, - check_cmd='check_status_file.py -f \ -/var/lib/nagios/service-check-%s.txt' % service, - ) - - nrpe.write() + hostname = nrpe.get_nagios_hostname() + current_unit = nrpe.get_nagios_unit_name() + nrpe_setup = nrpe.NRPE(hostname=hostname) + nrpe.add_init_service_checks(nrpe_setup, services(), current_unit) + nrpe_setup.write() def main(): From 30c0baa5c3dc540566940ff3b51da8f201fd3a0e Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Mon, 12 Jan 2015 12:41:14 +0000 Subject: [PATCH 106/125] charmhelpers sync to get fix for apache ssl port selection --- hooks/charmhelpers/contrib/openstack/context.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 180bfad2..8ab61bf0 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -663,8 +663,9 @@ class ApacheSSLContext(OSContextGenerator): addresses = self.get_network_addresses() for address, endpoint in sorted(set(addresses)): for 
api_port in self.external_ports: - ext_port = determine_apache_port(api_port) - int_port = determine_api_port(api_port) + ext_port = determine_apache_port(api_port, + singlenode_mode=True) + int_port = determine_api_port(api_port, singlenode_mode=True) portmap = (address, endpoint, int(ext_port), int(int_port)) ctxt['endpoints'].append(portmap) ctxt['ext_ports'].append(int(ext_port)) From 1b322055f77bd6b339b5e9717f60809174407b57 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 13 Jan 2015 12:07:33 +0000 Subject: [PATCH 107/125] [trivial] Resync helpers --- .../charmhelpers/contrib/openstack/context.py | 32 +++++++++++-------- .../contrib/openstack/templates/haproxy.cfg | 4 ++- .../contrib/storage/linux/ceph.py | 11 +++++++ unit_tests/test_neutron_api_context.py | 1 + 4 files changed, 33 insertions(+), 15 deletions(-) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 8ab61bf0..eaa89a67 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -468,21 +468,25 @@ class HAProxyContext(OSContextGenerator): _unit = unit.replace('/', '-') cluster_hosts[laddr]['backends'][_unit] = _laddr - # NOTE(jamespage) no split configurations found, just use - # private addresses - if not cluster_hosts: - netmask = get_netmask_for_address(addr) - cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), - 'backends': {l_unit: addr}} - for rid in relation_ids('cluster'): - for unit in related_units(rid): - _laddr = relation_get('private-address', - rid=rid, unit=unit) - if _laddr: - _unit = unit.replace('/', '-') - cluster_hosts[addr]['backends'][_unit] = _laddr + # NOTE(jamespage) add backend based on private address - this + # with either be the only backend or the fallback if no acls + # match in the frontend + cluster_hosts[addr] = {} + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), + 'backends': {l_unit: addr}} + for rid in relation_ids('cluster'): + for unit in related_units(rid): + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[addr]['backends'][_unit] = _laddr - ctxt = {'frontends': cluster_hosts} + ctxt = { + 'frontends': cluster_hosts, + 'default_backend': addr + } if config('haproxy-server-timeout'): ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg index 9ae1efb9..ad875f16 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -44,7 +44,9 @@ frontend tcp-in_{{ service }} {% for frontend in frontends -%} acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} - {% endfor %} + {% endfor -%} + default_backend {{ service }}_{{ default_backend }} + {% for frontend in frontends -%} backend {{ service }}_{{ frontend }} balance leastconn diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 1479f4f3..6ebeab5c 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -157,6 +157,17 @@ def create_keyring(service, key): log('Created new ceph keyring at %s.' 
% keyring, level=DEBUG) +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' % keyring, level=INFO) + + def create_key_file(service, key): """Create a file containing key.""" keyfile = _keyfile_path(service) diff --git a/unit_tests/test_neutron_api_context.py b/unit_tests/test_neutron_api_context.py index 3a43e5ee..b0ad2907 100644 --- a/unit_tests/test_neutron_api_context.py +++ b/unit_tests/test_neutron_api_context.py @@ -113,6 +113,7 @@ class HAProxyContextTest(CharmTestCase): 'backends': unit_addresses, } }, + 'default_backend': '10.10.10.11', 'service_ports': service_ports, 'neutron_bind_port': 9686, } From b0864959f0d18f1d684382d83695f0144b3f9c85 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 13 Jan 2015 14:42:40 +0000 Subject: [PATCH 108/125] Switchback to trunk of charm-helpers --- charm-helpers-sync.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index d2fc0229..8af0007c 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -1,4 +1,4 @@ -branch: lp:~james-page/charm-helpers/lp.1391784 +branch: lp:charm-helpers destination: hooks/charmhelpers include: - core From 392f420ecadefd61e6f3ce3074eab17e75c96f27 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 20 Jan 2015 14:15:55 +0000 Subject: [PATCH 109/125] Add kilo support --- hooks/neutron_api_hooks.py | 6 ++- hooks/neutron_api_utils.py | 12 ++++- templates/kilo/neutron.conf | 77 ++++++++++++++++++++++++++++ unit_tests/test_neutron_api_utils.py | 13 ++++- 4 files changed, 102 insertions(+), 6 deletions(-) create mode 100644 templates/kilo/neutron.conf diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 4fdf20cd..aa74e8ac 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -104,7 +104,8 @@ def install(): execd_preinstall() configure_installation_source(config('openstack-origin')) apt_update() - apt_install(determine_packages(), fatal=True) + apt_install(determine_packages(config('openstack-origin')), + fatal=True) [open_port(port) for port in determine_ports()] @@ -112,7 +113,8 @@ def install(): @hooks.hook('config-changed') @restart_on_change(restart_map(), stopstart=True) def config_changed(): - apt_install(filter_installed_packages(determine_packages()), + apt_install(filter_installed_packages( + determine_packages(config('openstack-origin'))), fatal=True) if config('prefer-ipv6'): setup_ipv6() diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 17ee02d9..6d5155d8 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -45,6 +45,12 @@ BASE_PACKAGES = [ 'uuid', ] +KILO_PACKAGES = [ + 'python-neutron-lbaas', + 'python-neutron-fwaas', + 'python-neutron-vpnaas', +] + BASE_SERVICES = [ 'neutron-server' ] @@ -100,7 +106,7 @@ def api_port(service): return API_PORTS[service] -def determine_packages(): +def determine_packages(source=None): # currently all packages match service names packages = [] + BASE_PACKAGES for v in resource_map().values(): @@ -109,6 +115,8 @@ def determine_packages(): 'server_packages', 'neutron') packages.extend(pkgs) + if get_os_codename_install_source(source) >= 'kilo': + packages.extend(KILO_PACKAGES) return list(set(packages)) @@ -208,7 +216,7 @@ def do_openstack_upgrade(configs): ] apt_update(fatal=True) 
apt_upgrade(options=dpkg_opts, fatal=True, dist=True) - pkgs = determine_packages() + pkgs = determine_packages(new_os_rel) # Sort packages just to make unit tests easier pkgs.sort() apt_install(packages=pkgs, diff --git a/templates/kilo/neutron.conf b/templates/kilo/neutron.conf new file mode 100644 index 00000000..bd1f9c12 --- /dev/null +++ b/templates/kilo/neutron.conf @@ -0,0 +1,77 @@ +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. +## Restart trigger {{ restart_trigger }} +############################################################################### +[DEFAULT] +verbose = {{ verbose }} +debug = {{ debug }} +use_syslog = {{ use_syslog }} +state_path = /var/lib/neutron +lock_path = $state_path/lock +bind_host = {{ bind_host }} +auth_strategy = keystone +notification_driver = neutron.openstack.common.notifier.rpc_notifier +api_workers = {{ workers }} +rpc_workers = {{ workers }} + +{% if neutron_bind_port -%} +bind_port = {{ neutron_bind_port }} +{% else -%} +bind_port = 9696 +{% endif -%} + +{% if core_plugin -%} +core_plugin = {{ core_plugin }} +{% if neutron_plugin in ['ovs', 'ml2'] -%} +service_plugins = router,firewall,lbaas,vpnaas,metering +{% endif -%} +{% endif -%} + +{% if neutron_security_groups -%} +allow_overlapping_ips = True +neutron_firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver +{% endif -%} + +{% include "parts/rabbitmq" %} + +notify_nova_on_port_status_changes = True +notify_nova_on_port_data_changes = True +nova_url = {{ nova_url }} +nova_region_name = {{ region }} +{% if auth_host -%} +nova_admin_username = {{ admin_user }} +nova_admin_tenant_id = {{ admin_tenant_id }} +nova_admin_password = {{ admin_password }} +nova_admin_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0 +{% endif -%} + +[quotas] +quota_driver = neutron.db.quota_db.DbQuotaDriver +{% if neutron_security_groups -%} +quota_items = network,subnet,port,security_group,security_group_rule +{% endif -%} + +[agent] +root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf + +[keystone_authtoken] +signing_dir = /var/lib/neutron/keystone-signing +{% if service_host -%} +service_protocol = {{ service_protocol }} +service_host = {{ service_host }} +service_port = {{ service_port }} +auth_host = {{ auth_host }} +auth_port = {{ auth_port }} +auth_protocol = {{ auth_protocol }} +admin_tenant_name = {{ admin_tenant_name }} +admin_user = {{ admin_user }} +admin_password = {{ admin_password }} +{% endif -%} + +{% include "parts/section-database" %} + +[service_providers] +service_provider=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +service_provider=VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default +service_provider=FIREWALL:Iptables:neutron_fwaas.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default diff --git a/unit_tests/test_neutron_api_utils.py b/unit_tests/test_neutron_api_utils.py index ba9a096e..9ff7ef45 100644 --- a/unit_tests/test_neutron_api_utils.py +++ b/unit_tests/test_neutron_api_utils.py @@ -1,6 +1,7 @@ from mock import MagicMock, patch from collections import OrderedDict +from copy import deepcopy import charmhelpers.contrib.openstack.templating as templating templating.OSConfigRenderer = MagicMock() @@ -65,10 +66,18 @@ class TestNeutronAPIUtils(CharmTestCase): def 
test_determine_packages(self): pkg_list = nutils.determine_packages() - expect = nutils.BASE_PACKAGES + expect = deepcopy(nutils.BASE_PACKAGES) expect.extend(['neutron-server', 'neutron-plugin-ml2']) self.assertItemsEqual(pkg_list, expect) + def test_determine_packages_kilo(self): + self.get_os_codename_install_source.return_value = 'kilo' + pkg_list = nutils.determine_packages() + expect = deepcopy(nutils.BASE_PACKAGES) + expect.extend(['neutron-server', 'neutron-plugin-ml2']) + expect.extend(nutils.KILO_PACKAGES) + self.assertItemsEqual(pkg_list, expect) + def test_determine_ports(self): port_list = nutils.determine_ports() self.assertItemsEqual(port_list, [9696]) @@ -169,7 +178,7 @@ class TestNeutronAPIUtils(CharmTestCase): self.apt_upgrade.assert_called_with(options=dpkg_opts, fatal=True, dist=True) - pkgs = nutils.BASE_PACKAGES + pkgs = nutils.determine_packages() pkgs.sort() self.apt_install.assert_called_with(packages=pkgs, options=dpkg_opts, From 01d2d2db528a81159a06f79cb894efa3c9bb3a44 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 26 Jan 2015 09:44:26 +0000 Subject: [PATCH 110/125] [gnuoy,trivial] Pre-release charmhelper sync --- hooks/charmhelpers/__init__.py | 16 +++++++++ hooks/charmhelpers/contrib/__init__.py | 15 ++++++++ .../contrib/charmsupport/__init__.py | 15 ++++++++ .../charmhelpers/contrib/charmsupport/nrpe.py | 16 +++++++++ .../contrib/charmsupport/volumes.py | 16 +++++++++ .../contrib/hahelpers/__init__.py | 15 ++++++++ .../charmhelpers/contrib/hahelpers/apache.py | 16 +++++++++ .../charmhelpers/contrib/hahelpers/cluster.py | 22 +++++++++++- .../charmhelpers/contrib/network/__init__.py | 15 ++++++++ hooks/charmhelpers/contrib/network/ip.py | 16 +++++++++ .../contrib/network/ovs/__init__.py | 16 +++++++++ .../contrib/openstack/__init__.py | 15 ++++++++ .../contrib/openstack/alternatives.py | 16 +++++++++ .../contrib/openstack/amulet/__init__.py | 15 ++++++++ .../contrib/openstack/amulet/deployment.py | 16 +++++++++ .../contrib/openstack/amulet/utils.py | 16 +++++++++ .../charmhelpers/contrib/openstack/context.py | 16 +++++++++ hooks/charmhelpers/contrib/openstack/ip.py | 16 +++++++++ .../charmhelpers/contrib/openstack/neutron.py | 16 +++++++++ .../contrib/openstack/templates/__init__.py | 16 +++++++++ .../contrib/openstack/templating.py | 16 +++++++++ hooks/charmhelpers/contrib/openstack/utils.py | 16 +++++++++ hooks/charmhelpers/contrib/python/__init__.py | 15 ++++++++ hooks/charmhelpers/contrib/python/packages.py | 21 ++++++++++- .../charmhelpers/contrib/storage/__init__.py | 15 ++++++++ .../contrib/storage/linux/__init__.py | 15 ++++++++ .../contrib/storage/linux/ceph.py | 16 +++++++++ .../contrib/storage/linux/loopback.py | 16 +++++++++ .../charmhelpers/contrib/storage/linux/lvm.py | 16 +++++++++ .../contrib/storage/linux/utils.py | 16 +++++++++ hooks/charmhelpers/core/__init__.py | 15 ++++++++ hooks/charmhelpers/core/decorators.py | 16 +++++++++ hooks/charmhelpers/core/fstab.py | 16 +++++++++ hooks/charmhelpers/core/hookenv.py | 16 +++++++++ hooks/charmhelpers/core/host.py | 35 ++++++++++++++++--- hooks/charmhelpers/core/services/__init__.py | 16 +++++++++ hooks/charmhelpers/core/services/base.py | 16 +++++++++ hooks/charmhelpers/core/services/helpers.py | 16 +++++++++ hooks/charmhelpers/core/sysctl.py | 16 +++++++++ hooks/charmhelpers/core/templating.py | 16 +++++++++ hooks/charmhelpers/fetch/__init__.py | 16 +++++++++ hooks/charmhelpers/fetch/archiveurl.py | 16 +++++++++ hooks/charmhelpers/fetch/bzrurl.py | 26 +++++++++++++- 
hooks/charmhelpers/fetch/giturl.py | 20 +++++++++++ hooks/charmhelpers/payload/__init__.py | 16 +++++++++ hooks/charmhelpers/payload/execd.py | 16 +++++++++ 46 files changed, 763 insertions(+), 7 deletions(-) diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py index b46e2e23..f72e7f84 100644 --- a/hooks/charmhelpers/__init__.py +++ b/hooks/charmhelpers/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Bootstrap charm-helpers, installing its dependencies if necessary using # only standard libraries. import subprocess diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py index e69de29b..d1400a02 100644 --- a/hooks/charmhelpers/contrib/__init__.py +++ b/hooks/charmhelpers/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers/contrib/charmsupport/__init__.py b/hooks/charmhelpers/contrib/charmsupport/__init__.py index e69de29b..d1400a02 100644 --- a/hooks/charmhelpers/contrib/charmsupport/__init__.py +++ b/hooks/charmhelpers/contrib/charmsupport/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py index f3a936d0..0fd0a9d8 100644 --- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + """Compatibility with the nrpe-external-master charm""" # Copyright 2012 Canonical Ltd. # diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py index d61aa47f..320961b9 100644 --- a/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + ''' Functions for managing volumes in juju units. One volume is supported per unit. Subordinates may have their own storage, provided it is on its own partition. diff --git a/hooks/charmhelpers/contrib/hahelpers/__init__.py b/hooks/charmhelpers/contrib/hahelpers/__init__.py index e69de29b..d1400a02 100644 --- a/hooks/charmhelpers/contrib/hahelpers/__init__.py +++ b/hooks/charmhelpers/contrib/hahelpers/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py index 6616ffff..00917195 100644 --- a/hooks/charmhelpers/contrib/hahelpers/apache.py +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2012 Canonical Ltd. # diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 912b2fe3..9a2588b6 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2012 Canonical Ltd. # @@ -205,19 +221,23 @@ def determine_apache_port(public_port, singlenode_mode=False): return public_port - (i * 10) -def get_hacluster_config(): +def get_hacluster_config(exclude_keys=None): ''' Obtains all relevant configuration from charm configuration required for initiating a relation to hacluster: ha-bindiface, ha-mcastport, vip + param: exclude_keys: list of setting key(s) to be excluded. returns: dict: A dict containing settings keyed by setting name. raises: HAIncompleteConfig if settings are missing. ''' settings = ['ha-bindiface', 'ha-mcastport', 'vip'] conf = {} for setting in settings: + if exclude_keys and setting in exclude_keys: + continue + conf[setting] = config_get(setting) missing = [] [missing.append(s) for s, v in six.iteritems(conf) if v is None] diff --git a/hooks/charmhelpers/contrib/network/__init__.py b/hooks/charmhelpers/contrib/network/__init__.py index e69de29b..d1400a02 100644 --- a/hooks/charmhelpers/contrib/network/__init__.py +++ b/hooks/charmhelpers/contrib/network/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 8dc83165..98b17544 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import glob import re import subprocess diff --git a/hooks/charmhelpers/contrib/network/ovs/__init__.py b/hooks/charmhelpers/contrib/network/ovs/__init__.py index 8f8a5230..77e2db7f 100644 --- a/hooks/charmhelpers/contrib/network/ovs/__init__.py +++ b/hooks/charmhelpers/contrib/network/ovs/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + ''' Helpers for interacting with OpenvSwitch ''' import subprocess import os diff --git a/hooks/charmhelpers/contrib/openstack/__init__.py b/hooks/charmhelpers/contrib/openstack/__init__.py index e69de29b..d1400a02 100644 --- a/hooks/charmhelpers/contrib/openstack/__init__.py +++ b/hooks/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers/contrib/openstack/alternatives.py b/hooks/charmhelpers/contrib/openstack/alternatives.py index b413259c..ef77caf3 100644 --- a/hooks/charmhelpers/contrib/openstack/alternatives.py +++ b/hooks/charmhelpers/contrib/openstack/alternatives.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ ''' Helper for managing alternatives for file conflict resolution ''' import subprocess diff --git a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/hooks/charmhelpers/contrib/openstack/amulet/__init__.py index e69de29b..d1400a02 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/__init__.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index f3fee074..c50d3ec6 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import six from charmhelpers.contrib.amulet.deployment import ( AmuletDeployment diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 3e0cc61c..9c3d918a 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import logging import os import time diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index eaa89a67..c7c4cd4a 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import json import os import time diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py index f062c807..9eabed73 100644 --- a/hooks/charmhelpers/contrib/openstack/ip.py +++ b/hooks/charmhelpers/contrib/openstack/ip.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + from charmhelpers.core.hookenv import ( config, unit_get, diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 095cc24b..902757fe 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Various utilies for dealing with Neutron and the renaming from Quantum. from subprocess import check_output diff --git a/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/hooks/charmhelpers/contrib/openstack/templates/__init__.py index 0b49ad28..75876796 100644 --- a/hooks/charmhelpers/contrib/openstack/templates/__init__.py +++ b/hooks/charmhelpers/contrib/openstack/templates/__init__.py @@ -1,2 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # dummy __init__.py to fool syncer into thinking this is a syncable python # module diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 33df0675..24cb272b 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import six diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index ddd40ce5..26259a03 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,5 +1,21 @@ #!/usr/bin/python +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # Common python helper functions used for OpenStack charms. from collections import OrderedDict from functools import wraps diff --git a/hooks/charmhelpers/contrib/python/__init__.py b/hooks/charmhelpers/contrib/python/__init__.py index e69de29b..d1400a02 100644 --- a/hooks/charmhelpers/contrib/python/__init__.py +++ b/hooks/charmhelpers/contrib/python/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers/contrib/python/packages.py b/hooks/charmhelpers/contrib/python/packages.py index 78162b1b..d848a120 100644 --- a/hooks/charmhelpers/contrib/python/packages.py +++ b/hooks/charmhelpers/contrib/python/packages.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # coding: utf-8 +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + __author__ = "Jorge Niedbalski " from charmhelpers.fetch import apt_install, apt_update @@ -35,7 +51,7 @@ def pip_install_requirements(requirements, **options): pip_execute(command) -def pip_install(package, fatal=False, **options): +def pip_install(package, fatal=False, upgrade=False, **options): """Install a python package""" command = ["install"] @@ -43,6 +59,9 @@ def pip_install(package, fatal=False, **options): for option in parse_options(options, available_options): command.append(option) + if upgrade: + command.append('--upgrade') + if isinstance(package, list): command.extend(package) else: diff --git a/hooks/charmhelpers/contrib/storage/__init__.py b/hooks/charmhelpers/contrib/storage/__init__.py index e69de29b..d1400a02 100644 --- a/hooks/charmhelpers/contrib/storage/__init__.py +++ b/hooks/charmhelpers/contrib/storage/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers/contrib/storage/linux/__init__.py b/hooks/charmhelpers/contrib/storage/linux/__init__.py index e69de29b..d1400a02 100644 --- a/hooks/charmhelpers/contrib/storage/linux/__init__.py +++ b/hooks/charmhelpers/contrib/storage/linux/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 6ebeab5c..31ea7f9e 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2012 Canonical Ltd. # diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py index a22c3d7b..c296f098 100644 --- a/hooks/charmhelpers/contrib/storage/linux/loopback.py +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import re from subprocess import ( diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py index 0aa65f4f..34b5f71a 100644 --- a/hooks/charmhelpers/contrib/storage/linux/lvm.py +++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + from subprocess import ( CalledProcessError, check_call, diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index c6a15e14..c8373b72 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import os import re from stat import S_ISBLK diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py index e69de29b..d1400a02 100644 --- a/hooks/charmhelpers/core/__init__.py +++ b/hooks/charmhelpers/core/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers/core/decorators.py b/hooks/charmhelpers/core/decorators.py index 029a4ef4..bb05620b 100644 --- a/hooks/charmhelpers/core/decorators.py +++ b/hooks/charmhelpers/core/decorators.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + # # Copyright 2014 Canonical Ltd. # diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py index 0adf0db3..be7de248 100644 --- a/hooks/charmhelpers/core/fstab.py +++ b/hooks/charmhelpers/core/fstab.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + __author__ = 'Jorge Niedbalski R. ' import io diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 69ae4564..cf552b39 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + "Interactions with the Juju environment" # Copyright 2013 Canonical Ltd. # diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 5221120c..cf2cbe14 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + """Tools for working with the host system""" # Copyright 2012 Canonical Ltd. # @@ -168,10 +184,10 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): log("Removing non-directory file {} prior to mkdir()".format(path)) os.unlink(realpath) os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) elif not path_exists: os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) + os.chown(realpath, uid, gid) + os.chmod(realpath, perms) def write_file(path, content, owner='root', group='root', perms=0o444): @@ -389,6 +405,9 @@ def cmp_pkgrevno(package, revno, pkgcache=None): * 0 => Installed revno is the same as supplied arg * -1 => Installed revno is less than supplied arg + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. ''' import apt_pkg if not pkgcache: @@ -407,13 +426,21 @@ def chdir(d): os.chdir(cur) -def chownr(path, owner, group): +def chownr(path, owner, group, follow_links=True): uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid + if follow_links: + chown = os.chown + else: + chown = os.lchown for root, dirs, files in os.walk(path): for name in dirs + files: full = os.path.join(root, name) broken_symlink = os.path.lexists(full) and not os.path.exists(full) if not broken_symlink: - os.chown(full, uid, gid) + chown(full, uid, gid) + + +def lchownr(path, owner, group): + chownr(path, owner, group, follow_links=False) diff --git a/hooks/charmhelpers/core/services/__init__.py b/hooks/charmhelpers/core/services/__init__.py index 69dde79a..0928158b 100644 --- a/hooks/charmhelpers/core/services/__init__.py +++ b/hooks/charmhelpers/core/services/__init__.py @@ -1,2 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + from .base import * # NOQA from .helpers import * # NOQA diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py index 87ecb130..c5534e4c 100644 --- a/hooks/charmhelpers/core/services/base.py +++ b/hooks/charmhelpers/core/services/base.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import re import json diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 163a7932..5e3af9da 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import yaml from charmhelpers.core import hookenv diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py index 0f299630..d642a371 100644 --- a/hooks/charmhelpers/core/sysctl.py +++ b/hooks/charmhelpers/core/sysctl.py @@ -1,6 +1,22 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + __author__ = 'Jorge Niedbalski R. ' import yaml diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py index 569eaed6..97669092 100644 --- a/hooks/charmhelpers/core/templating.py +++ b/hooks/charmhelpers/core/templating.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os from charmhelpers.core import host diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index aceadea4..792e629a 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import importlib from tempfile import NamedTemporaryFile import time diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 8a4624b2..d25a0ddd 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import hashlib import re diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py index 8ef48f30..3531315a 100644 --- a/hooks/charmhelpers/fetch/bzrurl.py +++ b/hooks/charmhelpers/fetch/bzrurl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ import os from charmhelpers.fetch import ( BaseFetchHandler, @@ -11,10 +27,12 @@ if six.PY3: try: from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors except ImportError: from charmhelpers.fetch import apt_install apt_install("python-bzrlib") from bzrlib.branch import Branch + from bzrlib import bzrdir, workingtree, errors class BzrUrlFetchHandler(BaseFetchHandler): @@ -34,9 +52,15 @@ class BzrUrlFetchHandler(BaseFetchHandler): if url_parts.scheme == "lp": from bzrlib.plugin import load_plugins load_plugins() + try: + local_branch = bzrdir.BzrDir.create_branch_convenience(dest) + except errors.AlreadyControlDirError: + local_branch = Branch.open(dest) try: remote_branch = Branch.open(source) - remote_branch.bzrdir.sprout(dest).open_branch() + remote_branch.push(local_branch) + tree = workingtree.WorkingTree.open(dest) + tree.update() except Exception as e: raise e diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py index f3aa2821..5376786b 100644 --- a/hooks/charmhelpers/fetch/giturl.py +++ b/hooks/charmhelpers/fetch/giturl.py @@ -1,3 +1,19 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os from charmhelpers.fetch import ( BaseFetchHandler, @@ -16,6 +32,8 @@ except ImportError: apt_install("python-git") from git import Repo +from git.exc import GitCommandError + class GitUrlFetchHandler(BaseFetchHandler): """Handler for git branches via generic and github URLs""" @@ -46,6 +64,8 @@ class GitUrlFetchHandler(BaseFetchHandler): mkdir(dest_dir, perms=0o755) try: self.clone(source, dest_dir, branch) + except GitCommandError as e: + raise UnhandledSource(e.message) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/hooks/charmhelpers/payload/__init__.py b/hooks/charmhelpers/payload/__init__.py index fc9fbc08..e6f42497 100644 --- a/hooks/charmhelpers/payload/__init__.py +++ b/hooks/charmhelpers/payload/__init__.py @@ -1 +1,17 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + "Tools for working with files injected into a charm just before deployment." 
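Editor's note (not part of the patch): the sync in this commit extends a couple of helper signatures that charm code can now rely on, as shown in the hunks above for contrib/python/packages.py (pip_install gains an upgrade flag) and contrib/hahelpers/cluster.py (get_hacluster_config gains exclude_keys). The following is a minimal, illustrative sketch of how hook code might call them after this sync; it assumes it runs inside a Juju hook context, and the package name and excluded key are placeholder examples rather than anything this charm actually does.

    # Illustrative sketch only -- assumes a Juju hook environment with the
    # synced charm-helpers on the path.
    from charmhelpers.contrib.python.packages import pip_install
    from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config

    # pip_install() now accepts upgrade=True, which appends --upgrade to the
    # generated pip command before installing the package.
    pip_install('jinja2', upgrade=True)

    # get_hacluster_config() now accepts exclude_keys, so a caller that does
    # not require a VIP can skip that setting instead of tripping the
    # HAIncompleteConfig check for missing values.
    ha_config = get_hacluster_config(exclude_keys=['vip'])
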
diff --git a/hooks/charmhelpers/payload/execd.py b/hooks/charmhelpers/payload/execd.py index 6476a75f..4d4d81a6 100644 --- a/hooks/charmhelpers/payload/execd.py +++ b/hooks/charmhelpers/payload/execd.py @@ -1,5 +1,21 @@ #!/usr/bin/env python +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + import os import sys import subprocess From 94328c6655dc2ac107f387c0aa013fa0802a80c6 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 3 Feb 2015 07:33:30 +0200 Subject: [PATCH 111/125] tidy lint --- hooks/neutron_api_hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index aa74e8ac..f71b8526 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -114,7 +114,7 @@ def install(): @restart_on_change(restart_map(), stopstart=True) def config_changed(): apt_install(filter_installed_packages( - determine_packages(config('openstack-origin'))), + determine_packages(config('openstack-origin'))), fatal=True) if config('prefer-ipv6'): setup_ipv6() From 2287be12ab0f30234b252636fc24b3a76c9b479f Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Wed, 11 Feb 2015 12:38:30 +0000 Subject: [PATCH 112/125] charm-helpers sync --- hooks/charmhelpers/core/host.py | 10 +- hooks/charmhelpers/core/sysctl.py | 16 +- hooks/charmhelpers/core/templating.py | 6 +- hooks/charmhelpers/core/unitdata.py | 477 ++++++++++++++++++++++++++ 4 files changed, 496 insertions(+), 13 deletions(-) create mode 100644 hooks/charmhelpers/core/unitdata.py diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index cf2cbe14..b771c611 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -191,11 +191,11 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a string""" + """Create or overwrite a file with the contents of a byte string.""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'w') as target: + with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) target.write(content) @@ -305,11 +305,11 @@ def restart_on_change(restart_map, stopstart=False): ceph_client_changed function. 
""" def wrap(f): - def wrapped_f(*args): + def wrapped_f(*args, **kwargs): checksums = {} for path in restart_map: checksums[path] = file_hash(path) - f(*args) + f(*args, **kwargs) restarts = [] for path in restart_map: if checksums[path] != file_hash(path): @@ -361,7 +361,7 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) if matched: interface = matched.groups()[0] else: diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py index d642a371..8e1b9eeb 100644 --- a/hooks/charmhelpers/core/sysctl.py +++ b/hooks/charmhelpers/core/sysctl.py @@ -26,25 +26,31 @@ from subprocess import check_call from charmhelpers.core.hookenv import ( log, DEBUG, + ERROR, ) def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } - :type sysctl_dict: dict + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - sysctl_dict = yaml.load(sysctl_dict) + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict.items(): + for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), level=DEBUG) check_call(["sysctl", "-p", sysctl_file]) diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py index 97669092..45319998 100644 --- a/hooks/charmhelpers/core/templating.py +++ b/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ from charmhelpers.core import hookenv def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None): + perms=0o444, templates_dir=None, encoding='UTF-8'): """ Render a template. @@ -64,5 +64,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group) - host.write_file(target, content, owner, group, perms) + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..01329ab7 --- /dev/null +++ b/hooks/charmhelpers/core/unitdata.py @@ -0,0 +1,477 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . +# +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. 
The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are automatically committed at hook exit. That's + currently regardless of exit code. + + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. + """ + def __init__(self, path=None): + self.db_path = path + if path is None: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def _scoped_query(self, stmt, params=None): + if params is None: + params = [] + return stmt, params + + def get(self, key, default=None, record=False): + self.cursor.execute( + *self._scoped_query( + 'select data from kv where key=?', [key])) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + stmt = "select key, data from kv where key like '%s%%'" % key_prefix + self.cursor.execute(*self._scoped_query(stmt)) + result = self.cursor.fetchall() + + if not result: + return None + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + serialized = json.dumps(value) + + self.cursor.execute( + 'select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? 
and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. + + Records all unit information, and stores deltas for processing + by the hook. 
+ + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. + # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if not charm_rev in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', data['env']) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV From ecb9db4af369c7b337074edb1c1d58f591506fa5 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 13 Feb 2015 15:02:52 +0000 Subject: [PATCH 113/125] Fix lint and unit tests after previous commit --- hooks/neutron_api_hooks.py | 5 ----- unit_tests/test_neutron_api_hooks.py | 1 - 2 files changed, 6 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index bd740954..08218fd4 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -33,9 +33,6 @@ from charmhelpers.contrib.openstack.utils import ( openstack_upgrade_available, sync_db_with_multi_ipv6_addresses ) -from charmhelpers.contrib.openstack.neutron import ( - neutron_plugin_attribute, -) from neutron_api_utils import ( NEUTRON_CONF, @@ -197,9 +194,7 @@ def db_changed(): @hooks.hook('pgsql-db-relation-changed') @restart_on_change(restart_map()) def postgresql_neutron_db_changed(): - plugin = config('neutron-plugin') CONFIGS.write(NEUTRON_CONF) - @hooks.hook('amqp-relation-broken', diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 3674bf8a..b6626f78 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -38,7 +38,6 @@ TO_PATCH = [ 'get_overlay_network_type', 'is_relation_made', 'log', - 'neutron_plugin_attribute', 'open_port', 
'openstack_upgrade_available', 'relation_get', From de2ae2bfa8defc288f5316e98cca6aa1da4bd7d8 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 13 Feb 2015 15:04:48 +0000 Subject: [PATCH 114/125] Fix lint and unit tests after previous commit --- hooks/neutron_api_hooks.py | 5 ----- unit_tests/test_neutron_api_hooks.py | 1 - 2 files changed, 6 deletions(-) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 93a56236..1b9aa9c6 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -33,9 +33,6 @@ from charmhelpers.contrib.openstack.utils import ( openstack_upgrade_available, sync_db_with_multi_ipv6_addresses ) -from charmhelpers.contrib.openstack.neutron import ( - neutron_plugin_attribute, -) from neutron_api_utils import ( NEUTRON_CONF, @@ -195,9 +192,7 @@ def db_changed(): @hooks.hook('pgsql-db-relation-changed') @restart_on_change(restart_map()) def postgresql_neutron_db_changed(): - plugin = config('neutron-plugin') CONFIGS.write(NEUTRON_CONF) - @hooks.hook('amqp-relation-broken', diff --git a/unit_tests/test_neutron_api_hooks.py b/unit_tests/test_neutron_api_hooks.py index 3674bf8a..b6626f78 100644 --- a/unit_tests/test_neutron_api_hooks.py +++ b/unit_tests/test_neutron_api_hooks.py @@ -38,7 +38,6 @@ TO_PATCH = [ 'get_overlay_network_type', 'is_relation_made', 'log', - 'neutron_plugin_attribute', 'open_port', 'openstack_upgrade_available', 'relation_get', From 5f7fc1b01e7a7b1c47a90ff9a3fd050c62796518 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 16 Feb 2015 18:05:37 +0000 Subject: [PATCH 115/125] Explicitly install python-six to get cloud archive version installed --- hooks/neutron_api_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 6d5155d8..5d31a281 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -42,6 +42,7 @@ BASE_PACKAGES = [ 'python-keystoneclient', 'python-mysqldb', 'python-psycopg2', + 'python-six', 'uuid', ] From 4852b063cbddee0d9239ff89079cdf78efa07690 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 17 Feb 2015 07:10:15 +0000 Subject: [PATCH 116/125] Add amulet tests --- Makefile | 14 +- ...pers-sync.yaml => charm-helpers-hooks.yaml | 0 charm-helpers-tests.yaml | 5 + hooks/charmhelpers/contrib/python/packages.py | 4 +- hooks/charmhelpers/core/fstab.py | 4 +- hooks/charmhelpers/core/sysctl.py | 4 +- hooks/charmhelpers/core/unitdata.py | 2 +- hooks/charmhelpers/fetch/archiveurl.py | 20 +- hooks/charmhelpers/fetch/giturl.py | 2 +- hooks/neutron_api_utils.py | 1 + tests/00-setup | 11 + tests/14-basic-precise-icehouse | 11 + tests/15-basic-trusty-icehouse | 9 + tests/16-basic-trusty-juno | 11 + tests/README | 53 +++ tests/basic_deployment.py | 381 ++++++++++++++++++ tests/charmhelpers/__init__.py | 38 ++ tests/charmhelpers/contrib/__init__.py | 15 + tests/charmhelpers/contrib/amulet/__init__.py | 15 + .../charmhelpers/contrib/amulet/deployment.py | 93 +++++ tests/charmhelpers/contrib/amulet/utils.py | 195 +++++++++ .../contrib/openstack/__init__.py | 15 + .../contrib/openstack/amulet/__init__.py | 15 + .../contrib/openstack/amulet/deployment.py | 111 +++++ .../contrib/openstack/amulet/utils.py | 294 ++++++++++++++ 25 files changed, 1303 insertions(+), 20 deletions(-) rename charm-helpers-sync.yaml => charm-helpers-hooks.yaml (100%) create mode 100644 charm-helpers-tests.yaml create mode 100755 tests/00-setup create mode 100755 tests/14-basic-precise-icehouse create mode 100755 tests/15-basic-trusty-icehouse create mode 100755 
tests/16-basic-trusty-juno create mode 100644 tests/README create mode 100644 tests/basic_deployment.py create mode 100644 tests/charmhelpers/__init__.py create mode 100644 tests/charmhelpers/contrib/__init__.py create mode 100644 tests/charmhelpers/contrib/amulet/__init__.py create mode 100644 tests/charmhelpers/contrib/amulet/deployment.py create mode 100644 tests/charmhelpers/contrib/amulet/utils.py create mode 100644 tests/charmhelpers/contrib/openstack/__init__.py create mode 100644 tests/charmhelpers/contrib/openstack/amulet/__init__.py create mode 100644 tests/charmhelpers/contrib/openstack/amulet/deployment.py create mode 100644 tests/charmhelpers/contrib/openstack/amulet/utils.py diff --git a/Makefile b/Makefile index 616b9a1f..91874fb4 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks unit_tests + @flake8 --exclude hooks/charmhelpers hooks unit_tests tests @charm proof unit_test: @@ -15,7 +15,17 @@ bin/charm_helpers_sync.py: > bin/charm_helpers_sync.py sync: bin/charm_helpers_sync.py - @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml +# @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml + +test: + @echo Starting Amulet tests... + # coreycb note: The -v should only be temporary until Amulet sends + # raise_status() messages to stderr: + # https://bugs.launchpad.net/amulet/+bug/1320357 + @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \ + 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse \ + 16-basic-trusty-juno publish: lint unit_test bzr push lp:charms/neutron-api diff --git a/charm-helpers-sync.yaml b/charm-helpers-hooks.yaml similarity index 100% rename from charm-helpers-sync.yaml rename to charm-helpers-hooks.yaml diff --git a/charm-helpers-tests.yaml b/charm-helpers-tests.yaml new file mode 100644 index 00000000..48b12f6f --- /dev/null +++ b/charm-helpers-tests.yaml @@ -0,0 +1,5 @@ +branch: lp:charm-helpers +destination: tests/charmhelpers +include: + - contrib.amulet + - contrib.openstack.amulet diff --git a/hooks/charmhelpers/contrib/python/packages.py b/hooks/charmhelpers/contrib/python/packages.py index d848a120..8659516b 100644 --- a/hooks/charmhelpers/contrib/python/packages.py +++ b/hooks/charmhelpers/contrib/python/packages.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = "Jorge Niedbalski " - from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import log @@ -29,6 +27,8 @@ except ImportError: apt_install('python-pip') from pip import main as pip_execute +__author__ = "Jorge Niedbalski " + def parse_options(given, available): """Given a set of options, check if available""" diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py index be7de248..9cdcc886 100644 --- a/hooks/charmhelpers/core/fstab.py +++ b/hooks/charmhelpers/core/fstab.py @@ -17,11 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import io import os +__author__ = 'Jorge Niedbalski R. 
' + class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py index 8e1b9eeb..21cc8ab2 100644 --- a/hooks/charmhelpers/core/sysctl.py +++ b/hooks/charmhelpers/core/sysctl.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import yaml from subprocess import check_call @@ -29,6 +27,8 @@ from charmhelpers.core.hookenv import ( ERROR, ) +__author__ = 'Jorge Niedbalski R. ' + def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py index 01329ab7..3000134a 100644 --- a/hooks/charmhelpers/core/unitdata.py +++ b/hooks/charmhelpers/core/unitdata.py @@ -435,7 +435,7 @@ class HookData(object): os.path.join(charm_dir, 'revision')).read().strip() charm_rev = charm_rev or '0' revs = self.kv.get('charm_revisions', []) - if not charm_rev in revs: + if charm_rev not in revs: revs.append(charm_rev.strip() or '0') self.kv.set('charm_revisions', revs) diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index d25a0ddd..8dfce505 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -18,6 +18,16 @@ import os import hashlib import re +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + import six if six.PY3: from urllib.request import ( @@ -35,16 +45,6 @@ else: ) from urlparse import urlparse, urlunparse, parse_qs -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - def splituser(host): '''urllib.splituser(), but six's support of this seems broken''' diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py index 5376786b..93aae87b 100644 --- a/hooks/charmhelpers/fetch/giturl.py +++ b/hooks/charmhelpers/fetch/giturl.py @@ -32,7 +32,7 @@ except ImportError: apt_install("python-git") from git import Repo -from git.exc import GitCommandError +from git.exc import GitCommandError # noqa E402 class GitUrlFetchHandler(BaseFetchHandler): diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 6d5155d8..5b205608 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -43,6 +43,7 @@ BASE_PACKAGES = [ 'python-mysqldb', 'python-psycopg2', 'uuid', + 'python-six', ] KILO_PACKAGES = [ diff --git a/tests/00-setup b/tests/00-setup new file mode 100755 index 00000000..06cfdb07 --- /dev/null +++ b/tests/00-setup @@ -0,0 +1,11 @@ +#!/bin/bash + +set -ex + +sudo add-apt-repository --yes ppa:juju/stable +sudo apt-get update --yes +sudo apt-get install --yes python-amulet \ + python-neutronclient \ + python-keystoneclient \ + python-novaclient \ + python-glanceclient diff --git a/tests/14-basic-precise-icehouse b/tests/14-basic-precise-icehouse new file mode 100755 index 00000000..fbbbd299 --- /dev/null +++ b/tests/14-basic-precise-icehouse @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic openstack-dashboard deployment on precise-icehouse.""" + +from basic_deployment import 
NeutronAPIBasicDeployment + +if __name__ == '__main__': + deployment = NeutronAPIBasicDeployment(series='precise', + openstack='cloud:precise-icehouse', + source='cloud:precise-updates/icehouse') + deployment.run_tests() diff --git a/tests/15-basic-trusty-icehouse b/tests/15-basic-trusty-icehouse new file mode 100755 index 00000000..ae602c82 --- /dev/null +++ b/tests/15-basic-trusty-icehouse @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic neutron-api deployment on trusty-icehouse.""" + +from basic_deployment import NeutronAPIBasicDeployment + +if __name__ == '__main__': + deployment = NeutronAPIBasicDeployment(series='trusty') + deployment.run_tests() diff --git a/tests/16-basic-trusty-juno b/tests/16-basic-trusty-juno new file mode 100755 index 00000000..ff2e240b --- /dev/null +++ b/tests/16-basic-trusty-juno @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic openstack-dashboard deployment on trusty-juno.""" + +from basic_deployment import NeutronAPIBasicDeployment + +if __name__ == '__main__': + deployment = NeutronAPIBasicDeployment(series='trusty', + openstack='cloud:trusty-juno', + source='cloud:trusty-updates/juno') + deployment.run_tests() diff --git a/tests/README b/tests/README new file mode 100644 index 00000000..17aa79d5 --- /dev/null +++ b/tests/README @@ -0,0 +1,53 @@ +This directory provides Amulet tests that focus on verification of +neutron-api deployments. + +In order to run tests, you'll need charm-tools installed (in addition to +juju, of course): + sudo add-apt-repository ppa:juju/stable + sudo apt-get update + sudo apt-get install charm-tools + +If you use a web proxy server to access the web, you'll need to set the +AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. + +The following examples demonstrate different ways that tests can be executed. +All examples are run from the charm's root directory. + + * To run all tests (starting with 00-setup): + + make test + + * To run a specific test module (or modules): + + juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To run a specific test module (or modules), and keep the environment + deployed after a failure: + + juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To re-run a test module against an already deployed environment (one + that was deployed by a previous call to 'juju test --set-e'): + + ./tests/15-basic-trusty-icehouse + +For debugging and test development purposes, all code should be idempotent. +In other words, the code should have the ability to be re-run without changing +the results beyond the initial run. This enables editing and re-running of a +test module against an already deployed environment, as described above. 
+ +Manual debugging tips: + + * Set the following env vars before using the OpenStack CLI as admin: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=openstack + export OS_REGION_NAME=RegionOne + + * Set the following env vars before using the OpenStack CLI as demoUser: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=demoTenant + export OS_USERNAME=demoUser + export OS_PASSWORD=password + export OS_REGION_NAME=RegionOne diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py new file mode 100644 index 00000000..0b3db9c7 --- /dev/null +++ b/tests/basic_deployment.py @@ -0,0 +1,381 @@ +#!/usr/bin/python + +import amulet + +from charmhelpers.contrib.openstack.amulet.deployment import ( + OpenStackAmuletDeployment +) + +from charmhelpers.contrib.openstack.amulet.utils import ( + OpenStackAmuletUtils, + DEBUG, # flake8: noqa + ERROR +) + +# Use DEBUG to turn on debug logging +u = OpenStackAmuletUtils(ERROR) + + +class NeutronAPIBasicDeployment(OpenStackAmuletDeployment): + """Amulet tests on a basic neutron-api deployment.""" + + def __init__(self, series, openstack=None, source=None, stable=False): + """Deploy the entire test environment.""" + super(NeutronAPIBasicDeployment, self).__init__(series, openstack, + source, stable) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + self._initialize_tests() + + def _add_services(self): + """Add services + + Add the services that we're testing, where neutron-api is local, + and the rest of the service are from lp branches that are + compatible with the local charm (e.g. stable or next). + """ + this_service = {'name': 'neutron-api'} + other_services = [{'name': 'mysql'}, + {'name': 'rabbitmq-server'}, {'name': 'keystone'}, + {'name': 'neutron-openvswitch'}, + {'name': 'nova-cloud-controller'}, + {'name': 'quantum-gateway'}, + {'name': 'nova-compute'}] + super(NeutronAPIBasicDeployment, self)._add_services(this_service, + other_services) + + def _add_relations(self): + """Add all of the relations for the services.""" + relations = { + 'neutron-api:shared-db': 'mysql:shared-db', + 'neutron-api:amqp': 'rabbitmq-server:amqp', + 'neutron-api:neutron-api': 'nova-cloud-controller:neutron-api', + 'neutron-api:neutron-plugin-api': 'quantum-gateway:' + 'neutron-plugin-api', + 'neutron-api:neutron-plugin-api': 'neutron-openvswitch:' + 'neutron-plugin-api', + 'neutron-api:identity-service': 'keystone:identity-service', + 'keystone:shared-db': 'mysql:shared-db', + 'nova-compute:neutron-plugin': 'neutron-openvswitch:neutron-plugin' + } + super(NeutronAPIBasicDeployment, self)._add_relations(relations) + + def _configure_services(self): + """Configure all of the services.""" + keystone_config = {'admin-password': 'openstack', + 'admin-token': 'ubuntutesting'} + nova_cc_config = {'network-manager': 'Quantum', + 'quantum-security-groups': 'yes'} + configs = {'keystone': keystone_config, + 'nova-cloud-controller': nova_cc_config} + super(NeutronAPIBasicDeployment, self)._configure_services(configs) + + def _initialize_tests(self): + """Perform final initialization before tests get run.""" + # Access the sentries for inspecting service units + self.mysql_sentry = self.d.sentry.unit['mysql/0'] + self.keystone_sentry = self.d.sentry.unit['keystone/0'] + self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] + self.nova_cc_sentry = 
self.d.sentry.unit['nova-cloud-controller/0'] + self.quantum_gateway_sentry = self.d.sentry.unit['quantum-gateway/0'] + self.neutron_api_sentry = self.d.sentry.unit['neutron-api/0'] + self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] + +# def test_neutron_api_shared_db_relation(self): +# """Verify the neutron-api to mysql shared-db relation data""" +# unit = self.neutron_api_sentry +# relation = ['shared-db', 'mysql:shared-db'] +# expected = { +# 'private-address': u.valid_ip, +# 'database': 'neutron', +# 'username': 'neutron', +# 'hostname': u.valid_ip +# } +# +# ret = u.validate_relation_data(unit, relation, expected) +# if ret: +# message = u.relation_error('neutron-api shared-db', ret) +# amulet.raise_status(amulet.FAIL, msg=message) +# +# def test_shared_db_neutron_api_relation(self): +# """Verify the mysql to neutron-api shared-db relation data""" +# unit = self.mysql_sentry +# relation = ['shared-db', 'neutron-api:shared-db'] +# expected = { +# 'allowed_units': 'neutron-api/0', +# 'db_host': u.valid_ip, +# 'private-address': u.valid_ip, +# } +# ret = u.validate_relation_data(unit, relation, expected) +# rel_data = unit.relation('shared-db', 'neutron-api:shared-db') +# if ret or 'password' not in rel_data: +# message = u.relation_error('mysql shared-db', ret) +# amulet.raise_status(amulet.FAIL, msg=message) +# +# def test_neutron_api_amqp_relation(self): +# """Verify the neutron-api to rabbitmq-server amqp relation data""" +# unit = self.neutron_api_sentry +# relation = ['amqp', 'rabbitmq-server:amqp'] +# expected = { +# 'username': 'neutron', +# 'private-address': u.valid_ip, +# 'vhost': 'openstack' +# } +# +# ret = u.validate_relation_data(unit, relation, expected) +# if ret: +# message = u.relation_error('neutron-api amqp', ret) +# amulet.raise_status(amulet.FAIL, msg=message) +# +# def test_amqp_neutron_api_relation(self): +# """Verify the rabbitmq-server to neutron-api amqp relation data""" +# unit = self.rabbitmq_sentry +# relation = ['amqp', 'neutron-api:amqp'] +# rel_data = unit.relation('amqp', 'neutron-api:amqp') +# expected = { +# 'hostname': u.valid_ip, +# 'private-address': u.valid_ip, +# } +# +# ret = u.validate_relation_data(unit, relation, expected) +# if ret or not 'password' in rel_data: +# message = u.relation_error('rabbitmq amqp', ret) +# amulet.raise_status(amulet.FAIL, msg=message) +# +# def test_neutron_api_identity_relation(self): +# """Verify the neutron-api to keystone identity-service relation data""" +# unit = self.neutron_api_sentry +# relation = ['identity-service', 'keystone:identity-service'] +# api_ip = unit.relation('identity-service', +# 'keystone:identity-service')['private-address'] +# api_endpoint = "http://%s:9696" % (api_ip) +# expected = { +# 'private-address': u.valid_ip, +# 'quantum_region': 'RegionOne', +# 'quantum_service': 'quantum', +# 'quantum_admin_url': api_endpoint, +# 'quantum_internal_url': api_endpoint, +# 'quantum_public_url': api_endpoint, +# } +# +# ret = u.validate_relation_data(unit, relation, expected) +# if ret: +# message = u.relation_error('neutron-api identity-service', ret) +# amulet.raise_status(amulet.FAIL, msg=message) +# +# def test_keystone_neutron_api_identity_relation(self): +# """Verify the neutron-api to keystone identity-service relation data""" +# unit = self.keystone_sentry +# relation = ['identity-service', 'neutron-api:identity-service'] +# id_relation = unit.relation('identity-service', +# 'neutron-api:identity-service') +# id_ip = id_relation['private-address'] +# expected = { +# 
'admin_token': 'ubuntutesting', +# 'auth_host': id_ip, +# 'auth_port': "35357", +# 'auth_protocol': 'http', +# 'https_keystone': "False", +# 'private-address': id_ip, +# 'service_host': id_ip, +# } +# ret = u.validate_relation_data(unit, relation, expected) +# if ret: +# message = u.relation_error('neutron-api identity-service', ret) +# amulet.raise_status(amulet.FAIL, msg=message) +# +# def test_neutron_api_plugin_relation(self): +# """Verify neutron-api to neutron-openvswitch neutron-plugin-api""" +# unit = self.neutron_api_sentry +# relation = ['neutron-plugin-api', +# 'neutron-openvswitch:neutron-plugin-api'] +# expected = { +# 'private-address': u.valid_ip, +# } +# ret = u.validate_relation_data(unit, relation, expected) +# if ret: +# message = u.relation_error('neutron-api neutron-plugin-api', ret) +# amulet.raise_status(amulet.FAIL, msg=message) +# +# # XXX Test missing to examine the relation data neutron-openvswitch is +# # receiving. Current;y this data cannot be interegated due to +# # Bug#1421388 +# + def test_z_restart_on_config_change(self): + """Verify that the specified services are restarted when the config + is changed. + + Note(coreycb): The method name with the _z_ is a little odd + but it forces the test to run last. It just makes things + easier because restarting services requires re-authorization. + """ + conf = '/etc/neutron/neutron.conf' + services = ['neutron-server'] + self.d.configure('neutron-api', {'use-syslog': 'True'}) + stime = 60 + for s in services: + if not u.service_restarted(self.neutron_api_sentry, s, conf, + pgrep_full=True, sleep_time=stime): + self.d.configure('neutron-api', {'use-syslog': 'False'}) + msg = "service {} didn't restart after config change".format(s) + amulet.raise_status(amulet.FAIL, msg=msg) + stime = 0 + self.d.configure('neutron-api', {'use-syslog': 'False'}) +# +# def test_neutron_api_novacc_relation(self): +# """Verify the neutron-api to nova-cloud-controller relation data""" +# unit = self.neutron_api_sentry +# relation = ['neutron-api', 'nova-cloud-controller:neutron-api'] +# api_ip = unit.relation('identity-service', +# 'keystone:identity-service')['private-address'] +# api_endpoint = "http://%s:9696" % (api_ip) +# expected = { +# 'private-address': api_ip, +# 'neutron-plugin': 'ovs', +# 'neutron-security-groups': "no", +# 'neutron-url': api_endpoint, +# } +# ret = u.validate_relation_data(unit, relation, expected) +# if ret: +# message = u.relation_error('neutron-api neutron-api', ret) +# amulet.raise_status(amulet.FAIL, msg=message) +# +# def test_novacc_neutron_api_relation(self): +# """Verify the nova-cloud-controller to neutron-api relation data""" +# unit = self.nova_cc_sentry +# relation = ['neutron-api', 'neutron-api:neutron-api'] +# cc_ip = unit.relation('neutron-api', +# 'neutron-api:neutron-api')['private-address'] +# cc_endpoint = "http://%s:8774/v2" % (cc_ip) +# expected = { +# 'private-address': cc_ip, +# 'nova_url': cc_endpoint, +# } +# ret = u.validate_relation_data(unit, relation, expected) +# if ret: +# message = u.relation_error('nova-cc neutron-api', ret) +# amulet.raise_status(amulet.FAIL, msg=message) +# +# def test_neutron_config(self): +# """Verify the data in the neutron config file.""" +# unit = self.neutron_api_sentry +# cc_relation = self.nova_cc_sentry.relation('neutron-api', +# 'neutron-api:neutron-api') +# rabbitmq_relation = self.rabbitmq_sentry.relation('amqp', +# 'neutron-api:amqp') +# ks_rel = self.keystone_sentry.relation('identity-service', +# 'neutron-api:identity-service') +# +# 
nova_auth_url = '%s://%s:%s/v2.0' % (ks_rel['auth_protocol'], +# ks_rel['auth_host'], +# ks_rel['auth_port']) +# db_relation = self.mysql_sentry.relation('shared-db', +# 'neutron-api:shared-db') +# db_conn = 'mysql://neutron:%s@%s/neutron' % (db_relation['password'], +# db_relation['db_host']) +# conf = '/etc/neutron/neutron.conf' +# expected = { +# 'DEFAULT': { +# 'verbose': 'False', +# 'debug': 'False', +# 'rabbit_userid': 'neutron', +# 'rabbit_virtual_host': 'openstack', +# 'rabbit_password': rabbitmq_relation['password'], +# 'rabbit_host': rabbitmq_relation['hostname'], +# 'bind_port': '9686', +# 'nova_url': cc_relation['nova_url'], +# 'nova_region_name': 'RegionOne', +# 'nova_admin_username': ks_rel['service_username'], +# 'nova_admin_tenant_id': ks_rel['service_tenant_id'], +# 'nova_admin_password': ks_rel['service_password'], +# 'nova_admin_auth_url': nova_auth_url, +# }, +# 'keystone_authtoken': { +# 'signing_dir': '/var/lib/neutron/keystone-signing', +# 'service_protocol': ks_rel['service_protocol'], +# 'service_host': ks_rel['service_host'], +# 'service_port': ks_rel['service_port'], +# 'auth_host': ks_rel['auth_host'], +# 'auth_port': ks_rel['auth_port'], +# 'auth_protocol': ks_rel['auth_protocol'], +# 'admin_tenant_name': 'services', +# 'admin_user': 'quantum', +# 'admin_password': ks_rel['service_password'], +# }, +# 'database': { +# 'connection': db_conn, +# }, +# } +# +# for section, pairs in expected.iteritems(): +# ret = u.validate_config_data(unit, conf, section, pairs) +# if ret: +# message = "neutron config error: {}".format(ret) +# amulet.raise_status(amulet.FAIL, msg=message) +# +# def test_ml2_config(self): +# """Verify the data in the ml2 config file. This is only available +# since icehouse.""" +# unit = self.neutron_api_sentry +# conf = '/etc/neutron/plugins/ml2/ml2_conf.ini' +# neutron_api_relation = unit.relation('shared-db', 'mysql:shared-db') +# expected = { +# 'ml2': { +# 'type_drivers': 'gre,vxlan,vlan,flat', +# 'tenant_network_types': 'gre,vxlan,vlan,flat', +# 'mechanism_drivers': 'openvswitch,hyperv,l2population', +# }, +# 'ml2_type_gre': { +# 'tunnel_id_ranges': '1:1000' +# }, +# 'ml2_type_vxlan': { +# 'vni_ranges': '1001:2000' +# }, +# 'ovs': { +# 'enable_tunneling': 'True', +# 'local_ip': neutron_api_relation['private-address'] +# }, +# 'agent': { +# 'tunnel_types': 'gre', +# }, +# 'securitygroup': { +# 'enable_security_group': 'False', +# } +# } +# +# for section, pairs in expected.iteritems(): +# ret = u.validate_config_data(unit, conf, section, pairs) +# if ret: +# message = "ml2 config error: {}".format(ret) +# amulet.raise_status(amulet.FAIL, msg=message) +# +# def test_services(self): +# """Verify the expected services are running on the corresponding +# service units.""" +# neutron_services = ['status neutron-dhcp-agent', +# 'status neutron-lbaas-agent', +# 'status neutron-metadata-agent', +# 'status neutron-plugin-openvswitch-agent', +# 'status neutron-vpn-agent', +# 'status neutron-metering-agent', +# 'status neutron-ovs-cleanup'] +# +# nova_cc_services = ['status nova-api-ec2', +# 'status nova-api-os-compute', +# 'status nova-objectstore', +# 'status nova-cert', +# 'status nova-scheduler', +# 'status nova-conductor'] +# +# commands = { +# self.mysql_sentry: ['status mysql'], +# self.keystone_sentry: ['status keystone'], +# self.nova_cc_sentry: nova_cc_services, +# self.quantum_gateway_sentry: neutron_services +# } +# +# ret = u.validate_services(commands) +# if ret: +# amulet.raise_status(amulet.FAIL, msg=ret) diff --git 
a/tests/charmhelpers/__init__.py b/tests/charmhelpers/__init__.py new file mode 100644 index 00000000..f72e7f84 --- /dev/null +++ b/tests/charmhelpers/__init__.py @@ -0,0 +1,38 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Bootstrap charm-helpers, installing its dependencies if necessary using +# only standard libraries. +import subprocess +import sys + +try: + import six # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) + import six # flake8: noqa + +try: + import yaml # flake8: noqa +except ImportError: + if sys.version_info.major == 2: + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) + else: + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) + import yaml # flake8: noqa diff --git a/tests/charmhelpers/contrib/__init__.py b/tests/charmhelpers/contrib/__init__.py new file mode 100644 index 00000000..d1400a02 --- /dev/null +++ b/tests/charmhelpers/contrib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/tests/charmhelpers/contrib/amulet/__init__.py b/tests/charmhelpers/contrib/amulet/__init__.py new file mode 100644 index 00000000..d1400a02 --- /dev/null +++ b/tests/charmhelpers/contrib/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py new file mode 100644 index 00000000..367d6b47 --- /dev/null +++ b/tests/charmhelpers/contrib/amulet/deployment.py @@ -0,0 +1,93 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import amulet +import os +import six + + +class AmuletDeployment(object): + """Amulet deployment. + + This class provides generic Amulet deployment and test runner + methods. + """ + + def __init__(self, series=None): + """Initialize the deployment environment.""" + self.series = None + + if series: + self.series = series + self.d = amulet.Deployment(series=self.series) + else: + self.d = amulet.Deployment() + + def _add_services(self, this_service, other_services): + """Add services. + + Add services to the deployment where this_service is the local charm + that we're testing and other_services are the other services that + are being used in the local amulet tests. + """ + if this_service['name'] != os.path.basename(os.getcwd()): + s = this_service['name'] + msg = "The charm's root directory name needs to be {}".format(s) + amulet.raise_status(amulet.FAIL, msg=msg) + + if 'units' not in this_service: + this_service['units'] = 1 + + self.d.add(this_service['name'], units=this_service['units']) + + for svc in other_services: + if 'location' in svc: + branch_location = svc['location'] + elif self.series: + branch_location = 'cs:{}/{}'.format(self.series, svc['name']), + else: + branch_location = None + + if 'units' not in svc: + svc['units'] = 1 + + self.d.add(svc['name'], charm=branch_location, units=svc['units']) + + def _add_relations(self, relations): + """Add all of the relations for the services.""" + for k, v in six.iteritems(relations): + self.d.relate(k, v) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _deploy(self): + """Deploy environment and wait for all hooks to finish executing.""" + try: + self.d.setup(timeout=900) + self.d.sentry.wait(timeout=900) + except amulet.helpers.TimeoutError: + amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + except Exception: + raise + + def run_tests(self): + """Run all of the methods that are prefixed with 'test_'.""" + for test in dir(self): + if test.startswith('test_'): + getattr(self, test)() diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 00000000..253fb08f --- /dev/null +++ b/tests/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,195 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import ConfigParser +import io +import logging +import re +import sys +import time + +import six + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. + """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def validate_services(self, commands): + """Validate services. + + Verify the specified services are running on the corresponding + service units. + """ + for k, v in six.iteritems(commands): + for cmd in v: + output, code = k.run(cmd) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + config = ConfigParser.ConfigParser() + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + """ + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + if config.get(section, k) != expected[k]: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, config.get(section, k), k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluate a variable and returns a + bool. 
+ """ + for k, v in six.iteritems(expected): + if k in actual: + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + self.log.debug('actual: {}'.format(repr(actual))) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): + """Get process' start time. + + Determine start time of the process based on the last modification + time of the /proc/pid directory. If pgrep_full is True, the process + name is matched against the full command line. + """ + if pgrep_full: + cmd = 'pgrep -o -f {}'.format(service) + else: + cmd = 'pgrep -o {}'.format(service) + proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) + + def service_restarted(self, sentry_unit, service, filename, + pgrep_full=False, sleep_time=20): + """Check if service was restarted. + + Compare a service's start time vs a file's last modification time + (such as a config file for that service) to determine if the service + has been restarted. + """ + print service + time.sleep(sleep_time) + if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= + self._get_file_mtime(sentry_unit, filename)): + return True + else: + return False + + def relation_error(self, name, data): + return 'unexpected relation data in {} - {}'.format(name, data) + + def endpoint_error(self, name, data): + return 'unexpected endpoint data in {} - {}'.format(name, data) diff --git a/tests/charmhelpers/contrib/openstack/__init__.py b/tests/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 00000000..d1400a02 --- /dev/null +++ b/tests/charmhelpers/contrib/openstack/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
diff --git a/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/tests/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..d1400a02 --- /dev/null +++ b/tests/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..0cfeaa4c --- /dev/null +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,111 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, series=None, openstack=None, source=None, stable=True): + """Initialize the deployment environment.""" + super(OpenStackAmuletDeployment, self).__init__(series) + self.openstack = openstack + self.source = source + self.stable = stable + # Note(coreycb): this needs to be changed when new next branches come + # out. + self.current_next = "trusty" + + def _determine_branch_locations(self, other_services): + """Determine the branch locations for the other services. 
+ + Determine if the local branch being tested is derived from its + stable or next (dev) branch, and based on this, use the corresonding + stable or next branches for the other_services.""" + base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] + + if self.stable: + for svc in other_services: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + for svc in other_services: + if svc['name'] in base_charms: + temp = 'lp:charms/{}' + svc['location'] = temp.format(svc['name']) + else: + temp = 'lp:~openstack-charmers/charms/{}/{}/next' + svc['location'] = temp.format(self.current_next, + svc['name']) + return other_services + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin/source.""" + other_services = self._determine_branch_locations(other_services) + + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + + services = other_services + services.append(this_service) + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', + 'ceph-osd', 'ceph-radosgw'] + # Openstack subordinate charms do not expose an origin option as that + # is controlled by the principle + ignore = ['neutron-openvswitch'] + + if self.openstack: + for svc in services: + if svc['name'] not in use_source + ignore: + config = {'openstack-origin': self.openstack} + self.d.configure(svc['name'], config) + + if self.source: + for svc in services: + if svc['name'] in use_source and svc['name'] not in ignore: + config = {'source': self.source} + self.d.configure(svc['name'], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + (self.precise_essex, self.precise_folsom, self.precise_grizzly, + self.precise_havana, self.precise_icehouse, + self.trusty_icehouse) = range(6) + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse} + return releases[(self.series, self.openstack)] diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..9c3d918a --- /dev/null +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,294 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
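
# A minimal, illustrative sketch of how the deployment class above is
# typically consumed by a charm's amulet tests -- compare
# tests/basic_deployment.py later in this series. ExampleDeployment and the
# service list below are hypothetical, not defined by this sync.
from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment
)


class ExampleDeployment(OpenStackAmuletDeployment):
    """Sketch of an amulet deployment for a charm under test."""

    def __init__(self, series='trusty', openstack=None, source=None,
                 stable=False):
        super(ExampleDeployment, self).__init__(series, openstack, source,
                                                stable)
        # _add_services() fills in each other service's 'location' using the
        # stable/next branch logic in _determine_branch_locations() before
        # handing off to the base class for deployment.
        self._add_services({'name': 'neutron-api'},
                           [{'name': 'mysql'}, {'name': 'keystone'}])

    def icehouse_or_later(self):
        # Calling _get_openstack_release() sets the release attributes
        # (self.precise_essex ... self.trusty_icehouse) before returning an
        # int, so releases can be compared numerically.
        return self._get_openstack_release() >= self.precise_icehouse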
+ +import logging +import os +import time +import urllib + +import glanceclient.v1.client as glance_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client + +import six + +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charms. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. + """ + found = False + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if (admin_port in ep.adminurl and + internal_port in ep.internalurl and + public_port in ep.publicurl): + found = True + actual = {'id': ep.id, + 'region': ep.region, + 'adminurl': ep.adminurl, + 'internalurl': ep.internalurl, + 'publicurl': ep.publicurl, + 'service_id': ep.service_id} + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if not found: + return 'endpoint not found' + + def validate_svc_catalog_endpoint_data(self, expected, actual): + """Validate service catalog endpoint data. + + Validate a list of actual service catalog endpoints vs a list of + expected service catalog endpoints. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + ret = self._validate_dict_data(expected[k][0], actual[k][0]) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + + def validate_tenant_data(self, expected, actual): + """Validate tenant data. + + Validate a list of actual tenant data vs list of expected tenant + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'description': act.description, + 'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected tenant data - {}".format(ret) + if not found: + return "tenant {} does not exist".format(e['name']) + return ret + + def validate_role_data(self, expected, actual): + """Validate role data. + + Validate a list of actual role data vs a list of expected role + data. + """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'name': act.name, 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected role data - {}".format(ret) + if not found: + return "role {} does not exist".format(e['name']) + return ret + + def validate_user_data(self, expected, actual): + """Validate user data. + + Validate a list of actual user data vs a list of expected user + data. 
+ """ + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'tenantId': act.tenantId, + 'id': act.id} + if e['name'] == a['name']: + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. + """ + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + return tenant in [t.name for t in keystone.tenants.list()] + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant): + """Authenticates admin user with the keystone admin endpoint.""" + unit = keystone_sentry + service_ip = unit.relation('shared-db', + 'mysql:shared-db')['private-address'] + ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return keystone_client.Client(username=user, password=password, + tenant_name=tenant, auth_url=ep) + + def authenticate_glance_admin(self, keystone): + """Authenticates admin user with glance.""" + ep = keystone.service_catalog.url_for(service_type='image', + endpoint_type='adminURL') + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return nova_client.Client(username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def create_cirros_image(self, glance, image_name): + """Download the latest cirros image and upload it to glance.""" + http_proxy = os.getenv('AMULET_HTTP_PROXY') + self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open("http://download.cirros-cloud.net/version/released") + version = f.read().strip() + cirros_img = "cirros-{}-x86_64-disk.img".format(version) + local_path = os.path.join('tests', cirros_img) + + if not os.path.exists(local_path): + cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + version, cirros_img) + opener.retrieve(cirros_url, local_path) + f.close() + + with open(local_path) as f: + image = glance.images.create(name=image_name, is_public=True, + disk_format='qcow2', + container_format='bare', data=f) + count = 1 + status = image.status + while status != 'active' and count < 10: + time.sleep(3) + image = glance.images.get(image.id) + status = image.status + self.log.debug('image status: {}'.format(status)) + count += 1 + + if status != 'active': + self.log.error('image creation timed out') + return None + + return image + + def delete_image(self, glance, image): + """Delete the specified image.""" + num_before 
= len(list(glance.images.list())) + glance.images.delete(image) + + count = 1 + num_after = len(list(glance.images.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(glance.images.list())) + self.log.debug('number of images: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('image deletion timed out') + return False + + return True + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + image = nova.images.find(name=image_name) + flavor = nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + num_before = len(list(nova.servers.list())) + nova.servers.delete(instance) + + count = 1 + num_after = len(list(nova.servers.list())) + while num_after != (num_before - 1) and count < 10: + time.sleep(3) + num_after = len(list(nova.servers.list())) + self.log.debug('number of instances: {}'.format(num_after)) + count += 1 + + if num_after != (num_before - 1): + self.log.error('instance deletion timed out') + return False + + return True From 3dc147d0197b7a89d150ee98ff5442ef2ff4c792 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 17 Feb 2015 08:24:56 +0000 Subject: [PATCH 117/125] Add missing nova-cc <-> mysql relation and unleash the rest of the tests --- tests/basic_deployment.py | 555 +++++++++++++++++++------------------- 1 file changed, 278 insertions(+), 277 deletions(-) diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py index 0b3db9c7..2e157291 100644 --- a/tests/basic_deployment.py +++ b/tests/basic_deployment.py @@ -58,7 +58,8 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment): 'neutron-plugin-api', 'neutron-api:identity-service': 'keystone:identity-service', 'keystone:shared-db': 'mysql:shared-db', - 'nova-compute:neutron-plugin': 'neutron-openvswitch:neutron-plugin' + 'nova-compute:neutron-plugin': 'neutron-openvswitch:neutron-plugin', + 'nova-cloud-controller:shared-db': 'mysql:shared-db', } super(NeutronAPIBasicDeployment, self)._add_relations(relations) @@ -83,126 +84,126 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment): self.neutron_api_sentry = self.d.sentry.unit['neutron-api/0'] self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0'] -# def test_neutron_api_shared_db_relation(self): -# """Verify the neutron-api to mysql shared-db relation data""" -# unit = self.neutron_api_sentry -# relation = ['shared-db', 'mysql:shared-db'] -# expected = { -# 'private-address': u.valid_ip, -# 'database': 'neutron', -# 'username': 'neutron', -# 'hostname': u.valid_ip -# } -# -# ret = u.validate_relation_data(unit, relation, expected) -# if ret: -# message = u.relation_error('neutron-api shared-db', ret) -# amulet.raise_status(amulet.FAIL, msg=message) -# -# def test_shared_db_neutron_api_relation(self): -# """Verify the mysql to neutron-api shared-db relation data""" -# unit = self.mysql_sentry -# relation = ['shared-db', 'neutron-api:shared-db'] -# expected = { -# 'allowed_units': 'neutron-api/0', -# 'db_host': 
u.valid_ip, -# 'private-address': u.valid_ip, -# } -# ret = u.validate_relation_data(unit, relation, expected) -# rel_data = unit.relation('shared-db', 'neutron-api:shared-db') -# if ret or 'password' not in rel_data: -# message = u.relation_error('mysql shared-db', ret) -# amulet.raise_status(amulet.FAIL, msg=message) -# -# def test_neutron_api_amqp_relation(self): -# """Verify the neutron-api to rabbitmq-server amqp relation data""" -# unit = self.neutron_api_sentry -# relation = ['amqp', 'rabbitmq-server:amqp'] -# expected = { -# 'username': 'neutron', -# 'private-address': u.valid_ip, -# 'vhost': 'openstack' -# } -# -# ret = u.validate_relation_data(unit, relation, expected) -# if ret: -# message = u.relation_error('neutron-api amqp', ret) -# amulet.raise_status(amulet.FAIL, msg=message) -# -# def test_amqp_neutron_api_relation(self): -# """Verify the rabbitmq-server to neutron-api amqp relation data""" -# unit = self.rabbitmq_sentry -# relation = ['amqp', 'neutron-api:amqp'] -# rel_data = unit.relation('amqp', 'neutron-api:amqp') -# expected = { -# 'hostname': u.valid_ip, -# 'private-address': u.valid_ip, -# } -# -# ret = u.validate_relation_data(unit, relation, expected) -# if ret or not 'password' in rel_data: -# message = u.relation_error('rabbitmq amqp', ret) -# amulet.raise_status(amulet.FAIL, msg=message) -# -# def test_neutron_api_identity_relation(self): -# """Verify the neutron-api to keystone identity-service relation data""" -# unit = self.neutron_api_sentry -# relation = ['identity-service', 'keystone:identity-service'] -# api_ip = unit.relation('identity-service', -# 'keystone:identity-service')['private-address'] -# api_endpoint = "http://%s:9696" % (api_ip) -# expected = { -# 'private-address': u.valid_ip, -# 'quantum_region': 'RegionOne', -# 'quantum_service': 'quantum', -# 'quantum_admin_url': api_endpoint, -# 'quantum_internal_url': api_endpoint, -# 'quantum_public_url': api_endpoint, -# } -# -# ret = u.validate_relation_data(unit, relation, expected) -# if ret: -# message = u.relation_error('neutron-api identity-service', ret) -# amulet.raise_status(amulet.FAIL, msg=message) -# -# def test_keystone_neutron_api_identity_relation(self): -# """Verify the neutron-api to keystone identity-service relation data""" -# unit = self.keystone_sentry -# relation = ['identity-service', 'neutron-api:identity-service'] -# id_relation = unit.relation('identity-service', -# 'neutron-api:identity-service') -# id_ip = id_relation['private-address'] -# expected = { -# 'admin_token': 'ubuntutesting', -# 'auth_host': id_ip, -# 'auth_port': "35357", -# 'auth_protocol': 'http', -# 'https_keystone': "False", -# 'private-address': id_ip, -# 'service_host': id_ip, -# } -# ret = u.validate_relation_data(unit, relation, expected) -# if ret: -# message = u.relation_error('neutron-api identity-service', ret) -# amulet.raise_status(amulet.FAIL, msg=message) -# -# def test_neutron_api_plugin_relation(self): -# """Verify neutron-api to neutron-openvswitch neutron-plugin-api""" -# unit = self.neutron_api_sentry -# relation = ['neutron-plugin-api', -# 'neutron-openvswitch:neutron-plugin-api'] -# expected = { -# 'private-address': u.valid_ip, -# } -# ret = u.validate_relation_data(unit, relation, expected) -# if ret: -# message = u.relation_error('neutron-api neutron-plugin-api', ret) -# amulet.raise_status(amulet.FAIL, msg=message) -# -# # XXX Test missing to examine the relation data neutron-openvswitch is -# # receiving. 
Current;y this data cannot be interegated due to -# # Bug#1421388 -# + def test_neutron_api_shared_db_relation(self): + """Verify the neutron-api to mysql shared-db relation data""" + unit = self.neutron_api_sentry + relation = ['shared-db', 'mysql:shared-db'] + expected = { + 'private-address': u.valid_ip, + 'database': 'neutron', + 'username': 'neutron', + 'hostname': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('neutron-api shared-db', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_shared_db_neutron_api_relation(self): + """Verify the mysql to neutron-api shared-db relation data""" + unit = self.mysql_sentry + relation = ['shared-db', 'neutron-api:shared-db'] + expected = { + 'allowed_units': 'neutron-api/0', + 'db_host': u.valid_ip, + 'private-address': u.valid_ip, + } + ret = u.validate_relation_data(unit, relation, expected) + rel_data = unit.relation('shared-db', 'neutron-api:shared-db') + if ret or 'password' not in rel_data: + message = u.relation_error('mysql shared-db', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_neutron_api_amqp_relation(self): + """Verify the neutron-api to rabbitmq-server amqp relation data""" + unit = self.neutron_api_sentry + relation = ['amqp', 'rabbitmq-server:amqp'] + expected = { + 'username': 'neutron', + 'private-address': u.valid_ip, + 'vhost': 'openstack' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('neutron-api amqp', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_amqp_neutron_api_relation(self): + """Verify the rabbitmq-server to neutron-api amqp relation data""" + unit = self.rabbitmq_sentry + relation = ['amqp', 'neutron-api:amqp'] + rel_data = unit.relation('amqp', 'neutron-api:amqp') + expected = { + 'hostname': u.valid_ip, + 'private-address': u.valid_ip, + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret or not 'password' in rel_data: + message = u.relation_error('rabbitmq amqp', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_neutron_api_identity_relation(self): + """Verify the neutron-api to keystone identity-service relation data""" + unit = self.neutron_api_sentry + relation = ['identity-service', 'keystone:identity-service'] + api_ip = unit.relation('identity-service', + 'keystone:identity-service')['private-address'] + api_endpoint = "http://%s:9696" % (api_ip) + expected = { + 'private-address': u.valid_ip, + 'quantum_region': 'RegionOne', + 'quantum_service': 'quantum', + 'quantum_admin_url': api_endpoint, + 'quantum_internal_url': api_endpoint, + 'quantum_public_url': api_endpoint, + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('neutron-api identity-service', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_keystone_neutron_api_identity_relation(self): + """Verify the neutron-api to keystone identity-service relation data""" + unit = self.keystone_sentry + relation = ['identity-service', 'neutron-api:identity-service'] + id_relation = unit.relation('identity-service', + 'neutron-api:identity-service') + id_ip = id_relation['private-address'] + expected = { + 'admin_token': 'ubuntutesting', + 'auth_host': id_ip, + 'auth_port': "35357", + 'auth_protocol': 'http', + 'https_keystone': "False", + 'private-address': id_ip, + 'service_host': id_ip, + } + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = 
u.relation_error('neutron-api identity-service', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_neutron_api_plugin_relation(self): + """Verify neutron-api to neutron-openvswitch neutron-plugin-api""" + unit = self.neutron_api_sentry + relation = ['neutron-plugin-api', + 'neutron-openvswitch:neutron-plugin-api'] + expected = { + 'private-address': u.valid_ip, + } + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('neutron-api neutron-plugin-api', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + # XXX Test missing to examine the relation data neutron-openvswitch is + # receiving. Current;y this data cannot be interegated due to + # Bug#1421388 + def test_z_restart_on_config_change(self): """Verify that the specified services are restarted when the config is changed. @@ -223,159 +224,159 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment): amulet.raise_status(amulet.FAIL, msg=msg) stime = 0 self.d.configure('neutron-api', {'use-syslog': 'False'}) -# -# def test_neutron_api_novacc_relation(self): -# """Verify the neutron-api to nova-cloud-controller relation data""" -# unit = self.neutron_api_sentry -# relation = ['neutron-api', 'nova-cloud-controller:neutron-api'] -# api_ip = unit.relation('identity-service', -# 'keystone:identity-service')['private-address'] -# api_endpoint = "http://%s:9696" % (api_ip) -# expected = { -# 'private-address': api_ip, -# 'neutron-plugin': 'ovs', -# 'neutron-security-groups': "no", -# 'neutron-url': api_endpoint, -# } -# ret = u.validate_relation_data(unit, relation, expected) -# if ret: -# message = u.relation_error('neutron-api neutron-api', ret) -# amulet.raise_status(amulet.FAIL, msg=message) -# -# def test_novacc_neutron_api_relation(self): -# """Verify the nova-cloud-controller to neutron-api relation data""" -# unit = self.nova_cc_sentry -# relation = ['neutron-api', 'neutron-api:neutron-api'] -# cc_ip = unit.relation('neutron-api', -# 'neutron-api:neutron-api')['private-address'] -# cc_endpoint = "http://%s:8774/v2" % (cc_ip) -# expected = { -# 'private-address': cc_ip, -# 'nova_url': cc_endpoint, -# } -# ret = u.validate_relation_data(unit, relation, expected) -# if ret: -# message = u.relation_error('nova-cc neutron-api', ret) -# amulet.raise_status(amulet.FAIL, msg=message) -# -# def test_neutron_config(self): -# """Verify the data in the neutron config file.""" -# unit = self.neutron_api_sentry -# cc_relation = self.nova_cc_sentry.relation('neutron-api', -# 'neutron-api:neutron-api') -# rabbitmq_relation = self.rabbitmq_sentry.relation('amqp', -# 'neutron-api:amqp') -# ks_rel = self.keystone_sentry.relation('identity-service', -# 'neutron-api:identity-service') -# -# nova_auth_url = '%s://%s:%s/v2.0' % (ks_rel['auth_protocol'], -# ks_rel['auth_host'], -# ks_rel['auth_port']) -# db_relation = self.mysql_sentry.relation('shared-db', -# 'neutron-api:shared-db') -# db_conn = 'mysql://neutron:%s@%s/neutron' % (db_relation['password'], -# db_relation['db_host']) -# conf = '/etc/neutron/neutron.conf' -# expected = { -# 'DEFAULT': { -# 'verbose': 'False', -# 'debug': 'False', -# 'rabbit_userid': 'neutron', -# 'rabbit_virtual_host': 'openstack', -# 'rabbit_password': rabbitmq_relation['password'], -# 'rabbit_host': rabbitmq_relation['hostname'], -# 'bind_port': '9686', -# 'nova_url': cc_relation['nova_url'], -# 'nova_region_name': 'RegionOne', -# 'nova_admin_username': ks_rel['service_username'], -# 'nova_admin_tenant_id': ks_rel['service_tenant_id'], -# 'nova_admin_password': 
ks_rel['service_password'], -# 'nova_admin_auth_url': nova_auth_url, -# }, -# 'keystone_authtoken': { -# 'signing_dir': '/var/lib/neutron/keystone-signing', -# 'service_protocol': ks_rel['service_protocol'], -# 'service_host': ks_rel['service_host'], -# 'service_port': ks_rel['service_port'], -# 'auth_host': ks_rel['auth_host'], -# 'auth_port': ks_rel['auth_port'], -# 'auth_protocol': ks_rel['auth_protocol'], -# 'admin_tenant_name': 'services', -# 'admin_user': 'quantum', -# 'admin_password': ks_rel['service_password'], -# }, -# 'database': { -# 'connection': db_conn, -# }, -# } -# -# for section, pairs in expected.iteritems(): -# ret = u.validate_config_data(unit, conf, section, pairs) -# if ret: -# message = "neutron config error: {}".format(ret) -# amulet.raise_status(amulet.FAIL, msg=message) -# -# def test_ml2_config(self): -# """Verify the data in the ml2 config file. This is only available -# since icehouse.""" -# unit = self.neutron_api_sentry -# conf = '/etc/neutron/plugins/ml2/ml2_conf.ini' -# neutron_api_relation = unit.relation('shared-db', 'mysql:shared-db') -# expected = { -# 'ml2': { -# 'type_drivers': 'gre,vxlan,vlan,flat', -# 'tenant_network_types': 'gre,vxlan,vlan,flat', -# 'mechanism_drivers': 'openvswitch,hyperv,l2population', -# }, -# 'ml2_type_gre': { -# 'tunnel_id_ranges': '1:1000' -# }, -# 'ml2_type_vxlan': { -# 'vni_ranges': '1001:2000' -# }, -# 'ovs': { -# 'enable_tunneling': 'True', -# 'local_ip': neutron_api_relation['private-address'] -# }, -# 'agent': { -# 'tunnel_types': 'gre', -# }, -# 'securitygroup': { -# 'enable_security_group': 'False', -# } -# } -# -# for section, pairs in expected.iteritems(): -# ret = u.validate_config_data(unit, conf, section, pairs) -# if ret: -# message = "ml2 config error: {}".format(ret) -# amulet.raise_status(amulet.FAIL, msg=message) -# -# def test_services(self): -# """Verify the expected services are running on the corresponding -# service units.""" -# neutron_services = ['status neutron-dhcp-agent', -# 'status neutron-lbaas-agent', -# 'status neutron-metadata-agent', -# 'status neutron-plugin-openvswitch-agent', -# 'status neutron-vpn-agent', -# 'status neutron-metering-agent', -# 'status neutron-ovs-cleanup'] -# -# nova_cc_services = ['status nova-api-ec2', -# 'status nova-api-os-compute', -# 'status nova-objectstore', -# 'status nova-cert', -# 'status nova-scheduler', -# 'status nova-conductor'] -# -# commands = { -# self.mysql_sentry: ['status mysql'], -# self.keystone_sentry: ['status keystone'], -# self.nova_cc_sentry: nova_cc_services, -# self.quantum_gateway_sentry: neutron_services -# } -# -# ret = u.validate_services(commands) -# if ret: -# amulet.raise_status(amulet.FAIL, msg=ret) + + def test_neutron_api_novacc_relation(self): + """Verify the neutron-api to nova-cloud-controller relation data""" + unit = self.neutron_api_sentry + relation = ['neutron-api', 'nova-cloud-controller:neutron-api'] + api_ip = unit.relation('identity-service', + 'keystone:identity-service')['private-address'] + api_endpoint = "http://%s:9696" % (api_ip) + expected = { + 'private-address': api_ip, + 'neutron-plugin': 'ovs', + 'neutron-security-groups': "no", + 'neutron-url': api_endpoint, + } + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('neutron-api neutron-api', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_novacc_neutron_api_relation(self): + """Verify the nova-cloud-controller to neutron-api relation data""" + unit = self.nova_cc_sentry + relation = 
['neutron-api', 'neutron-api:neutron-api'] + cc_ip = unit.relation('neutron-api', + 'neutron-api:neutron-api')['private-address'] + cc_endpoint = "http://%s:8774/v2" % (cc_ip) + expected = { + 'private-address': cc_ip, + 'nova_url': cc_endpoint, + } + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('nova-cc neutron-api', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_neutron_config(self): + """Verify the data in the neutron config file.""" + unit = self.neutron_api_sentry + cc_relation = self.nova_cc_sentry.relation('neutron-api', + 'neutron-api:neutron-api') + rabbitmq_relation = self.rabbitmq_sentry.relation('amqp', + 'neutron-api:amqp') + ks_rel = self.keystone_sentry.relation('identity-service', + 'neutron-api:identity-service') + + nova_auth_url = '%s://%s:%s/v2.0' % (ks_rel['auth_protocol'], + ks_rel['auth_host'], + ks_rel['auth_port']) + db_relation = self.mysql_sentry.relation('shared-db', + 'neutron-api:shared-db') + db_conn = 'mysql://neutron:%s@%s/neutron' % (db_relation['password'], + db_relation['db_host']) + conf = '/etc/neutron/neutron.conf' + expected = { + 'DEFAULT': { + 'verbose': 'False', + 'debug': 'False', + 'rabbit_userid': 'neutron', + 'rabbit_virtual_host': 'openstack', + 'rabbit_password': rabbitmq_relation['password'], + 'rabbit_host': rabbitmq_relation['hostname'], + 'bind_port': '9686', + 'nova_url': cc_relation['nova_url'], + 'nova_region_name': 'RegionOne', + 'nova_admin_username': ks_rel['service_username'], + 'nova_admin_tenant_id': ks_rel['service_tenant_id'], + 'nova_admin_password': ks_rel['service_password'], + 'nova_admin_auth_url': nova_auth_url, + }, + 'keystone_authtoken': { + 'signing_dir': '/var/lib/neutron/keystone-signing', + 'service_protocol': ks_rel['service_protocol'], + 'service_host': ks_rel['service_host'], + 'service_port': ks_rel['service_port'], + 'auth_host': ks_rel['auth_host'], + 'auth_port': ks_rel['auth_port'], + 'auth_protocol': ks_rel['auth_protocol'], + 'admin_tenant_name': 'services', + 'admin_user': 'quantum', + 'admin_password': ks_rel['service_password'], + }, + 'database': { + 'connection': db_conn, + }, + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "neutron config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ml2_config(self): + """Verify the data in the ml2 config file. 
This is only available + since icehouse.""" + unit = self.neutron_api_sentry + conf = '/etc/neutron/plugins/ml2/ml2_conf.ini' + neutron_api_relation = unit.relation('shared-db', 'mysql:shared-db') + expected = { + 'ml2': { + 'type_drivers': 'gre,vxlan,vlan,flat', + 'tenant_network_types': 'gre,vxlan,vlan,flat', + 'mechanism_drivers': 'openvswitch,hyperv,l2population', + }, + 'ml2_type_gre': { + 'tunnel_id_ranges': '1:1000' + }, + 'ml2_type_vxlan': { + 'vni_ranges': '1001:2000' + }, + 'ovs': { + 'enable_tunneling': 'True', + 'local_ip': neutron_api_relation['private-address'] + }, + 'agent': { + 'tunnel_types': 'gre', + }, + 'securitygroup': { + 'enable_security_group': 'False', + } + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "ml2 config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_services(self): + """Verify the expected services are running on the corresponding + service units.""" + neutron_services = ['status neutron-dhcp-agent', + 'status neutron-lbaas-agent', + 'status neutron-metadata-agent', + 'status neutron-plugin-openvswitch-agent', + 'status neutron-vpn-agent', + 'status neutron-metering-agent', + 'status neutron-ovs-cleanup'] + + nova_cc_services = ['status nova-api-ec2', + 'status nova-api-os-compute', + 'status nova-objectstore', + 'status nova-cert', + 'status nova-scheduler', + 'status nova-conductor'] + + commands = { + self.mysql_sentry: ['status mysql'], + self.keystone_sentry: ['status keystone'], + self.nova_cc_sentry: nova_cc_services, + self.quantum_gateway_sentry: neutron_services + } + + ret = u.validate_services(commands) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) From 2d22e0e3029d2059298cb6a15ad25ca7e293ad93 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 17 Feb 2015 08:53:57 +0000 Subject: [PATCH 118/125] Fix allowed_units after nova-cc <-> mysql relation was added --- tests/basic_deployment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py index 2e157291..bcd3ee66 100644 --- a/tests/basic_deployment.py +++ b/tests/basic_deployment.py @@ -105,7 +105,7 @@ class NeutronAPIBasicDeployment(OpenStackAmuletDeployment): unit = self.mysql_sentry relation = ['shared-db', 'neutron-api:shared-db'] expected = { - 'allowed_units': 'neutron-api/0', + 'allowed_units': 'nova-cloud-controller/0 neutron-api/0', 'db_host': u.valid_ip, 'private-address': u.valid_ip, } From 87e5016d6e8d063e0badec6c541f00935adde326 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Tue, 17 Feb 2015 11:43:13 +0000 Subject: [PATCH 119/125] Fix duplicate python-six --- hooks/neutron_api_utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/hooks/neutron_api_utils.py b/hooks/neutron_api_utils.py index 143ca45c..5d31a281 100644 --- a/hooks/neutron_api_utils.py +++ b/hooks/neutron_api_utils.py @@ -44,7 +44,6 @@ BASE_PACKAGES = [ 'python-psycopg2', 'python-six', 'uuid', - 'python-six', ] KILO_PACKAGES = [ From 1a31fec1328030975d911e8d7feca28a4bde8520 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 19 Feb 2015 14:20:01 +1000 Subject: [PATCH 120/125] [bradm] Sync charmhelpers --- .../charmhelpers/contrib/charmsupport/nrpe.py | 46 +- .../charmhelpers/contrib/hahelpers/cluster.py | 6 +- .../contrib/openstack/amulet/deployment.py | 7 +- .../contrib/openstack/files/__init__.py | 18 + .../contrib/openstack/files/check_haproxy.sh | 32 ++ .../files/check_haproxy_queue_depth.sh | 30 ++ 
hooks/charmhelpers/contrib/openstack/ip.py | 37 ++ hooks/charmhelpers/contrib/openstack/utils.py | 1 + hooks/charmhelpers/contrib/python/packages.py | 4 +- hooks/charmhelpers/core/fstab.py | 8 +- hooks/charmhelpers/core/host.py | 10 +- hooks/charmhelpers/core/strutils.py | 42 ++ hooks/charmhelpers/core/sysctl.py | 20 +- hooks/charmhelpers/core/templating.py | 6 +- hooks/charmhelpers/core/unitdata.py | 477 ++++++++++++++++++ hooks/charmhelpers/fetch/archiveurl.py | 20 +- hooks/charmhelpers/fetch/giturl.py | 2 +- 17 files changed, 725 insertions(+), 41 deletions(-) create mode 100644 hooks/charmhelpers/contrib/openstack/files/__init__.py create mode 100755 hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh create mode 100755 hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh create mode 100644 hooks/charmhelpers/core/strutils.py create mode 100644 hooks/charmhelpers/core/unitdata.py diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 0fd0a9d8..8229f6b5 100644 --- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -24,6 +24,8 @@ import subprocess import pwd import grp import os +import glob +import shutil import re import shlex import yaml @@ -161,7 +163,7 @@ define service {{ log('Check command not found: {}'.format(parts[0])) return '' - def write(self, nagios_context, hostname, nagios_servicegroups=None): + def write(self, nagios_context, hostname, nagios_servicegroups): nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( self.command) with open(nrpe_check_file, 'w') as nrpe_check_config: @@ -177,14 +179,11 @@ define service {{ nagios_servicegroups) def write_service_config(self, nagios_context, hostname, - nagios_servicegroups=None): + nagios_servicegroups): for f in os.listdir(NRPE.nagios_exportdir): if re.search('.*{}.cfg'.format(self.command), f): os.remove(os.path.join(NRPE.nagios_exportdir, f)) - if not nagios_servicegroups: - nagios_servicegroups = nagios_context - templ_vars = { 'nagios_hostname': hostname, 'nagios_servicegroup': nagios_servicegroups, @@ -214,7 +213,7 @@ class NRPE(object): if 'nagios_servicegroups' in self.config: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: - self.nagios_servicegroups = 'juju' + self.nagios_servicegroups = self.nagios_context self.unit_name = local_unit().replace('/', '-') if hostname: self.hostname = hostname @@ -322,3 +321,38 @@ def add_init_service_checks(nrpe, services, unit_name): check_cmd='check_status_file.py -f ' '/var/lib/nagios/service-check-%s.txt' % svc, ) + + +def copy_nrpe_checks(): + """ + Copy the nrpe checks into place + + """ + NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' + nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', + 'charmhelpers', 'contrib', 'openstack', + 'files') + + if not os.path.exists(NAGIOS_PLUGINS): + os.makedirs(NAGIOS_PLUGINS) + for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): + if os.path.isfile(fname): + shutil.copy2(fname, + os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) + + +def add_haproxy_checks(nrpe, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param str unit_name: Unit name to use in check description + """ + nrpe.add_check( + shortname='haproxy_servers', + description='Check HAProxy {%s}' % unit_name, + check_cmd='check_haproxy.sh') + nrpe.add_check( + shortname='haproxy_queue', + description='Check HAProxy queue depth 
{%s}' % unit_name, + check_cmd='check_haproxy_queue_depth.sh') diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index 9a2588b6..9333efc3 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -48,6 +48,9 @@ from charmhelpers.core.hookenv import ( from charmhelpers.core.decorators import ( retry_on_exception, ) +from charmhelpers.core.strutils import ( + bool_from_string, +) class HAIncompleteConfig(Exception): @@ -164,7 +167,8 @@ def https(): . returns: boolean ''' - if config_get('use-https') == "yes": + use_https = config_get('use-https') + if use_https and bool_from_string(use_https): return True if config_get('ssl_cert') and config_get('ssl_key'): return True diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index c50d3ec6..0cfeaa4c 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -71,16 +71,19 @@ class OpenStackAmuletDeployment(AmuletDeployment): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] + # Openstack subordinate charms do not expose an origin option as that + # is controlled by the principle + ignore = ['neutron-openvswitch'] if self.openstack: for svc in services: - if svc['name'] not in use_source: + if svc['name'] not in use_source + ignore: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source: + if svc['name'] in use_source and svc['name'] not in ignore: config = {'source': self.source} self.d.configure(svc['name'], config) diff --git a/hooks/charmhelpers/contrib/openstack/files/__init__.py b/hooks/charmhelpers/contrib/openstack/files/__init__.py new file mode 100644 index 00000000..75876796 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/files/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh new file mode 100755 index 00000000..eb8527f5 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh @@ -0,0 +1,32 @@ +#!/bin/bash +#-------------------------------------------- +# This file is managed by Juju +#-------------------------------------------- +# +# Copyright 2009,2012 Canonical Ltd. 
+# Author: Tom Haddon + +CRITICAL=0 +NOTACTIVE='' +LOGFILE=/var/log/nagios/check_haproxy.log +AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') + +for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'}); +do + output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK') + if [ $? != 0 ]; then + date >> $LOGFILE + echo $output >> $LOGFILE + /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1 + CRITICAL=1 + NOTACTIVE="${NOTACTIVE} $appserver" + fi +done + +if [ $CRITICAL = 1 ]; then + echo "CRITICAL:${NOTACTIVE}" + exit 2 +fi + +echo "OK: All haproxy instances looking good" +exit 0 diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh new file mode 100755 index 00000000..3ebb5329 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh @@ -0,0 +1,30 @@ +#!/bin/bash +#-------------------------------------------- +# This file is managed by Juju +#-------------------------------------------- +# +# Copyright 2009,2012 Canonical Ltd. +# Author: Tom Haddon + +# These should be config options at some stage +CURRQthrsh=0 +MAXQthrsh=100 + +AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') + +HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v) + +for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}') +do + CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3) + MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4) + + if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then + echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ" + exit 2 + fi +done + +echo "OK: All haproxy queue depths looking good" +exit 0 + diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py index 9eabed73..29bbddcb 100644 --- a/hooks/charmhelpers/contrib/openstack/ip.py +++ b/hooks/charmhelpers/contrib/openstack/ip.py @@ -26,6 +26,8 @@ from charmhelpers.contrib.network.ip import ( ) from charmhelpers.contrib.hahelpers.cluster import is_clustered +from functools import partial + PUBLIC = 'public' INTERNAL = 'int' ADMIN = 'admin' @@ -107,3 +109,38 @@ def resolve_address(endpoint_type=PUBLIC): "clustered=%s)" % (net_type, clustered)) return resolved_address + + +def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC, + override=None): + """Returns the correct endpoint URL to advertise to Keystone. + + This method provides the correct endpoint URL which should be advertised to + the keystone charm for endpoint creation. This method allows for the url to + be overridden to force a keystone endpoint to have specific URL for any of + the defined scopes (admin, internal, public). + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param url_template: str format string for creating the url template. Only + two values will be passed - the scheme+hostname + returned by the canonical_url and the port. + :param endpoint_type: str endpoint type to resolve. + :param override: str the name of the config option which overrides the + endpoint URL defined by the charm itself. None will + disable any overrides (default). 
+ """ + if override: + # Return any user-defined overrides for the keystone endpoint URL. + user_value = config(override) + if user_value: + return user_value.strip() + + return url_template % (canonical_url(configs, endpoint_type), port) + + +public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC) + +internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL) + +admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN) diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 26259a03..af2b3596 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -103,6 +103,7 @@ SWIFT_CODENAMES = OrderedDict([ ('2.1.0', 'juno'), ('2.2.0', 'juno'), ('2.2.1', 'kilo'), + ('2.2.2', 'kilo'), ]) DEFAULT_LOOPBACK_SIZE = '5G' diff --git a/hooks/charmhelpers/contrib/python/packages.py b/hooks/charmhelpers/contrib/python/packages.py index d848a120..8659516b 100644 --- a/hooks/charmhelpers/contrib/python/packages.py +++ b/hooks/charmhelpers/contrib/python/packages.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = "Jorge Niedbalski " - from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import log @@ -29,6 +27,8 @@ except ImportError: apt_install('python-pip') from pip import main as pip_execute +__author__ = "Jorge Niedbalski " + def parse_options(given, available): """Given a set of options, check if available""" diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py index be7de248..3056fbac 100644 --- a/hooks/charmhelpers/core/fstab.py +++ b/hooks/charmhelpers/core/fstab.py @@ -17,11 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import io import os +__author__ = 'Jorge Niedbalski R. ' + class Fstab(io.FileIO): """This class extends file in order to implement a file reader/writer @@ -77,7 +77,7 @@ class Fstab(io.FileIO): for line in self.readlines(): line = line.decode('us-ascii') try: - if line.strip() and not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): yield self._hydrate_entry(line) except ValueError: pass @@ -104,7 +104,7 @@ class Fstab(io.FileIO): found = False for index, line in enumerate(lines): - if not line.startswith("#"): + if line.strip() and not line.strip().startswith("#"): if self._hydrate_entry(line) == entry: found = True break diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index cf2cbe14..b771c611 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -191,11 +191,11 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False): def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a string""" + """Create or overwrite a file with the contents of a byte string.""" log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) uid = pwd.getpwnam(owner).pw_uid gid = grp.getgrnam(group).gr_gid - with open(path, 'w') as target: + with open(path, 'wb') as target: os.fchown(target.fileno(), uid, gid) os.fchmod(target.fileno(), perms) target.write(content) @@ -305,11 +305,11 @@ def restart_on_change(restart_map, stopstart=False): ceph_client_changed function. 
""" def wrap(f): - def wrapped_f(*args): + def wrapped_f(*args, **kwargs): checksums = {} for path in restart_map: checksums[path] = file_hash(path) - f(*args) + f(*args, **kwargs) restarts = [] for path in restart_map: if checksums[path] != file_hash(path): @@ -361,7 +361,7 @@ def list_nics(nic_type): ip_output = (line for line in ip_output if line) for line in ip_output: if line.split()[1].startswith(int_type): - matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) + matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) if matched: interface = matched.groups()[0] else: diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py new file mode 100644 index 00000000..efc4402e --- /dev/null +++ b/hooks/charmhelpers/core/strutils.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import six + + +def bool_from_string(value): + """Interpret string value as boolean. + + Returns True if value translates to True otherwise False. + """ + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + + value = value.strip().lower() + + if value in ['y', 'yes', 'true', 't']: + return True + elif value in ['n', 'no', 'false', 'f']: + return False + + msg = "Unable to interpret string value '%s' as boolean" % (value) + raise ValueError(msg) diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py index d642a371..21cc8ab2 100644 --- a/hooks/charmhelpers/core/sysctl.py +++ b/hooks/charmhelpers/core/sysctl.py @@ -17,8 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -__author__ = 'Jorge Niedbalski R. ' - import yaml from subprocess import check_call @@ -26,25 +24,33 @@ from subprocess import check_call from charmhelpers.core.hookenv import ( log, DEBUG, + ERROR, ) +__author__ = 'Jorge Niedbalski R. 
' + def create(sysctl_dict, sysctl_file): """Creates a sysctl.conf file from a YAML associative array - :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 } - :type sysctl_dict: dict + :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" + :type sysctl_dict: str :param sysctl_file: path to the sysctl file to be saved :type sysctl_file: str or unicode :returns: None """ - sysctl_dict = yaml.load(sysctl_dict) + try: + sysctl_dict_parsed = yaml.safe_load(sysctl_dict) + except yaml.YAMLError: + log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), + level=ERROR) + return with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict.items(): + for key, value in sysctl_dict_parsed.items(): fd.write("{}={}\n".format(key, value)) - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), level=DEBUG) check_call(["sysctl", "-p", sysctl_file]) diff --git a/hooks/charmhelpers/core/templating.py b/hooks/charmhelpers/core/templating.py index 97669092..45319998 100644 --- a/hooks/charmhelpers/core/templating.py +++ b/hooks/charmhelpers/core/templating.py @@ -21,7 +21,7 @@ from charmhelpers.core import hookenv def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None): + perms=0o444, templates_dir=None, encoding='UTF-8'): """ Render a template. @@ -64,5 +64,5 @@ def render(source, target, context, owner='root', group='root', level=hookenv.ERROR) raise e content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group) - host.write_file(target, content, owner, group, perms) + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..3000134a --- /dev/null +++ b/hooks/charmhelpers/core/unitdata.py @@ -0,0 +1,477 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . +# +# +# Authors: +# Kapil Thangavelu +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. 
One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. + +Here's a fully worked integration example using hookenv.Hooks:: + + from charmhelper.core import hookenv, unitdata + + hook_data = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # Print all changes to configuration from previously seen + # values. + for changed, (prev, cur) in hook_data.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + # Directly access all charm config as a mapping. + conf = db.getrange('config', True) + + # Directly access all relation data as a mapping + rels = db.getrange('rels', True) + + if __name__ == '__main__': + with hook_data(): + hook.execute() + + +A more basic integration is via the hook_scope context manager which simply +manages transaction scope (and records hook name, and timestamp):: + + >>> from unitdata import kv + >>> db = kv() + >>> with db.hook_scope('install'): + ... # do work, in transactional scope. + ... db.set('x', 1) + >>> db.get('x') + 1 + + +Usage +----- + +Values are automatically json de/serialized to preserve basic typing +and complex data struct capabilities (dicts, lists, ints, booleans, etc). + +Individual values can be manipulated via get/set:: + + >>> kv.set('y', True) + >>> kv.get('y') + True + + # We can set complex values (dicts, lists) as a single key. + >>> kv.set('config', {'a': 1, 'b': True'}) + + # Also supports returning dictionaries as a record which + # provides attribute access. + >>> config = kv.get('config', record=True) + >>> config.b + True + + +Groups of keys can be manipulated with update/getrange:: + + >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") + >>> kv.getrange('gui.', strip=True) + {'z': 1, 'y': 2} + +When updating values, its very helpful to understand which values +have actually changed and how have they changed. The storage +provides a delta method to provide for this:: + + >>> data = {'debug': True, 'option': 2} + >>> delta = kv.delta(data, 'config.') + >>> delta.debug.previous + None + >>> delta.debug.current + True + >>> delta + {'debug': (None, True), 'option': (None, 2)} + +Note the delta method does not persist the actual change, it needs to +be explicitly saved via 'update' method:: + + >>> kv.update(data, 'config.') + +Values modified in the context of a hook scope retain historical values +associated to the hookname. + + >>> with db.hook_scope('config-changed'): + ... db.set('x', 42) + >>> db.gethistory('x') + [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), + (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] + +""" + +import collections +import contextlib +import datetime +import json +import os +import pprint +import sqlite3 +import sys + +__author__ = 'Kapil Thangavelu ' + + +class Storage(object): + """Simple key value database for local unit state within charms. + + Modifications are automatically committed at hook exit. That's + currently regardless of exit code. + + To support dicts, lists, integer, floats, and booleans values + are automatically json encoded/decoded. 
+ """ + def __init__(self, path=None): + self.db_path = path + if path is None: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def _scoped_query(self, stmt, params=None): + if params is None: + params = [] + return stmt, params + + def get(self, key, default=None, record=False): + self.cursor.execute( + *self._scoped_query( + 'select data from kv where key=?', [key])) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + stmt = "select key, data from kv where key like '%s%%'" % key_prefix + self.cursor.execute(*self._scoped_query(stmt)) + result = self.cursor.fetchall() + + if not result: + return None + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + serialized = json.dumps(value) + + self.cursor.execute( + 'select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. 
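+
+        A short sketch with hypothetical keys, mirroring the module
+        docstring above::
+
+            >>> kv.set('config.debug', False)
+            >>> delta = kv.delta({'debug': True}, 'config.')
+            >>> delta.debug.previous, delta.debug.current
+            (False, True)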
+ """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. + + Records all unit information, and stores deltas for processing + by the hook. + + Sample:: + + from charmhelper.core import hookenv, unitdata + + changes = unitdata.HookData() + db = unitdata.kv() + hooks = hookenv.Hooks() + + @hooks.hook + def config_changed(): + # View all changes to configuration + for changed, (prev, cur) in changes.conf.items(): + print('config changed', changed, + 'previous value', prev, + 'current value', cur) + + # Get some unit specific bookeeping + if not db.get('pkg_key'): + key = urllib.urlopen('https://example.com/pkg_key').read() + db.set('pkg_key', key) + + if __name__ == '__main__': + with changes(): + hook.execute() + + """ + def __init__(self): + self.kv = kv() + self.conf = None + self.rels = None + + @contextlib.contextmanager + def __call__(self): + from charmhelpers.core import hookenv + hook_name = hookenv.hook_name() + + with self.kv.hook_scope(hook_name): + self._record_charm_version(hookenv.charm_dir()) + delta_config, delta_relation = self._record_hook(hookenv) + yield self.kv, delta_config, delta_relation + + def _record_charm_version(self, charm_dir): + # Record revisions.. charm revisions are meaningless + # to charm authors as they don't control the revision. 
+ # so logic dependnent on revision is not particularly + # useful, however it is useful for debugging analysis. + charm_rev = open( + os.path.join(charm_dir, 'revision')).read().strip() + charm_rev = charm_rev or '0' + revs = self.kv.get('charm_revisions', []) + if charm_rev not in revs: + revs.append(charm_rev.strip() or '0') + self.kv.set('charm_revisions', revs) + + def _record_hook(self, hookenv): + data = hookenv.execution_environment() + self.conf = conf_delta = self.kv.delta(data['conf'], 'config') + self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') + self.kv.set('env', data['env']) + self.kv.set('unit', data['unit']) + self.kv.set('relid', data.get('relid')) + return conf_delta, rels_delta + + +class Record(dict): + + __slots__ = () + + def __getattr__(self, k): + if k in self: + return self[k] + raise AttributeError(k) + + +class DeltaSet(Record): + + __slots__ = () + + +Delta = collections.namedtuple('Delta', ['previous', 'current']) + + +_KV = None + + +def kv(): + global _KV + if _KV is None: + _KV = Storage() + return _KV diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index d25a0ddd..8dfce505 100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -18,6 +18,16 @@ import os import hashlib import re +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + import six if six.PY3: from urllib.request import ( @@ -35,16 +45,6 @@ else: ) from urlparse import urlparse, urlunparse, parse_qs -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - def splituser(host): '''urllib.splituser(), but six's support of this seems broken''' diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py index 5376786b..93aae87b 100644 --- a/hooks/charmhelpers/fetch/giturl.py +++ b/hooks/charmhelpers/fetch/giturl.py @@ -32,7 +32,7 @@ except ImportError: apt_install("python-git") from git import Repo -from git.exc import GitCommandError +from git.exc import GitCommandError # noqa E402 class GitUrlFetchHandler(BaseFetchHandler): From 6acd55cd2b53e2063e9edf9f0827956c70758e3c Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 19 Feb 2015 14:21:44 +1000 Subject: [PATCH 121/125] [bradm] Add haproxy nrpe checks --- hooks/neutron_api_hooks.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hooks/neutron_api_hooks.py b/hooks/neutron_api_hooks.py index 1b9aa9c6..bae93807 100755 --- a/hooks/neutron_api_hooks.py +++ b/hooks/neutron_api_hooks.py @@ -380,7 +380,9 @@ def update_nrpe_config(): hostname = nrpe.get_nagios_hostname() current_unit = nrpe.get_nagios_unit_name() nrpe_setup = nrpe.NRPE(hostname=hostname) + nrpe.copy_nrpe_checks() nrpe.add_init_service_checks(nrpe_setup, services(), current_unit) + nrpe.add_haproxy_checks(nrpe_setup, current_unit) nrpe_setup.write() From c774379eca437ec26df9de53db8b49f475297e22 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Thu, 19 Feb 2015 14:22:29 +1000 Subject: [PATCH 122/125] [bradm] Add nagios_servicegroups config option --- config.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/config.yaml b/config.yaml index 8e6444c4..aec778db 100644 --- a/config.yaml +++ b/config.yaml @@ -213,3 +213,9 @@ options: 
juju-myservice-0 If you're running multiple environments with the same services in them this allows you to differentiate between them. + nagios_servicegroups: + default: "" + type: string + description: | + A comma-separated list of nagios servicegroups. + If left empty, the nagios_context will be used as the servicegroup From 404aecbc8d9d0c3e3bb2062e64047d89fab60364 Mon Sep 17 00:00:00 2001 From: Brad Marshall Date: Fri, 20 Feb 2015 10:25:29 +1000 Subject: [PATCH 123/125] [bradm] Handle case of empty nagios_servicegroups setting --- hooks/charmhelpers/contrib/charmsupport/nrpe.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py index 8229f6b5..9d961cfb 100644 --- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -210,7 +210,7 @@ class NRPE(object): super(NRPE, self).__init__() self.config = config() self.nagios_context = self.config['nagios_context'] - if 'nagios_servicegroups' in self.config: + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: self.nagios_servicegroups = self.config['nagios_servicegroups'] else: self.nagios_servicegroups = self.nagios_context From a102b58b71fc659fd81754b6baba8f689e383b8b Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Tue, 24 Feb 2015 11:05:44 +0000 Subject: [PATCH 124/125] [trivial] charmhelpers sync --- tests/charmhelpers/contrib/amulet/utils.py | 125 ++++++++++++++++++++- 1 file changed, 122 insertions(+), 3 deletions(-) diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py index 253fb08f..65219d33 100644 --- a/tests/charmhelpers/contrib/amulet/utils.py +++ b/tests/charmhelpers/contrib/amulet/utils.py @@ -169,8 +169,13 @@ class AmuletUtils(object): cmd = 'pgrep -o -f {}'.format(service) else: cmd = 'pgrep -o {}'.format(service) - proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + cmd = cmd + ' | grep -v pgrep || exit 0' + cmd_out = sentry_unit.run(cmd) + self.log.debug('CMDout: ' + str(cmd_out)) + if cmd_out[0]: + self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) + proc_dir = '/proc/{}'.format(cmd_out[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, pgrep_full=False, sleep_time=20): @@ -180,7 +185,6 @@ class AmuletUtils(object): (such as a config file for that service) to determine if the service has been restarted. """ - print service time.sleep(sleep_time) if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= self._get_file_mtime(sentry_unit, filename)): @@ -188,6 +192,121 @@ class AmuletUtils(object): else: return False + def service_restarted_since(self, sentry_unit, mtime, service, + pgrep_full=False, sleep_time=20, + retry_count=2): + """Check if service was been started after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Returns: + bool: True if service found and its start time it newer than mtime, + False if service is older than mtime or if service was + not found. 
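+
+        Sketch of typical use (unit and service names below are
+        placeholders, following the pattern used by
+        validate_service_config_changed further down):
+
+            mtime = u.get_sentry_time(self.cinder_sentry)
+            self.d.configure('cinder', {'debug': 'True'})
+            if not u.service_restarted_since(self.cinder_sentry, mtime,
+                                             'cinder-api'):
+                amulet.raise_status(amulet.FAIL,
+                                    msg='service did not restart')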
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + while retry_count > 0 and not proc_start_time: + self.log.debug('No pid file found for service %s, will retry %i ' + 'more times' % (service, retry_count)) + time.sleep(30) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + retry_count = retry_count - 1 + + if not proc_start_time: + self.log.warn('No proc start time found, assuming service did ' + 'not start') + return False + if proc_start_time >= mtime: + self.log.debug('proc start time is newer than provided mtime' + '(%s >= %s)' % (proc_start_time, mtime)) + return True + else: + self.log.warn('proc start time (%s) is older than provided mtime ' + '(%s), service did not restart' % (proc_start_time, + mtime)) + return False + + def config_updated_since(self, sentry_unit, filename, mtime, + sleep_time=20): + """Check if file was modified after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check the file mtime on + filename (string): The file to check mtime of + mtime (float): The epoch time to check against + sleep_time (int): Seconds to sleep before looking for process + + Returns: + bool: True if file was modified more recently than mtime, False if + file was modified before mtime, + """ + self.log.debug('Checking %s updated since %s' % (filename, mtime)) + time.sleep(sleep_time) + file_mtime = self._get_file_mtime(sentry_unit, filename) + if file_mtime >= mtime: + self.log.debug('File mtime is newer than provided mtime ' + '(%s >= %s)' % (file_mtime, mtime)) + return True + else: + self.log.warn('File mtime %s is older than provided mtime %s' + % (file_mtime, mtime)) + return False + + def validate_service_config_changed(self, sentry_unit, mtime, service, + filename, pgrep_full=False, + sleep_time=20, retry_count=2): + """Check service and file were updated after mtime + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + filename (string): The file to check mtime of + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Typical Usage: + u = OpenStackAmuletUtils(ERROR) + ... + mtime = u.get_sentry_time(self.cinder_sentry) + self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) + if not u.validate_service_config_changed(self.cinder_sentry, + mtime, + 'cinder-api', + '/etc/cinder/cinder.conf') + amulet.raise_status(amulet.FAIL, msg='update failed') + Returns: + bool: True if both service and file where updated/restarted after + mtime, False if service is older than mtime or if service was + not found or if filename was modified before mtime. 
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + service_restart = self.service_restarted_since(sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=0, + retry_count=retry_count) + config_update = self.config_updated_since(sentry_unit, filename, mtime, + sleep_time=0) + return service_restart and config_update + + def get_sentry_time(self, sentry_unit): + """Return current epoch time on a sentry""" + cmd = "date +'%s'" + return float(sentry_unit.run(cmd)[0]) + def relation_error(self, name, data): return 'unexpected relation data in {} - {}'.format(name, data) From c8db9adfe5001abc31c1bd2dc6f07bd4665f5903 Mon Sep 17 00:00:00 2001 From: James Page Date: Wed, 4 Mar 2015 09:51:57 +0000 Subject: [PATCH 125/125] Automated resync of charm-helpers --- hooks/charmhelpers/contrib/network/ip.py | 85 ++++++++++++++++++- .../charmhelpers/contrib/openstack/context.py | 35 +++++++- hooks/charmhelpers/contrib/openstack/utils.py | 78 ++--------------- hooks/charmhelpers/core/services/helpers.py | 16 +++- 4 files changed, 136 insertions(+), 78 deletions(-) diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index 98b17544..fff6d5ca 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -17,13 +17,16 @@ import glob import re import subprocess +import six +import socket from functools import partial from charmhelpers.core.hookenv import unit_get from charmhelpers.fetch import apt_install from charmhelpers.core.hookenv import ( - log + log, + WARNING, ) try: @@ -365,3 +368,83 @@ def is_bridge_member(nic): return True return False + + +def is_ip(address): + """ + Returns True if address is a valid IP address. + """ + try: + # Test to see if already an IPv4 address + socket.inet_aton(address) + return True + except socket.error: + return False + + +def ns_query(address): + try: + import dns.resolver + except ImportError: + apt_install('python-dnspython') + import dns.resolver + + if isinstance(address, dns.name.Name): + rtype = 'PTR' + elif isinstance(address, six.string_types): + rtype = 'A' + else: + return None + + answers = dns.resolver.query(address, rtype) + if answers: + return str(answers[0]) + return None + + +def get_host_ip(hostname, fallback=None): + """ + Resolves the IP for a given hostname, or returns + the input if it is already an IP. + """ + if is_ip(hostname): + return hostname + + ip_addr = ns_query(hostname) + if not ip_addr: + try: + ip_addr = socket.gethostbyname(hostname) + except: + log("Failed to resolve hostname '%s'" % (hostname), + level=WARNING) + return fallback + return ip_addr + + +def get_hostname(address, fqdn=True): + """ + Resolves hostname for given IP, or returns the input + if it is already a hostname. + """ + if is_ip(address): + try: + import dns.reversename + except ImportError: + apt_install("python-dnspython") + import dns.reversename + + rev = dns.reversename.from_address(address) + result = ns_query(rev) + if not result: + return None + else: + result = address + + if fqdn: + # strip trailing . 
+ if result.endswith('.'): + return result[:-1] + else: + return result + else: + return result.split('.')[0] diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index d268ea8f..2d9a95cd 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -21,6 +21,7 @@ from base64 import b64decode from subprocess import check_call import six +import yaml from charmhelpers.fetch import ( apt_install, @@ -104,9 +105,41 @@ def context_complete(ctxt): def config_flags_parser(config_flags): """Parses config flags string into dict. + This parsing method supports a few different formats for the config + flag values to be parsed: + + 1. A string in the simple format of key=value pairs, with the possibility + of specifying multiple key value pairs within the same string. For + example, a string in the format of 'key1=value1, key2=value2' will + return a dict of: + {'key1': 'value1', + 'key2': 'value2'}. + + 2. A string in the above format, but supporting a comma-delimited list + of values for the same key. For example, a string in the format of + 'key1=value1, key2=value3,value4,value5' will return a dict of: + {'key1', 'value1', + 'key2', 'value2,value3,value4'} + + 3. A string containing a colon character (:) prior to an equal + character (=) will be treated as yaml and parsed as such. This can be + used to specify more complex key value pairs. For example, + a string in the format of 'key1: subkey1=value1, subkey2=value2' will + return a dict of: + {'key1', 'subkey1=value1, subkey2=value2'} + The provided config_flags string may be a list of comma-separated values which themselves may be comma-separated list of values. """ + # If we find a colon before an equals sign then treat it as yaml. + # Note: limit it to finding the colon first since this indicates assignment + # for inline yaml. + colon = config_flags.find(':') + equals = config_flags.find('=') + if colon > 0: + if colon < equals or equals < 0: + return yaml.safe_load(config_flags) + if config_flags.find('==') >= 0: log("config_flags is not in expected format (key=value)", level=ERROR) raise OSContextError @@ -191,7 +224,7 @@ class SharedDBContext(OSContextGenerator): unit=local_unit()) if set_hostname != access_hostname: relation_set(relation_settings={hostname_key: access_hostname}) - return ctxt # Defer any further hook execution for now.... + return None # Defer any further hook execution for now.... password_setting = 'password' if self.relation_prefix: diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index af2b3596..4f110c63 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -23,12 +23,13 @@ from functools import wraps import subprocess import json import os -import socket import sys import six import yaml +from charmhelpers.contrib.network import ip + from charmhelpers.core.hookenv import ( config, log as juju_log, @@ -421,77 +422,10 @@ def clean_storage(block_device): else: zap_disk(block_device) - -def is_ip(address): - """ - Returns True if address is a valid IP address. 
- """ - try: - # Test to see if already an IPv4 address - socket.inet_aton(address) - return True - except socket.error: - return False - - -def ns_query(address): - try: - import dns.resolver - except ImportError: - apt_install('python-dnspython') - import dns.resolver - - if isinstance(address, dns.name.Name): - rtype = 'PTR' - elif isinstance(address, six.string_types): - rtype = 'A' - else: - return None - - answers = dns.resolver.query(address, rtype) - if answers: - return str(answers[0]) - return None - - -def get_host_ip(hostname): - """ - Resolves the IP for a given hostname, or returns - the input if it is already an IP. - """ - if is_ip(hostname): - return hostname - - return ns_query(hostname) - - -def get_hostname(address, fqdn=True): - """ - Resolves hostname for given IP, or returns the input - if it is already a hostname. - """ - if is_ip(address): - try: - import dns.reversename - except ImportError: - apt_install('python-dnspython') - import dns.reversename - - rev = dns.reversename.from_address(address) - result = ns_query(rev) - if not result: - return None - else: - result = address - - if fqdn: - # strip trailing . - if result.endswith('.'): - return result[:-1] - else: - return result - else: - return result.split('.')[0] +is_ip = ip.is_ip +ns_query = ip.ns_query +get_host_ip = ip.get_host_ip +get_hostname = ip.get_hostname def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 5e3af9da..15b21664 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -45,12 +45,14 @@ class RelationContext(dict): """ name = None interface = None - required_keys = [] def __init__(self, name=None, additional_required_keys=None): + if not hasattr(self, 'required_keys'): + self.required_keys = [] + if name is not None: self.name = name - if additional_required_keys is not None: + if additional_required_keys: self.required_keys.extend(additional_required_keys) self.get_data() @@ -134,7 +136,10 @@ class MysqlRelation(RelationContext): """ name = 'db' interface = 'mysql' - required_keys = ['host', 'user', 'password', 'database'] + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'user', 'password', 'database'] + super(HttpRelation).__init__(self, *args, **kwargs) class HttpRelation(RelationContext): @@ -146,7 +151,10 @@ class HttpRelation(RelationContext): """ name = 'website' interface = 'http' - required_keys = ['host', 'port'] + + def __init__(self, *args, **kwargs): + self.required_keys = ['host', 'port'] + super(HttpRelation).__init__(self, *args, **kwargs) def provide_data(self): return {