
Sync charm-helpers

Change-Id: I4f96a84b8dc7f447ac1b0b89a2a494be8e4946ad
changes/55/497655/1
Ryan Beisner, 1 year ago
commit bbe3fae336

hooks/charmhelpers/__init__.py (+61, -0)

@@ -14,6 +14,11 @@
 
 # Bootstrap charm-helpers, installing its dependencies if necessary using
 # only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
 import subprocess
 import sys
 
@@ -34,3 +39,59 @@ except ImportError:
     else:
         subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
     import yaml  # flake8: noqa
+
+
+# Holds a mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below.  This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+    The date, which is a string in semi-ISO8601 format, indicates the
+    year-month that the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning: String to indicate where it has moved to.
+    :param date: optional string, in YYYY-MM format to indicate when the
+                 function will definitely (probably) be removed.
+    :param log: The log function to call to log.  If None, logs to stdout.
+    """
+    def wrap(f):
+
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            try:
+                module = inspect.getmodule(f)
+                file = inspect.getsourcefile(f)
+                lines = inspect.getsourcelines(f)
+                f_name = "{}-{}-{}..{}-{}".format(
+                    module.__name__, file, lines[0], lines[-1], f.__name__)
+            except (IOError, TypeError):
+                # assume it was local, so just use the name of the function
+                f_name = f.__name__
+            if f_name not in __deprecated_functions:
+                __deprecated_functions[f_name] = True
+                s = "DEPRECATION WARNING: Function {} is being removed".format(
+                    f.__name__)
+                if date:
+                    s = "{} on/around {}".format(s, date)
+                if warning:
+                    s = "{} : {}".format(s, warning)
+                if log:
+                    log(s)
+                else:
+                    print(s)
+            return f(*args, **kwargs)
+        return wrapped_f
+    return wrap
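
For reference, a minimal sketch of how a charm might consume the new decorator; the function name below is illustrative and not part of this commit, but the call pattern mirrors the one applied to configure_installation_source() later in this sync:

from charmhelpers import deprecate
from charmhelpers.core.hookenv import log


# Hypothetical legacy helper kept only for backward compatibility; the
# deprecation warning is logged once via hookenv.log.
@deprecate("use charmhelpers.fetch.add_source() instead", "2017-07", log=log)
def legacy_add_source(source):
    pass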

hooks/charmhelpers/contrib/network/ip.py (+4, -2)

@@ -243,11 +243,13 @@ def is_ipv6_disabled():
     try:
         result = subprocess.check_output(
             ['sysctl', 'net.ipv6.conf.all.disable_ipv6'],
-            stderr=subprocess.STDOUT)
-        return "net.ipv6.conf.all.disable_ipv6 = 1" in result
+            stderr=subprocess.STDOUT,
+            universal_newlines=True)
     except subprocess.CalledProcessError:
         return True
 
+    return "net.ipv6.conf.all.disable_ipv6 = 1" in result
+
 
 def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                    fatal=True, exc_list=None):
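
A standalone illustration of the behaviour this change addresses (not charm code): under Python 3, check_output() returns bytes by default, so a substring test against a str would fail; universal_newlines=True makes it return text on both Python 2 and 3.

import subprocess

# With universal_newlines=True the output is str, so the "in" test works
# identically under Python 2 and Python 3.
out = subprocess.check_output(['echo', 'net.ipv6.conf.all.disable_ipv6 = 1'],
                              universal_newlines=True)
print('disable_ipv6 = 1' in out)  # True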

hooks/charmhelpers/contrib/openstack/amulet/utils.py (+68, -35)

@@ -25,9 +25,12 @@ import urlparse
 import cinderclient.v1.client as cinder_client
 import glanceclient.v1.client as glance_client
 import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-from keystoneclient.auth.identity import v3 as keystone_id_v3
-from keystoneclient import session as keystone_session
+from keystoneclient.v2_0 import client as keystone_client
+from keystoneauth1.identity import (
+    v3,
+    v2,
+)
+from keystoneauth1 import session as keystone_session
 from keystoneclient.v3 import client as keystone_client_v3
 from novaclient import exceptions
 
@@ -368,12 +371,20 @@ class OpenStackAmuletUtils(AmuletUtils):
                                         port)
         if not api_version or api_version == 2:
             ep = base_ep + "/v2.0"
-            return keystone_client.Client(username=username, password=password,
-                                          tenant_name=project_name,
-                                          auth_url=ep)
+            auth = v2.Password(
+                username=username,
+                password=password,
+                tenant_name=project_name,
+                auth_url=ep
+            )
+            sess = keystone_session.Session(auth=auth)
+            client = keystone_client.Client(session=sess)
+            # This populates the client.service_catalog
+            client.auth_ref = auth.get_access(sess)
+            return client
         else:
             ep = base_ep + "/v3"
-            auth = keystone_id_v3.Password(
+            auth = v3.Password(
                 user_domain_name=user_domain_name,
                 username=username,
                 password=password,
@@ -382,36 +393,45 @@ class OpenStackAmuletUtils(AmuletUtils):
                 project_name=project_name,
                 auth_url=ep
             )
-            return keystone_client_v3.Client(
-                session=keystone_session.Session(auth=auth)
-            )
+            sess = keystone_session.Session(auth=auth)
+            client = keystone_client_v3.Client(session=sess)
+            # This populates the client.service_catalog
+            client.auth_ref = auth.get_access(sess)
+            return client
 
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant=None, api_version=None,
-                                    keystone_ip=None):
+                                    keystone_ip=None, user_domain_name=None,
+                                    project_domain_name=None,
+                                    project_name=None):
         """Authenticates admin user with the keystone admin endpoint."""
         self.log.debug('Authenticating keystone admin...')
         if not keystone_ip:
             keystone_ip = keystone_sentry.info['public-address']
 
-        user_domain_name = None
-        domain_name = None
-        if api_version == 3:
+        # To support backward compatibility usage of this function
+        if not project_name:
+            project_name = tenant
+        if api_version == 3 and not user_domain_name:
             user_domain_name = 'admin_domain'
-            domain_name = user_domain_name
-
-        return self.authenticate_keystone(keystone_ip, user, password,
-                                          project_name=tenant,
-                                          api_version=api_version,
-                                          user_domain_name=user_domain_name,
-                                          domain_name=domain_name,
-                                          admin_port=True)
+        if api_version == 3 and not project_domain_name:
+            project_domain_name = 'admin_domain'
+        if api_version == 3 and not project_name:
+            project_name = 'admin'
+
+        return self.authenticate_keystone(
+            keystone_ip, user, password,
+            api_version=api_version,
+            user_domain_name=user_domain_name,
+            project_domain_name=project_domain_name,
+            project_name=project_name,
+            admin_port=True)
 
     def authenticate_keystone_user(self, keystone, user, password, tenant):
         """Authenticates a regular user with the keystone public endpoint."""
         self.log.debug('Authenticating keystone user ({})...'.format(user))
         ep = keystone.service_catalog.url_for(service_type='identity',
-                                              endpoint_type='publicURL')
+                                              interface='publicURL')
         keystone_ip = urlparse.urlparse(ep).hostname
 
         return self.authenticate_keystone(keystone_ip, user, password,
@@ -421,22 +441,32 @@ class OpenStackAmuletUtils(AmuletUtils):
         """Authenticates admin user with glance."""
         self.log.debug('Authenticating glance admin...')
         ep = keystone.service_catalog.url_for(service_type='image',
-                                              endpoint_type='adminURL')
-        return glance_client.Client(ep, token=keystone.auth_token)
+                                              interface='adminURL')
+        if keystone.session:
+            return glance_client.Client(ep, session=keystone.session)
+        else:
+            return glance_client.Client(ep, token=keystone.auth_token)
 
     def authenticate_heat_admin(self, keystone):
         """Authenticates the admin user with heat."""
         self.log.debug('Authenticating heat admin...')
         ep = keystone.service_catalog.url_for(service_type='orchestration',
-                                              endpoint_type='publicURL')
-        return heat_client.Client(endpoint=ep, token=keystone.auth_token)
+                                              interface='publicURL')
+        if keystone.session:
+            return heat_client.Client(endpoint=ep, session=keystone.session)
+        else:
+            return heat_client.Client(endpoint=ep, token=keystone.auth_token)
 
     def authenticate_nova_user(self, keystone, user, password, tenant):
         """Authenticates a regular user with nova-api."""
         self.log.debug('Authenticating nova user ({})...'.format(user))
         ep = keystone.service_catalog.url_for(service_type='identity',
-                                              endpoint_type='publicURL')
-        if novaclient.__version__[0] >= "7":
+                                              interface='publicURL')
+        if keystone.session:
+            return nova_client.Client(NOVA_CLIENT_VERSION,
+                                      session=keystone.session,
+                                      auth_url=ep)
+        elif novaclient.__version__[0] >= "7":
             return nova_client.Client(NOVA_CLIENT_VERSION,
                                       username=user, password=password,
                                       project_name=tenant, auth_url=ep)
@@ -449,12 +479,15 @@ class OpenStackAmuletUtils(AmuletUtils):
         """Authenticates a regular user with swift api."""
         self.log.debug('Authenticating swift user ({})...'.format(user))
         ep = keystone.service_catalog.url_for(service_type='identity',
-                                              endpoint_type='publicURL')
-        return swiftclient.Connection(authurl=ep,
-                                      user=user,
-                                      key=password,
-                                      tenant_name=tenant,
-                                      auth_version='2.0')
+                                              interface='publicURL')
+        if keystone.session:
+            return swiftclient.Connection(session=keystone.session)
+        else:
+            return swiftclient.Connection(authurl=ep,
+                                          user=user,
+                                          key=password,
+                                          tenant_name=tenant,
+                                          auth_version='2.0')
 
     def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto",
                       ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
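
The session-based pattern introduced above, reduced to a standalone sketch; the endpoint and credentials are placeholders, not values from this commit:

from keystoneauth1.identity import v3
from keystoneauth1 import session as keystone_session
from keystoneclient.v3 import client as keystone_client_v3

# Build a v3 password auth plugin, wrap it in a session, and hand the
# session to the keystone client.
auth = v3.Password(auth_url='http://keystone.example.com:5000/v3',
                   username='admin', password='secret',
                   project_name='admin',
                   user_domain_name='admin_domain',
                   project_domain_name='admin_domain')
sess = keystone_session.Session(auth=auth)
keystone = keystone_client_v3.Client(session=sess)
# Populate the service catalog so service_catalog.url_for() keeps working.
keystone.auth_ref = auth.get_access(sess)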

hooks/charmhelpers/contrib/openstack/context.py (+66, -30)

@@ -41,9 +41,9 @@ from charmhelpers.core.hookenv import (
     charm_name,
     DEBUG,
     INFO,
-    WARNING,
     ERROR,
     status_set,
+    network_get_primary_address
 )
 
 from charmhelpers.core.sysctl import create as sysctl_create
@@ -80,6 +80,9 @@ from charmhelpers.contrib.openstack.neutron import (
 from charmhelpers.contrib.openstack.ip import (
     resolve_address,
     INTERNAL,
+    ADMIN,
+    PUBLIC,
+    ADDRESS_MAP,
 )
 from charmhelpers.contrib.network.ip import (
     get_address_in_network,
@@ -87,7 +90,6 @@ from charmhelpers.contrib.network.ip import (
     get_ipv6_addr,
     get_netmask_for_address,
     format_ipv6_addr,
-    is_address_in_network,
     is_bridge_member,
     is_ipv6_disabled,
 )
@@ -97,6 +99,7 @@ from charmhelpers.contrib.openstack.utils import (
     git_determine_usr_bin,
     git_determine_python_path,
     enable_memcache,
+    snap_install_requested,
 )
 from charmhelpers.core.unitdata import kv
 
@@ -244,6 +247,11 @@ class SharedDBContext(OSContextGenerator):
                     'database_password': rdata.get(password_setting),
                     'database_type': 'mysql'
                 }
+                # Note(coreycb): We can drop mysql+pymysql if we want when the
+                # following review lands, though it seems mysql+pymysql would
+                # be preferred. https://review.openstack.org/#/c/462190/
+                if snap_install_requested():
+                    ctxt['database_type'] = 'mysql+pymysql'
                 if self.context_complete(ctxt):
                     db_ssl(rdata, ctxt, self.ssl_dir)
                     return ctxt
@@ -510,6 +518,10 @@ class CephContext(OSContextGenerator):
                     ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
                 if not ctxt.get('key'):
                     ctxt['key'] = relation_get('key', rid=rid, unit=unit)
+                if not ctxt.get('rbd_features'):
+                    default_features = relation_get('rbd-features', rid=rid, unit=unit)
+                    if default_features is not None:
+                        ctxt['rbd_features'] = default_features
 
                 ceph_addrs = relation_get('ceph-public-address', rid=rid,
                                           unit=unit)
@@ -610,7 +622,6 @@ class HAProxyContext(OSContextGenerator):
             ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
 
         if config('prefer-ipv6'):
-            ctxt['ipv6'] = True
             ctxt['local_host'] = 'ip6-localhost'
             ctxt['haproxy_host'] = '::'
         else:
@@ -726,11 +737,17 @@ class ApacheSSLContext(OSContextGenerator):
         return sorted(list(set(cns)))
 
     def get_network_addresses(self):
-        """For each network configured, return corresponding address and vip
-           (if available).
+        """For each network configured, return corresponding address and
+           hostname or vip (if available).
 
         Returns a list of tuples of the form:
 
+            [(address_in_net_a, hostname_in_net_a),
+             (address_in_net_b, hostname_in_net_b),
+             ...]
+
+            or, if no hostname(s) available:
+
             [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]
@@ -742,32 +759,27 @@ class ApacheSSLContext(OSContextGenerator):
             ...]
         """
         addresses = []
-        if config('vip'):
-            vips = config('vip').split()
-        else:
-            vips = []
-
-        for net_type in ['os-internal-network', 'os-admin-network',
-                         'os-public-network']:
-            addr = get_address_in_network(config(net_type),
-                                          unit_get('private-address'))
-            if len(vips) > 1 and is_clustered():
-                if not config(net_type):
-                    log("Multiple networks configured but net_type "
-                        "is None (%s)." % net_type, level=WARNING)
-                    continue
-
-                for vip in vips:
-                    if is_address_in_network(config(net_type), vip):
-                        addresses.append((addr, vip))
-                        break
-
-            elif is_clustered() and config('vip'):
-                addresses.append((addr, config('vip')))
+        for net_type in [INTERNAL, ADMIN, PUBLIC]:
+            net_config = config(ADDRESS_MAP[net_type]['config'])
+            # NOTE(jamespage): Fallback must always be private address
+            #                  as this is used to bind services on the
+            #                  local unit.
+            fallback = unit_get("private-address")
+            if net_config:
+                addr = get_address_in_network(net_config,
+                                              fallback)
             else:
-                addresses.append((addr, addr))
+                try:
+                    addr = network_get_primary_address(
+                        ADDRESS_MAP[net_type]['binding']
+                    )
+                except NotImplementedError:
+                    addr = fallback
+
+            endpoint = resolve_address(net_type)
+            addresses.append((addr, endpoint))
 
-        return sorted(addresses)
+        return sorted(set(addresses))
 
     def __call__(self):
         if isinstance(self.external_ports, six.string_types):
@@ -794,7 +806,7 @@ class ApacheSSLContext(OSContextGenerator):
             self.configure_cert(cn)
 
         addresses = self.get_network_addresses()
-        for address, endpoint in sorted(set(addresses)):
+        for address, endpoint in addresses:
             for api_port in self.external_ports:
                 ext_port = determine_apache_port(api_port,
                                                  singlenode_mode=True)
@@ -1397,14 +1409,38 @@ class NeutronAPIContext(OSContextGenerator):
                 'rel_key': 'dns-domain',
                 'default': None,
             },
+            'polling_interval': {
+                'rel_key': 'polling-interval',
+                'default': 2,
+            },
+            'rpc_response_timeout': {
+                'rel_key': 'rpc-response-timeout',
+                'default': 60,
+            },
+            'report_interval': {
+                'rel_key': 'report-interval',
+                'default': 30,
+            },
+            'enable_qos': {
+                'rel_key': 'enable-qos',
+                'default': False,
+            },
         }
         ctxt = self.get_neutron_options({})
         for rid in relation_ids('neutron-plugin-api'):
             for unit in related_units(rid):
                 rdata = relation_get(rid=rid, unit=unit)
+                # The l2-population key is used by the context as a way of
+                # checking if the api service on the other end is sending data
+                # in a recent format.
                 if 'l2-population' in rdata:
                     ctxt.update(self.get_neutron_options(rdata))
 
+        if ctxt['enable_qos']:
+            ctxt['extension_drivers'] = 'qos'
+        else:
+            ctxt['extension_drivers'] = ''
+
        return ctxt
 
     def get_neutron_options(self, rdata):
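
The rewritten get_network_addresses() loop drives everything off ADDRESS_MAP from contrib.openstack.ip. A rough sketch of the mapping shape it relies on, trimmed to the two keys used here (the option names come from the removed code; the real mapping in charmhelpers/contrib/openstack/ip.py carries more keys):

# Approximate, illustrative shape of ADDRESS_MAP as consumed above; only
# the 'config' and 'binding' entries are read by get_network_addresses().
INTERNAL, ADMIN, PUBLIC = 'int', 'admin', 'public'
ADDRESS_MAP = {
    INTERNAL: {'config': 'os-internal-network', 'binding': 'internal'},
    ADMIN: {'config': 'os-admin-network', 'binding': 'admin'},
    PUBLIC: {'config': 'os-public-network', 'binding': 'public'},
}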

hooks/charmhelpers/contrib/openstack/keystone.py (+1, -1)

@@ -29,7 +29,7 @@ def get_api_suffix(api_version):
     @returns the api suffix formatted according to the given api
     version
     """
-    return 'v2.0' if api_version in (2, "2.0") else 'v3'
+    return 'v2.0' if api_version in (2, "2", "2.0") else 'v3'
 
 
 def format_endpoint(schema, addr, port, api_version):

hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+4, -1)

@@ -1,6 +1,6 @@
 ###############################################################################
 # [ WARNING ]
-# cinder configuration file maintained by Juju
+# ceph configuration file maintained by Juju
 # local changes may be overwritten.
 ###############################################################################
 [global]
@@ -12,6 +12,9 @@ mon host = {{ mon_hosts }}
 log to syslog = {{ use_syslog }}
 err to syslog = {{ use_syslog }}
 clog to syslog = {{ use_syslog }}
+{% if rbd_features %}
+rbd default features = {{ rbd_features }}
+{% endif %}
 
 [client]
 {% if rbd_client_cache_settings -%}

hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+2, -4)

@@ -1,6 +1,6 @@
 global
-    log {{ local_host }} local0
-    log {{ local_host }} local1 notice
+    log /var/lib/haproxy/dev/log local0
+    log /var/lib/haproxy/dev/log local1 notice
     maxconn 20000
     user haproxy
     group haproxy
@@ -48,9 +48,7 @@ listen stats
 {% for service, ports in service_ports.items() -%}
 frontend tcp-in_{{ service }}
     bind *:{{ ports[0] }}
-    {% if ipv6 -%}
     bind :::{{ ports[0] }}
-    {% endif -%}
     {% for frontend in frontends -%}
     acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
     use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}

hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications (+8, -0)

@@ -0,0 +1,8 @@
+{% if transport_url -%}
+[oslo_messaging_notifications]
+driver = messagingv2
+transport_url = {{ transport_url }}
+{% if notification_topics -%}
+topics = {{ notification_topics }}
+{% endif -%}
+{% endif -%}
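
A quick way to see what the new section renders to; the template body is inlined here for illustration and the context values are placeholders:

from jinja2 import Template

# Same template text as the new fragment above, rendered with sample values.
tmpl = Template(
    "{% if transport_url -%}\n"
    "[oslo_messaging_notifications]\n"
    "driver = messagingv2\n"
    "transport_url = {{ transport_url }}\n"
    "{% if notification_topics -%}\n"
    "topics = {{ notification_topics }}\n"
    "{% endif -%}\n"
    "{% endif -%}\n")
print(tmpl.render(transport_url='rabbit://user:pass@10.0.0.10:5672/openstack',
                  notification_topics='notifications'))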

hooks/charmhelpers/contrib/openstack/templating.py (+5, -2)

@@ -20,7 +20,8 @@ from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import (
     log,
     ERROR,
-    INFO
+    INFO,
+    TRACE
 )
 from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
 
@@ -80,8 +81,10 @@ def get_loader(templates_dir, os_release):
             loaders.insert(0, FileSystemLoader(tmpl_dir))
         if rel == os_release:
             break
+    # demote this log to the lowest level; we don't really need to see these
+    # logs in production even when debugging.
     log('Creating choice loader with dirs: %s' %
-        [l.searchpath for l in loaders], level=INFO)
+        [l.searchpath for l in loaders], level=TRACE)
     return ChoiceLoader(loaders)
 

hooks/charmhelpers/contrib/openstack/utils.py (+210, -151)

@@ -26,11 +26,12 @@ import functools
 import shutil
 
 import six
-import tempfile
 import traceback
 import uuid
 import yaml
 
+from charmhelpers import deprecate
+
 from charmhelpers.contrib.network import ip
 
 from charmhelpers.core import unitdata
@@ -41,7 +42,6 @@ from charmhelpers.core.hookenv import (
     config,
     log as juju_log,
     charm_dir,
-    DEBUG,
     INFO,
     ERROR,
     related_units,
@@ -51,6 +51,7 @@ from charmhelpers.core.hookenv import (
     status_set,
     hook_name,
     application_version_set,
+    cached,
 )
 
 from charmhelpers.core.strutils import BasicStringComparator
@@ -82,11 +83,21 @@ from charmhelpers.core.host import (
     restart_on_change_helper,
 )
 from charmhelpers.fetch import (
-    apt_install,
     apt_cache,
     install_remote,
+    import_key as fetch_import_key,
+    add_source as fetch_add_source,
+    SourceConfigError,
+    GPGKeyError,
     get_upstream_version
 )
+
+from charmhelpers.fetch.snap import (
+    snap_install,
+    snap_refresh,
+    SNAP_CHANNELS,
+)
+
 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
 from charmhelpers.contrib.openstack.exceptions import OSContextError
@@ -175,7 +186,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('ocata',
         ['2.11.0', '2.12.0', '2.13.0']),
     ('pike',
-        ['2.13.0']),
+        ['2.13.0', '2.15.0']),
 ])
 
 # >= Liberty version->codename mapping
@@ -324,8 +335,10 @@ def get_os_codename_install_source(src):
         return ca_rel
 
     # Best guess match based on deb string provided
-    if src.startswith('deb') or src.startswith('ppa'):
-        for k, v in six.iteritems(OPENSTACK_CODENAMES):
+    if (src.startswith('deb') or
+            src.startswith('ppa') or
+            src.startswith('snap')):
+        for v in OPENSTACK_CODENAMES.values():
             if v in src:
                 return v
 
@@ -394,6 +407,19 @@ def get_swift_codename(version):
 
 def get_os_codename_package(package, fatal=True):
     '''Derive OpenStack release codename from an installed package.'''
+
+    if snap_install_requested():
+        cmd = ['snap', 'list', package]
+        try:
+            out = subprocess.check_output(cmd)
+        except subprocess.CalledProcessError as e:
+            return None
+        lines = out.split('\n')
+        for line in lines:
+            if package in line:
+                # Second item in list is Version
+                return line.split()[1]
+
     import apt_pkg as apt
 
     cache = apt_cache()
@@ -469,13 +495,14 @@ def get_os_version_package(pkg, fatal=True):
     # error_out(e)
 
 
-os_rel = None
+# Module local cache variable for the os_release.
+_os_rel = None
 
 
 def reset_os_release():
     '''Unset the cached os_release version'''
-    global os_rel
-    os_rel = None
+    global _os_rel
+    _os_rel = None
 
 
 def os_release(package, base='essex', reset_cache=False):
@@ -489,150 +516,77 @@ def os_release(package, base='essex', reset_cache=False):
     the installation source, the earliest release supported by the charm should
     be returned.
     '''
-    global os_rel
+    global _os_rel
     if reset_cache:
         reset_os_release()
-    if os_rel:
-        return os_rel
-    os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or
-              get_os_codename_package(package, fatal=False) or
-              get_os_codename_install_source(config('openstack-origin')) or
-              base)
-    return os_rel
+    if _os_rel:
+        return _os_rel
+    _os_rel = (
+        git_os_codename_install_source(config('openstack-origin-git')) or
+        get_os_codename_package(package, fatal=False) or
+        get_os_codename_install_source(config('openstack-origin')) or
+        base)
+    return _os_rel
 
 
+@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log)
 def import_key(keyid):
-    key = keyid.strip()
-    if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
-            key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
-        juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
-        juju_log("Importing ASCII Armor PGP key", level=DEBUG)
-        with tempfile.NamedTemporaryFile() as keyfile:
-            with open(keyfile.name, 'w') as fd:
-                fd.write(key)
-                fd.write("\n")
-
-            cmd = ['apt-key', 'add', keyfile.name]
-            try:
-                subprocess.check_call(cmd)
-            except subprocess.CalledProcessError:
-                error_out("Error importing PGP key '%s'" % key)
-    else:
-        juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
-        juju_log("Importing PGP key from keyserver", level=DEBUG)
-        cmd = ['apt-key', 'adv', '--keyserver',
-               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
-        try:
-            subprocess.check_call(cmd)
-        except subprocess.CalledProcessError:
-            error_out("Error importing PGP key '%s'" % key)
-
-
-def get_source_and_pgp_key(input):
-    """Look for a pgp key ID or ascii-armor key in the given input."""
-    index = input.strip()
-    index = input.rfind('|')
-    if index < 0:
-        return input, None
-
-    key = input[index + 1:].strip('|')
-    source = input[:index]
-    return source, key
-
-
-def configure_installation_source(rel):
-    '''Configure apt installation source.'''
-    if rel == 'distro':
-        return
-    elif rel == 'distro-proposed':
-        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
-        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
-            f.write(DISTRO_PROPOSED % ubuntu_rel)
-    elif rel[:4] == "ppa:":
-        src, key = get_source_and_pgp_key(rel)
-        if key:
-            import_key(key)
-
-        subprocess.check_call(["add-apt-repository", "-y", src])
-    elif rel[:3] == "deb":
-        src, key = get_source_and_pgp_key(rel)
-        if key:
-            import_key(key)
-
-        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
-            f.write(src)
-    elif rel[:6] == 'cloud:':
-        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
-        rel = rel.split(':')[1]
-        u_rel = rel.split('-')[0]
-        ca_rel = rel.split('-')[1]
-
-        if u_rel != ubuntu_rel:
-            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
-                'version (%s)' % (ca_rel, ubuntu_rel)
-            error_out(e)
+    """Import a key, either ASCII armored, or a GPG key id.
 
-        if 'staging' in ca_rel:
-            # staging is just a regular PPA.
-            os_rel = ca_rel.split('/')[0]
-            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
-            cmd = 'add-apt-repository -y %s' % ppa
-            subprocess.check_call(cmd.split(' '))
-            return
-
-        # map charm config options to actual archive pockets.
-        pockets = {
-            'folsom': 'precise-updates/folsom',
-            'folsom/updates': 'precise-updates/folsom',
-            'folsom/proposed': 'precise-proposed/folsom',
-            'grizzly': 'precise-updates/grizzly',
-            'grizzly/updates': 'precise-updates/grizzly',
-            'grizzly/proposed': 'precise-proposed/grizzly',
-            'havana': 'precise-updates/havana',
-            'havana/updates': 'precise-updates/havana',
-            'havana/proposed': 'precise-proposed/havana',
-            'icehouse': 'precise-updates/icehouse',
-            'icehouse/updates': 'precise-updates/icehouse',
-            'icehouse/proposed': 'precise-proposed/icehouse',
-            'juno': 'trusty-updates/juno',
-            'juno/updates': 'trusty-updates/juno',
-            'juno/proposed': 'trusty-proposed/juno',
-            'kilo': 'trusty-updates/kilo',
-            'kilo/updates': 'trusty-updates/kilo',
-            'kilo/proposed': 'trusty-proposed/kilo',
-            'liberty': 'trusty-updates/liberty',
-            'liberty/updates': 'trusty-updates/liberty',
-            'liberty/proposed': 'trusty-proposed/liberty',
-            'mitaka': 'trusty-updates/mitaka',
-            'mitaka/updates': 'trusty-updates/mitaka',
-            'mitaka/proposed': 'trusty-proposed/mitaka',
-            'newton': 'xenial-updates/newton',
-            'newton/updates': 'xenial-updates/newton',
-            'newton/proposed': 'xenial-proposed/newton',
-            'ocata': 'xenial-updates/ocata',
-            'ocata/updates': 'xenial-updates/ocata',
-            'ocata/proposed': 'xenial-proposed/ocata',
-            'pike': 'xenial-updates/pike',
-            'pike/updates': 'xenial-updates/pike',
-            'pike/proposed': 'xenial-proposed/pike',
-            'queens': 'xenial-updates/queens',
-            'queens/updates': 'xenial-updates/queens',
-            'queens/proposed': 'xenial-proposed/queens',
-        }
+    @param keyid: the key in ASCII armor format, or a GPG key id.
+    @raises SystemExit() via sys.exit() on failure.
+    """
+    try:
+        return fetch_import_key(keyid)
+    except GPGKeyError as e:
+        error_out("Could not import key: {}".format(str(e)))
 
-        try:
-            pocket = pockets[ca_rel]
-        except KeyError:
-            e = 'Invalid Cloud Archive release specified: %s' % rel
-            error_out(e)
 
-        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
-        apt_install('ubuntu-cloud-keyring', fatal=True)
+def get_source_and_pgp_key(source_and_key):
+    """Look for a pgp key ID or ascii-armor key in the given input.
 
-        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
-            f.write(src)
-    else:
-        error_out("Invalid openstack-release specified: %s" % rel)
+    :param source_and_key: String, "source_spec|keyid" where '|keyid' is
+        optional.
+    :returns (source_spec, key_id OR None) as a tuple.  Returns None for key_id
+        if there was no '|' in the source_and_key string.
+    """
+    try:
+        source, key = source_and_key.split('|', 2)
+        return source, key or None
+    except ValueError:
+        return source_and_key, None
+
+
+@deprecate("use charmhelpers.fetch.add_source() instead.",
+           "2017-07", log=juju_log)
+def configure_installation_source(source_plus_key):
+    """Configure an installation source.
+
+    The functionality is provided by charmhelpers.fetch.add_source().
+    The difference between the two functions is that the add_source() signature
+    requires the key to be passed directly, whereas this function passes an
+    optional key by appending '|<key>' to the end of the source specification
+    'source'.
+
+    Another difference from add_source() is that this function calls sys.exit(1)
+    if the configuration fails, whereas add_source() raises
+    SourceConfigError().  Another difference is that add_source()
+    silently fails (with a juju_log command) if there is no matching source to
+    configure, whereas this function fails with a sys.exit(1)
+
+    :param source_plus_key: String of "source_spec|keyid" -- see above for
+        details.
+
+    Note that the behaviour on error is to log the error to the juju log and
+    then call sys.exit(1).
+    """
+    # extract the key if there is one, denoted by a '|' in the source
+    source, key = get_source_and_pgp_key(source_plus_key)
+
+    # handle the ordinary sources via add_source
+    try:
+        fetch_add_source(source, key, fail_invalid=True)
+    except SourceConfigError as se:
+        error_out(str(se))
 
 
 def config_value_changed(option):
@@ -677,12 +631,14 @@ def openstack_upgrade_available(package):
 
     :returns: bool:    : Returns True if configured installation source offers
                          a newer version of package.
-
     """
 
     import apt_pkg as apt
     src = config('openstack-origin')
     cur_vers = get_os_version_package(package)
+    if not cur_vers:
+        # The package has not been installed yet; do not attempt an upgrade
+        return False
     if "swift" in package:
         codename = get_os_codename_install_source(src)
         avail_vers = get_os_version_codename_swift(codename)
@@ -1933,6 +1889,30 @@ def pausable_restart_on_change(restart_map, stopstart=False,
     return wrap
 
 
+def ordered(orderme):
+    """Converts the provided dictionary into a collections.OrderedDict.
+
+    The items in the returned OrderedDict will be inserted based on the
+    natural sort order of the keys. Nested dictionaries will also be sorted
+    in order to ensure fully predictable ordering.
+
+    :param orderme: the dict to order
+    :return: collections.OrderedDict
+    :raises: ValueError: if `orderme` isn't a dict instance.
+    """
+    if not isinstance(orderme, dict):
+        raise ValueError('argument must be a dict type')
+
+    result = OrderedDict()
+    for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]):
+        if isinstance(v, dict):
+            result[k] = ordered(v)
+        else:
+            result[k] = v
+
+    return result
+
+
 def config_flags_parser(config_flags):
     """Parses config flags string into dict.
 
@@ -1944,15 +1924,13 @@ def config_flags_parser(config_flags):
         example, a string in the format of 'key1=value1, key2=value2' will
         return a dict of:
 
-             {'key1': 'value1',
-              'key2': 'value2'}.
+             {'key1': 'value1', 'key2': 'value2'}.
 
       2. A string in the above format, but supporting a comma-delimited list
         of values for the same key. For example, a string in the format of
        'key1=value1, key2=value3,value4,value5' will return a dict of:
 
-             {'key1', 'value1',
-              'key2', 'value2,value3,value4'}
+             {'key1': 'value1', 'key2': 'value2,value3,value4'}
 
      3. A string containing a colon character (:) prior to an equal
        character (=) will be treated as yaml and parsed as such. This can be
@@ -1972,7 +1950,7 @@ def config_flags_parser(config_flags):
     equals = config_flags.find('=')
     if colon > 0:
         if colon < equals or equals < 0:
-            return yaml.safe_load(config_flags)
+            return ordered(yaml.safe_load(config_flags))
 
     if config_flags.find('==') >= 0:
         juju_log("config_flags is not in expected format (key=value)",
@@ -1985,7 +1963,7 @@ def config_flags_parser(config_flags):
     # split on '='.
     split = config_flags.strip(' =').split('=')
     limit = len(split)
-    flags = {}
+    flags = OrderedDict()
     for i in range(0, limit - 1):
         current = split[i]
         next = split[i + 1]
@@ -2052,3 +2030,84 @@ def token_cache_pkgs(source=None, release=None):
     if enable_memcache(source=source, release=release):
         packages.extend(['memcached', 'python-memcache'])
     return packages
+
+
+def update_json_file(filename, items):
+    """Updates the json `filename` with a given dict.
+    :param filename: json filename (i.e.: /etc/glance/policy.json)
+    :param items: dict of items to update
+    """
+    with open(filename) as fd:
+        policy = json.load(fd)
+    policy.update(items)
+    with open(filename, "w") as fd:
+        fd.write(json.dumps(policy, indent=4))
+
+
+@cached
+def snap_install_requested():
+    """ Determine if installing from snaps
+
+    If openstack-origin is of the form snap:channel-series-release
+    and the channel is in SNAP_CHANNELS, return True.
+    """
+    origin = config('openstack-origin') or ""
+    if not origin.startswith('snap:'):
+        return False
+
+    _src = origin[5:]
+    channel, series, release = _src.split('-')
+    if channel.lower() in SNAP_CHANNELS:
+        return True
+    return False
+
+
+def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
+    """Generate a dictionary of snap install information from origin
+
+    @param snaps: List of snaps
+    @param src: String of openstack-origin or source of the form
+        snap:channel-series-track
+    @param mode: String classic, devmode or jailmode
+    @returns: Dictionary of snaps with channels and modes
+    """
+
+    if not src.startswith('snap:'):
+        juju_log("Snap source is not a snap origin", 'WARN')
+        return {}
+
+    _src = src[5:]
+    _channel, _series, _release = _src.split('-')
+    channel = '--channel={}/{}'.format(_release, _channel)
+
+    return {snap: {'channel': channel, 'mode': mode}
+            for snap in snaps}
+
+
+def install_os_snaps(snaps, refresh=False):
+    """Install OpenStack snaps from channel and with mode
+
+    @param snaps: Dictionary of snaps with channels and modes of the form:
+        {'snap_name': {'channel': 'snap_channel',
+                       'mode': 'snap_mode'}}
+        Where channel is a snapstore channel and mode is --classic, --devmode
+        or --jailmode.
+    @param refresh: Boolean; refresh already-installed snaps rather than
+        installing them.
+    """
+
+    def _ensure_flag(flag):
+        if flag.startswith('--'):
+            return flag
+        return '--{}'.format(flag)
+
+    if refresh:
+        for snap in snaps.keys():
+            snap_refresh(snap,
+                         _ensure_flag(snaps[snap]['channel']),
+                         _ensure_flag(snaps[snap]['mode']))
+    else:
+        for snap in snaps.keys():
+            snap_install(snap,
+                         _ensure_flag(snaps[snap]['channel']),
+                         _ensure_flag(snaps[snap]['mode']))
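
A small worked example of how the snap origin string is consumed by the new helpers; the origin value and snap name below are illustrative:

# Assume openstack-origin is 'snap:beta-xenial-ocata'.
src = 'snap:beta-xenial-ocata'
_channel, _series, _release = src[5:].split('-')        # 'beta', 'xenial', 'ocata'
channel = '--channel={}/{}'.format(_release, _channel)  # '--channel=ocata/beta'

# get_snaps_install_info_from_origin(['keystone'], src) would then return
# {'keystone': {'channel': '--channel=ocata/beta', 'mode': 'classic'}},
# and install_os_snaps() would run: snap install keystone --channel=ocata/beta --classic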

hooks/charmhelpers/contrib/storage/linux/bcache.py (+74, -0)

@@ -0,0 +1,74 @@
+# Copyright 2017 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import json
+
+from charmhelpers.core.hookenv import log
+
+stats_intervals = ['stats_day', 'stats_five_minute',
+                   'stats_hour', 'stats_total']
+
+SYSFS = '/sys'
+
+
+class Bcache(object):
+    """Bcache behaviour
+    """
+
+    def __init__(self, cachepath):
+        self.cachepath = cachepath
+
+    @classmethod
+    def fromdevice(cls, devname):
+        return cls('{}/block/{}/bcache'.format(SYSFS, devname))
+
+    def __str__(self):
+        return self.cachepath
+
+    def get_stats(self, interval):
+        """Get cache stats
+        """
+        intervaldir = 'stats_{}'.format(interval)
+        path = "{}/{}".format(self.cachepath, intervaldir)
+        out = dict()
+        for elem in os.listdir(path):
+            out[elem] = open('{}/{}'.format(path, elem)).read().strip()
+        return out
+
+
+def get_bcache_fs():
+    """Return all cache sets
+    """
+    cachesetroot = "{}/fs/bcache".format(SYSFS)
+    try:
+        dirs = os.listdir(cachesetroot)
+    except OSError:
+        log("No bcache fs found")
+        return []
+    cacheset = set([Bcache('{}/{}'.format(cachesetroot, d)) for d in dirs if not d.startswith('register')])
+    return cacheset
+
+
+def get_stats_action(cachespec, interval):
+    """Action for getting bcache statistics for a given cachespec.
+    Cachespec can either be a device name, eg. 'sdb', which will retrieve
+    cache stats for the given device, or 'global', which will retrieve stats
+    for all cachesets
+    """
+    if cachespec == 'global':
+        caches = get_bcache_fs()
+    else:
+        caches = [Bcache.fromdevice(cachespec)]
+    res = dict((c.cachepath, c.get_stats(interval)) for c in caches)
+    return json.dumps(res, indent=4, separators=(',', ': '))
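
A short sketch of how a charm action might drive this new helper; it assumes a host that actually has a bcache-backed device (here 'sdb') exposing stats under /sys:

from charmhelpers.contrib.storage.linux.bcache import get_stats_action

# Per-device stats: reads /sys/block/sdb/bcache/stats_hour/* and
# returns them as a JSON document.
print(get_stats_action('sdb', 'hour'))

# Or aggregate stats for every cache set under /sys/fs/bcache.
print(get_stats_action('global', 'day'))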

hooks/charmhelpers/contrib/storage/linux/ceph.py (+43, -1)

@@ -63,6 +63,7 @@ from charmhelpers.core.host import (
 from charmhelpers.fetch import (
     apt_install,
 )
+from charmhelpers.core.unitdata import kv
 
 from charmhelpers.core.kernel import modprobe
 from charmhelpers.contrib.openstack.utils import config_flags_parser
@@ -1314,6 +1315,47 @@ def send_request_if_needed(request, relation='ceph'):
             relation_set(relation_id=rid, broker_req=request.request)
 
 
+def is_broker_action_done(action, rid=None, unit=None):
+    """Check whether broker action has completed yet.
+
+    @param action: name of action to be performed
+    @returns True if action complete otherwise False
+    """
+    rdata = relation_get(rid, unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    if not broker_rsp:
+        return False
+
+    rsp = CephBrokerRsp(broker_rsp)
+    unit_name = local_unit().partition('/')[2]
+    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
+    kvstore = kv()
+    val = kvstore.get(key=key)
+    if val and val == rsp.request_id:
+        return True
+
+    return False
+
+
+def mark_broker_action_done(action, rid=None, unit=None):
+    """Mark action as having been completed.
+
+    @param action: name of action to be performed
+    @returns None
+    """
+    rdata = relation_get(rid, unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    if not broker_rsp:
+        return
+
+    rsp = CephBrokerRsp(broker_rsp)
+    unit_name = local_unit().partition('/')[2]
+    key = "unit_{}_ceph_broker_action.{}".format(unit_name, action)
+    kvstore = kv()
+    kvstore.set(key=key, value=rsp.request_id)
+    kvstore.flush()
+
+
 class CephConfContext(object):
     """Ceph config (ceph.conf) context.
 
@@ -1330,7 +1372,7 @@ class CephConfContext(object):
             return {}
 
         conf = config_flags_parser(conf)
-        if type(conf) != dict:
+        if not isinstance(conf, dict):
             log("Provided config-flags is not a dictionary - ignoring",
                 level=WARNING)
             return {}
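
The intended usage pattern for the new broker-action helpers, as a hedged sketch of a ceph relation hook; the action name and configure_storage() are placeholders for charm-specific code:

from charmhelpers.contrib.storage.linux.ceph import (
    is_broker_action_done,
    mark_broker_action_done,
)


def ceph_changed(rid=None, unit=None):
    # Skip work that was already done for this broker response, then
    # record the response id so a re-fired hook stays idempotent.
    if is_broker_action_done('create_pool', rid, unit):
        return
    configure_storage()   # charm-specific work (assumed)
    mark_broker_action_done('create_pool', rid, unit)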

hooks/charmhelpers/core/hookenv.py (+40, -0)

@@ -43,6 +43,7 @@ ERROR = "ERROR"
 WARNING = "WARNING"
 INFO = "INFO"
 DEBUG = "DEBUG"
+TRACE = "TRACE"
 MARKER = object()
 
 cache = {}
@@ -202,6 +203,27 @@ def service_name():
     return local_unit().split('/')[0]
 
 
+def principal_unit():
+    """Returns the principal unit of this unit, otherwise None"""
+    # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
+    principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
+    # If it's empty, then this unit is the principal
+    if principal_unit == '':
+        return os.environ['JUJU_UNIT_NAME']
+    elif principal_unit is not None:
+        return principal_unit
+    # For Juju 2.1 and below, let's try to work out the principal unit by
+    # inspecting the various charms' metadata.yaml.
+    for reltype in relation_types():
+        for rid in relation_ids(reltype):
+            for unit in related_units(rid):
+                md = _metadata_unit(unit)
+                subordinate = md.pop('subordinate', None)
+                if not subordinate:
+                    return unit
+    return None
+
+
 @cached
 def remote_service_name(relid=None):
     """The remote service name for a given relation-id (or the current relation)"""
@@ -478,6 +500,21 @@ def metadata():
         return yaml.safe_load(md)
 
 
+def _metadata_unit(unit):
+    """Given the name of a unit (e.g. apache2/0), get the unit charm's
+    metadata.yaml. Very similar to metadata() but allows us to inspect
+    other units. Unit needs to be co-located, such as a subordinate or
+    principal/primary.
+
+    :returns: metadata.yaml as a python object.
+
+    """
+    basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
+    unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
+    with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md:
+        return yaml.safe_load(md)
+
+
 @cached
 def relation_types():
     """Get a list of relation types supported by this charm"""
@@ -753,6 +790,9 @@ class Hooks(object):
 
 def charm_dir():
     """Return the root directory of the current charm"""
+    d = os.environ.get('JUJU_CHARM_DIR')
+    if d is not None:
+        return d
     return os.environ.get('CHARM_DIR')
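
A brief sketch of how the new principal_unit() helper resolves on Juju 2.2 and later (the environment values below are illustrative and would normally be set by the agent; on older Juju the helper falls back to inspecting co-located units' metadata.yaml):

import os

# Juju 2.2+ exports JUJU_PRINCIPAL_UNIT; an empty value means this unit
# is itself the principal.
os.environ['JUJU_UNIT_NAME'] = 'ceilometer-agent/0'
os.environ['JUJU_PRINCIPAL_UNIT'] = 'nova-compute/0'

from charmhelpers.core.hookenv import principal_unit
print(principal_unit())   # -> 'nova-compute/0'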
hooks/charmhelpers/core/host.py (+32, -6)

@@ -34,7 +34,7 @@ import six
 
 from contextlib import contextmanager
 from collections import OrderedDict
-from .hookenv import log
+from .hookenv import log, DEBUG
 from .fstab import Fstab
 from charmhelpers.osplatform import get_platform
 
@@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
     upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
     sysv_file = os.path.join(initd_dir, service_name)
     if init_is_systemd():
+        service('disable', service_name)
         service('mask', service_name)
     elif os.path.exists(upstart_file):
         override_path = os.path.join(
@@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init",
     sysv_file = os.path.join(initd_dir, service_name)
     if init_is_systemd():
         service('unmask', service_name)
+        service('enable', service_name)
     elif os.path.exists(upstart_file):
         override_path = os.path.join(
             init_dir, '{}.override'.format(service_name))
@@ -485,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False):
 
 def write_file(path, content, owner='root', group='root', perms=0o444):
     """Create or overwrite a file with the contents of a byte string."""
-    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
     uid = pwd.getpwnam(owner).pw_uid
     gid = grp.getgrnam(group).gr_gid
-    with open(path, 'wb') as target:
-        os.fchown(target.fileno(), uid, gid)
-        os.fchmod(target.fileno(), perms)
-        target.write(content)
+    # let's see if we can grab the file and compare the content, to avoid
+    # doing a write.
+    existing_content = None
+    existing_uid, existing_gid = None, None
+    try:
+        with open(path, 'rb') as target:
+            existing_content = target.read()
+        stat = os.stat(path)
+        existing_uid, existing_gid = stat.st_uid, stat.st_gid
+    except:
+        pass
+    if content != existing_content:
+        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
+            level=DEBUG)
+        with open(path, 'wb') as target:
+            os.fchown(target.fileno(), uid, gid)
+            os.fchmod(target.fileno(), perms)
+            target.write(content)
+        return
+    # the contents were the same, but we might still need to change the
+    # ownership.
+    if existing_uid != uid:
+        log("Changing uid on already existing content: {} -> {}"
+            .format(existing_uid, uid), level=DEBUG)
+        os.chown(path, uid, -1)
+    if existing_gid != gid:
+        log("Changing gid on already existing content: {} -> {}"
+            .format(existing_gid, gid), level=DEBUG)
+        os.chown(path, -1, gid)
 
 
 def fstab_remove(mp):
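
A hedged sketch of the new idempotent behaviour of write_file(); the path is a placeholder and the calls assume they run as root on a unit:

from charmhelpers.core.host import write_file

write_file('/etc/myapp/app.conf', b'setting = 1\n',
           owner='root', group='root', perms=0o644)
# Second call finds identical content and ownership, so the rewrite (and
# the DEBUG log line) is skipped entirely.
write_file('/etc/myapp/app.conf', b'setting = 1\n',
           owner='root', group='root', perms=0o644)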

hooks/charmhelpers/fetch/__init__.py (+17, -9)

@@ -48,6 +48,13 @@ class AptLockError(Exception):
48 48
     pass
49 49
 
50 50
 
51
+class GPGKeyError(Exception):
52
+    """Exception occurs when a GPG key cannot be fetched or used.  The message
53
+    indicates what the problem is.
54
+    """
55
+    pass
56
+
57
+
51 58
 class BaseFetchHandler(object):
52 59
 
53 60
     """Base class for FetchHandler implementations in fetch plugins"""
@@ -77,21 +84,22 @@ module = "charmhelpers.fetch.%s" % __platform__
77 84
 fetch = importlib.import_module(module)
78 85
 
79 86
 filter_installed_packages = fetch.filter_installed_packages
80
-install = fetch.install
81
-upgrade = fetch.upgrade
82
-update = fetch.update
83
-purge = fetch.purge
87
+install = fetch.apt_install
88
+upgrade = fetch.apt_upgrade
89
+update = _fetch_update = fetch.apt_update
90
+purge = fetch.apt_purge
84 91
 add_source = fetch.add_source
85 92
 
86 93
 if __platform__ == "ubuntu":
87 94
     apt_cache = fetch.apt_cache
88
-    apt_install = fetch.install
89
-    apt_update = fetch.update
90
-    apt_upgrade = fetch.upgrade
91
-    apt_purge = fetch.purge
95
+    apt_install = fetch.apt_install
96
+    apt_update = fetch.apt_update
97
+    apt_upgrade = fetch.apt_upgrade
98
+    apt_purge = fetch.apt_purge
92 99
     apt_mark = fetch.apt_mark
93 100
     apt_hold = fetch.apt_hold
94 101
     apt_unhold = fetch.apt_unhold
102
+    import_key = fetch.import_key
95 103
     get_upstream_version = fetch.get_upstream_version
96 104
 elif __platform__ == "centos":
97 105
     yum_search = fetch.yum_search
@@ -135,7 +143,7 @@ def configure_sources(update=False,
135 143
         for source, key in zip(sources, keys):
136 144
             add_source(source, key)
137 145
     if update:
138
-        fetch.update(fatal=True)
146
+        _fetch_update(fatal=True)
139 147
 
140 148
 
141 149
 def install_remote(source, *args, **kwargs):
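A short sketch of how a charm consumes the renamed helpers through the charmhelpers.fetch shim; on an Ubuntu unit both the generic and the apt_-prefixed names now point at the same apt-backed functions (the package name below is illustrative):

from charmhelpers import fetch

fetch.add_source('distro')                   # no-op source, shown for symmetry
fetch.apt_update(fatal=True)                 # same object as fetch.update
fetch.apt_install(['haproxy'], fatal=True)   # same object as fetch.install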

+ 1
- 1
hooks/charmhelpers/fetch/centos.py View File

@@ -132,7 +132,7 @@ def add_source(source, key=None):
132 132
                 key_file.write(key)
133 133
                 key_file.flush()
134 134
                 key_file.seek(0)
135
-            subprocess.check_call(['rpm', '--import', key_file])
135
+                subprocess.check_call(['rpm', '--import', key_file.name])
136 136
         else:
137 137
             subprocess.check_call(['rpm', '--import', key])
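The one-line fix above matters because subprocess arguments must be path strings, not file objects; a standalone sketch of the corrected pattern (the helper name is illustrative):

import subprocess
from tempfile import NamedTemporaryFile

def rpm_import_key(ascii_key):
    # Keep the file open so it is not deleted, flush it, and hand the
    # *path* (key_file.name) to rpm rather than the file object.
    with NamedTemporaryFile('w') as key_file:
        key_file.write(ascii_key)
        key_file.flush()
        subprocess.check_call(['rpm', '--import', key_file.name])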
138 138
 

+ 17
- 5
hooks/charmhelpers/fetch/snap.py View File

@@ -18,15 +18,23 @@ If writing reactive charms, use the snap layer:
18 18
 https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
19 19
 """
20 20
 import subprocess
21
-from os import environ
21
+import os
22 22
 from time import sleep
23 23
 from charmhelpers.core.hookenv import log
24 24
 
25 25
 __author__ = 'Joseph Borg <joseph.borg@canonical.com>'
26 26
 
27
-SNAP_NO_LOCK = 1  # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved).
27
+# The return code for "couldn't acquire lock" in Snap
28
+# (hopefully this will be improved).
29
+SNAP_NO_LOCK = 1
28 30
 SNAP_NO_LOCK_RETRY_DELAY = 10  # Wait X seconds between Snap lock checks.
29 31
 SNAP_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
32
+SNAP_CHANNELS = [
33
+    'edge',
34
+    'beta',
35
+    'candidate',
36
+    'stable',
37
+]
30 38
 
31 39
 
32 40
 class CouldNotAcquireLockException(Exception):
@@ -47,13 +55,17 @@ def _snap_exec(commands):
47 55
 
48 56
     while return_code is None or return_code == SNAP_NO_LOCK:
49 57
         try:
50
-            return_code = subprocess.check_call(['snap'] + commands, env=environ)
58
+            return_code = subprocess.check_call(['snap'] + commands,
59
+                                                env=os.environ)
51 60
         except subprocess.CalledProcessError as e:
52 61
             retry_count += + 1
53 62
             if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
54
-                raise CouldNotAcquireLockException('Could not aquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT)
63
+                raise CouldNotAcquireLockException(
64
+                    'Could not acquire lock after {} attempts'
65
+                    .format(SNAP_NO_LOCK_RETRY_COUNT))
55 66
             return_code = e.returncode
56
-            log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN')
67
+            log('Snap failed to acquire lock, trying again in {} seconds.'
68
+                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
57 69
             sleep(SNAP_NO_LOCK_RETRY_DELAY)
58 70
 
59 71
     return return_code
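The loop above follows a generic retry-on-lock pattern; a simplified, self-contained sketch (the constants and function name are illustrative, not charm-helpers API):

import subprocess
import time

LOCK_EXIT_CODE = 1     # snap's "couldn't acquire lock" return code
RETRY_DELAY = 10       # seconds between attempts
RETRY_COUNT = 30       # maximum number of retries

def run_with_lock_retry(argv):
    for attempt in range(RETRY_COUNT + 1):
        try:
            return subprocess.check_call(argv)
        except subprocess.CalledProcessError as e:
            if e.returncode != LOCK_EXIT_CODE or attempt == RETRY_COUNT:
                raise
            time.sleep(RETRY_DELAY)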

+ 265
- 62
hooks/charmhelpers/fetch/ubuntu.py View File

@@ -12,29 +12,48 @@
12 12
 # See the License for the specific language governing permissions and
13 13
 # limitations under the License.
14 14
 
15
+from collections import OrderedDict
15 16
 import os
17
+import platform
18
+import re
16 19
 import six
17 20
 import time
18 21
 import subprocess
19
-
20 22
 from tempfile import NamedTemporaryFile
23
+
21 24
 from charmhelpers.core.host import (
22 25
     lsb_release
23 26
 )
24
-from charmhelpers.core.hookenv import log
25
-from charmhelpers.fetch import SourceConfigError
26
-
27
+from charmhelpers.core.hookenv import (
28
+    log,
29
+    DEBUG,
30
+    WARNING,
31
+)
32
+from charmhelpers.fetch import SourceConfigError, GPGKeyError
33
+
34
+PROPOSED_POCKET = (
35
+    "# Proposed\n"
36
+    "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe "
37
+    "multiverse restricted\n")
38
+PROPOSED_PORTS_POCKET = (
39
+    "# Proposed\n"
40
+    "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe "
41
+    "multiverse restricted\n")
42
+# Only supports 64bit (x86_64), ppc64le and aarch64 at the moment.
43
+ARCH_TO_PROPOSED_POCKET = {
44
+    'x86_64': PROPOSED_POCKET,
45
+    'ppc64le': PROPOSED_PORTS_POCKET,
46
+    'aarch64': PROPOSED_PORTS_POCKET,
47
+}
48
+CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
49
+CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
27 50
 CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
28 51
 deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
29 52
 """
30
-
31
-PROPOSED_POCKET = """# Proposed
32
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
33
-"""
34
-
35 53
 CLOUD_ARCHIVE_POCKETS = {
36 54
     # Folsom
37 55
     'folsom': 'precise-updates/folsom',
56
+    'folsom/updates': 'precise-updates/folsom',
38 57
     'precise-folsom': 'precise-updates/folsom',
39 58
     'precise-folsom/updates': 'precise-updates/folsom',
40 59
     'precise-updates/folsom': 'precise-updates/folsom',
@@ -43,6 +62,7 @@ CLOUD_ARCHIVE_POCKETS = {
43 62
     'precise-proposed/folsom': 'precise-proposed/folsom',
44 63
     # Grizzly
45 64
     'grizzly': 'precise-updates/grizzly',
65
+    'grizzly/updates': 'precise-updates/grizzly',
46 66
     'precise-grizzly': 'precise-updates/grizzly',
47 67
     'precise-grizzly/updates': 'precise-updates/grizzly',
48 68
     'precise-updates/grizzly': 'precise-updates/grizzly',
@@ -51,6 +71,7 @@ CLOUD_ARCHIVE_POCKETS = {
51 71
     'precise-proposed/grizzly': 'precise-proposed/grizzly',
52 72
     # Havana
53 73
     'havana': 'precise-updates/havana',
74
+    'havana/updates': 'precise-updates/havana',
54 75
     'precise-havana': 'precise-updates/havana',
55 76
     'precise-havana/updates': 'precise-updates/havana',
56 77
     'precise-updates/havana': 'precise-updates/havana',
@@ -59,6 +80,7 @@ CLOUD_ARCHIVE_POCKETS = {
59 80
     'precise-proposed/havana': 'precise-proposed/havana',
60 81
     # Icehouse
61 82
     'icehouse': 'precise-updates/icehouse',
83
+    'icehouse/updates': 'precise-updates/icehouse',
62 84
     'precise-icehouse': 'precise-updates/icehouse',
63 85
     'precise-icehouse/updates': 'precise-updates/icehouse',
64 86
     'precise-updates/icehouse': 'precise-updates/icehouse',
@@ -67,6 +89,7 @@ CLOUD_ARCHIVE_POCKETS = {
67 89
     'precise-proposed/icehouse': 'precise-proposed/icehouse',
68 90
     # Juno
69 91
     'juno': 'trusty-updates/juno',
92
+    'juno/updates': 'trusty-updates/juno',
70 93
     'trusty-juno': 'trusty-updates/juno',
71 94
     'trusty-juno/updates': 'trusty-updates/juno',
72 95
     'trusty-updates/juno': 'trusty-updates/juno',
@@ -75,6 +98,7 @@ CLOUD_ARCHIVE_POCKETS = {
75 98
     'trusty-proposed/juno': 'trusty-proposed/juno',
76 99
     # Kilo
77 100
     'kilo': 'trusty-updates/kilo',
101
+    'kilo/updates': 'trusty-updates/kilo',
78 102
     'trusty-kilo': 'trusty-updates/kilo',
79 103
     'trusty-kilo/updates': 'trusty-updates/kilo',
80 104
     'trusty-updates/kilo': 'trusty-updates/kilo',
@@ -83,6 +107,7 @@ CLOUD_ARCHIVE_POCKETS = {
83 107
     'trusty-proposed/kilo': 'trusty-proposed/kilo',
84 108
     # Liberty
85 109
     'liberty': 'trusty-updates/liberty',
110
+    'liberty/updates': 'trusty-updates/liberty',
86 111
     'trusty-liberty': 'trusty-updates/liberty',
87 112
     'trusty-liberty/updates': 'trusty-updates/liberty',
88 113
     'trusty-updates/liberty': 'trusty-updates/liberty',
@@ -91,6 +116,7 @@ CLOUD_ARCHIVE_POCKETS = {
91 116
     'trusty-proposed/liberty': 'trusty-proposed/liberty',
92 117
     # Mitaka
93 118
     'mitaka': 'trusty-updates/mitaka',
119
+    'mitaka/updates': 'trusty-updates/mitaka',
94 120
     'trusty-mitaka': 'trusty-updates/mitaka',
95 121
     'trusty-mitaka/updates': 'trusty-updates/mitaka',
96 122
     'trusty-updates/mitaka': 'trusty-updates/mitaka',
@@ -99,6 +125,7 @@ CLOUD_ARCHIVE_POCKETS = {
99 125
     'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
100 126
     # Newton
101 127
     'newton': 'xenial-updates/newton',
128
+    'newton/updates': 'xenial-updates/newton',
102 129
     'xenial-newton': 'xenial-updates/newton',
103 130
     'xenial-newton/updates': 'xenial-updates/newton',
104 131
     'xenial-updates/newton': 'xenial-updates/newton',
@@ -107,12 +134,13 @@ CLOUD_ARCHIVE_POCKETS = {
107 134
     'xenial-proposed/newton': 'xenial-proposed/newton',
108 135
     # Ocata
109 136
     'ocata': 'xenial-updates/ocata',
137
+    'ocata/updates': 'xenial-updates/ocata',
110 138
     'xenial-ocata': 'xenial-updates/ocata',
111 139
     'xenial-ocata/updates': 'xenial-updates/ocata',
112 140
     'xenial-updates/ocata': 'xenial-updates/ocata',
113 141
     'ocata/proposed': 'xenial-proposed/ocata',
114 142
     'xenial-ocata/proposed': 'xenial-proposed/ocata',
115
-    'xenial-ocata/newton': 'xenial-proposed/ocata',
143
+    'xenial-proposed/ocata': 'xenial-proposed/ocata',
116 144
     # Pike
117 145
     'pike': 'xenial-updates/pike',
118 146
     'xenial-pike': 'xenial-updates/pike',
@@ -120,7 +148,7 @@ CLOUD_ARCHIVE_POCKETS = {
120 148
     'xenial-updates/pike': 'xenial-updates/pike',
121 149
     'pike/proposed': 'xenial-proposed/pike',
122 150
     'xenial-pike/proposed': 'xenial-proposed/pike',
123
-    'xenial-pike/newton': 'xenial-proposed/pike',
151
+    'xenial-proposed/pike': 'xenial-proposed/pike',
124 152
     # Queens
125 153
     'queens': 'xenial-updates/queens',
126 154
     'xenial-queens': 'xenial-updates/queens',
@@ -128,12 +156,13 @@ CLOUD_ARCHIVE_POCKETS = {
128 156
     'xenial-updates/queens': 'xenial-updates/queens',
129 157
     'queens/proposed': 'xenial-proposed/queens',
130 158
     'xenial-queens/proposed': 'xenial-proposed/queens',
131
-    'xenial-queens/newton': 'xenial-proposed/queens',
159
+    'xenial-proposed/queens': 'xenial-proposed/queens',
132 160
 }
133 161
 
162
+
134 163
 APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
135 164
 CMD_RETRY_DELAY = 10  # Wait 10 seconds between command retries.
136
-CMD_RETRY_COUNT = 30  # Retry a failing fatal command X times.
165
+CMD_RETRY_COUNT = 3  # Retry a failing fatal command X times.
137 166
 
138 167
 
139 168
 def filter_installed_packages(packages):
@@ -161,7 +190,7 @@ def apt_cache(in_memory=True, progress=None):
161 190
     return apt_pkg.Cache(progress)
162 191
 
163 192
 
164
-def install(packages, options=None, fatal=False):
193
+def apt_install(packages, options=None, fatal=False):
165 194
     """Install one or more packages."""
166 195
     if options is None:
167 196
         options = ['--option=Dpkg::Options::=--force-confold']
@@ -178,7 +207,7 @@ def install(packages, options=None, fatal=False):
178 207
     _run_apt_command(cmd, fatal)
179 208
 
180 209
 
181
-def upgrade(options=None, fatal=False, dist=False):
210
+def apt_upgrade(options=None, fatal=False, dist=False):
182 211
     """Upgrade all packages."""
183 212
     if options is None:
184 213
         options = ['--option=Dpkg::Options::=--force-confold']
@@ -193,13 +222,13 @@ def upgrade(options=None, fatal=False, dist=False):
193 222
     _run_apt_command(cmd, fatal)
194 223
 
195 224
 
196
-def update(fatal=False):
225
+def apt_update(fatal=False):
197 226
     """Update local apt cache."""
198 227
     cmd = ['apt-get', 'update']
199 228
     _run_apt_command(cmd, fatal)
200 229
 
201 230
 
202
-def purge(packages, fatal=False):
231
+def apt_purge(packages, fatal=False):
203 232
     """Purge one or more packages."""
204 233
     cmd = ['apt-get', '--assume-yes', 'purge']
205 234
     if isinstance(packages, six.string_types):
@@ -233,7 +262,58 @@ def apt_unhold(packages, fatal=False):
233 262
     return apt_mark(packages, 'unhold', fatal=fatal)
234 263
 
235 264
 
236
-def add_source(source, key=None):
265
+def import_key(key):
266
+    """Import an ASCII Armor key.
267
+
268
+    /!\ A Radix64 format keyid is also supported for backwards
269
+    compatibility, but should never be used; the key retrieval
270
+    mechanism is insecure and subject to man-in-the-middle attacks
271
+    voiding all signature checks using that key.
272
+
273
+    :param key: The key in ASCII armor format,
274
+                  including BEGIN and END markers.
275
+    :raises: GPGKeyError if the key could not be imported
276
+    """
277
+    key = key.strip()
278
+    if '-' in key or '\n' in key:
279
+        # Send everything not obviously a keyid to GPG to import, as
280
+        # we trust its validation better than our own. eg. handling
281
+        # comments before the key.
282
+        log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
283
+        if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
284
+                '-----END PGP PUBLIC KEY BLOCK-----' in key):
285
+            log("Importing ASCII Armor PGP key", level=DEBUG)
286
+            with NamedTemporaryFile() as keyfile:
287
+                with open(keyfile.name, 'w') as fd:
288
+                    fd.write(key)
289
+                    fd.write("\n")
290
+                cmd = ['apt-key', 'add', keyfile.name]
291
+                try:
292
+                    subprocess.check_call(cmd)
293
+                except subprocess.CalledProcessError:
294
+                    error = "Error importing PGP key '{}'".format(key)
295
+                    log(error)
296
+                    raise GPGKeyError(error)
297
+        else:
298
+            raise GPGKeyError("ASCII armor markers missing from GPG key")
299
+    else:
300
+        # We should only send things obviously not a keyid offsite
301
+        # via this unsecured protocol, as it may be a secret or part
302
+        # of one.
303
+        log("PGP key found (looks like Radix64 format)", level=WARNING)
304
+        log("INSECURLY importing PGP key from keyserver; "
305
+            "full key not provided.", level=WARNING)
306
+        cmd = ['apt-key', 'adv', '--keyserver',
307
+               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
308
+        try:
309
+            subprocess.check_call(cmd)
310
+        except subprocess.CalledProcessError:
311
+            error = "Error importing PGP key '{}'".format(key)
312
+            log(error)
313
+            raise GPGKeyError(error)
314
+
315
+
316
+def add_source(source, key=None, fail_invalid=False):
237 317
     """Add a package source to this system.
238 318
 
239 319
     @param source: a URL or sources.list entry, as supported by
@@ -249,6 +329,33 @@ def add_source(source, key=None):
249 329
         such as 'cloud:icehouse'
250 330
         'distro' may be used as a noop
251 331
 
332
+    The full list of source specifications supported by the function is:
333
+
334
+    'distro': A NOP; i.e. it has no effect.
335
+    'proposed': the proposed deb spec [2] is written to
336
+      /etc/apt/sources.list.d/proposed.list
337
+    'distro-proposed': adds <version>-proposed to the debs [2]
338
+    'ppa:<ppa-name>': add-apt-repository --yes <ppa_name>
339
+    'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
340
+    'http://....': add-apt-repository --yes http://...
341
+    'cloud-archive:<spec>': add-apt-repository --yes cloud-archive:<spec>
342
+    'cloud:<release>[-staging]': specify a Cloud Archive pocket <release> with
343
+      optional staging version.  If staging is used then the staging PPA [2]
344
+      will be used.  If staging is NOT used then the cloud archive [3] will be
345
+      added, and the 'ubuntu-cloud-keyring' package will be added for the
346
+      current distro.
347
+
348
+    Otherwise the source is not recognised and this is logged to the juju log.
349
+    However, no error is raised, unless fail_invalid is True.
350
+
351
+    [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
352
+        where {} is replaced with the derived pocket name.
353
+    [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
354
+        main universe multiverse restricted
355
+        where {} is replaced with the lsb_release codename (e.g. xenial)
356
+    [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket>
357
+        to /etc/apt/sources.list.d/cloud-archive-list
358
+
252 359
     @param key: A key to be added to the system's APT keyring and used
253 360
     to verify the signatures on packages. Ideally, this should be an
254 361
     ASCII format GPG public key including the block headers. A GPG key
@@ -256,51 +363,142 @@ def add_source(source, key=None):
256 363
     available to retrieve the actual public key from a public keyserver
257 364
     placing your Juju environment at risk. ppa and cloud archive keys
258 365
     are securely added automatically, so should not be provided.
366
+
367
+    @param fail_invalid: (boolean) if True, then the function raises a
368
+    SourceConfigError if there is no matching installation source.
369
+
370
+    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
371
+    valid pocket in CLOUD_ARCHIVE_POCKETS
259 372
     """
373
+    _mapping = OrderedDict([
374
+        (r"^distro$", lambda: None),  # This is a NOP
375
+        (r"^(?:proposed|distro-proposed)$", _add_proposed),
376
+        (r"^cloud-archive:(.*)$", _add_apt_repository),
377
+        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
378
+        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
379
+        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
380
+        (r"^cloud:(.*)$", _add_cloud_pocket),
381
+        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
382
+    ])
260 383
     if source is None:
261
-        log('Source is not present. Skipping')
262
-        return
263
-
264
-    if (source.startswith('ppa:') or
265
-        source.startswith('http') or
266
-        source.startswith('deb ') or
267
-            source.startswith('cloud-archive:')):
268
-        cmd = ['add-apt-repository', '--yes', source]
269
-        _run_with_retries(cmd)
270
-    elif source.startswith('cloud:'):
271
-        install(filter_installed_packages(['ubuntu-cloud-keyring']),
272
-                fatal=True)
273
-        pocket = source.split(':')[-1]
274
-        if pocket not in CLOUD_ARCHIVE_POCKETS:
275
-            raise SourceConfigError(
276
-                'Unsupported cloud: source option %s' %
277
-                pocket)
278
-        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
279
-        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
280
-            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
281
-    elif source == 'proposed':
282
-        release = lsb_release()['DISTRIB_CODENAME']
283
-        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
284
-            apt.write(PROPOSED_POCKET.format(release))
285
-    elif source == 'distro':
286
-        pass
384
+        source = ''
385
+    for r, fn in six.iteritems(_mapping):
386
+        m = re.match(r, source)
387
+        if m:
388
+            # call the associated function with the captured groups
389
+            # raises SourceConfigError on error.
390
+            fn(*m.groups())
391
+            if key:
392
+                try:
393
+                    import_key(key)
394
+                except GPGKeyError as e:
395
+                    raise SourceConfigError(str(e))
396
+            break
287 397
     else:
288
-        log("Unknown source: {!r}".format(source))
289
-
290
-    if key:
291
-        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
292
-            with NamedTemporaryFile('w+') as key_file:
293
-                key_file.write(key)
294
-                key_file.flush()
295
-                key_file.seek(0)
296
-                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
297
-        else:
298
-            # Note that hkp: is in no way a secure protocol. Using a
299
-            # GPG key id is pointless from a security POV unless you
300
-            # absolutely trust your network and DNS.
301
-            subprocess.check_call(['apt-key', 'adv', '--keyserver',
302
-                                   'hkp://keyserver.ubuntu.com:80', '--recv',
303
-                                   key])
398
+        # nothing matched.  log an error and maybe sys.exit
399
+        err = "Unknown source: {!r}".format(source)
400
+        log(err)
401
+        if fail_invalid:
402
+            raise SourceConfigError(err)
403
+
404
+
405
+def _add_proposed():
406
+    """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
407
+
408
+    Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct stanza for
409
+    the deb line.
410
+
411
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
412
+    other architectures PROPOSED_PORTS_POCKET is used for the release.
413
+    """
414
+    release = lsb_release()['DISTRIB_CODENAME']
415
+    arch = platform.machine()
416
+    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
417
+        raise SourceConfigError("Arch {} not supported for (distro-)proposed"
418
+                                .format(arch))
419
+    with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
420
+        apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
421
+
422
+
423
+def _add_apt_repository(spec):
424
+    """Add the spec using add_apt_repository
425
+
426
+    :param spec: the parameter to pass to add_apt_repository
427
+    """
428
+    _run_with_retries(['add-apt-repository', '--yes', spec])
429
+
430
+
431
+def _add_cloud_pocket(pocket):
432
+    """Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
433
+
434
+    Note that this overwrites the existing file if there is one.
435
+
436
+    This function also converts the simple pocket into the actual pocket using
437
+    the CLOUD_ARCHIVE_POCKETS mapping.
438
+
439
+    :param pocket: string representing the pocket to add a deb spec for.
440
+    :raises: SourceConfigError if the cloud pocket doesn't exist or the
441
+        requested release doesn't match the current distro version.
442
+    """
443
+    apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
444
+                fatal=True)
445
+    if pocket not in CLOUD_ARCHIVE_POCKETS:
446
+        raise SourceConfigError(
447
+            'Unsupported cloud: source option %s' %
448
+            pocket)
449
+    actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
450
+    with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
451
+        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
452
+
453
+
454
+def _add_cloud_staging(cloud_archive_release, openstack_release):
455
+    """Add the cloud staging repository which is in
456
+    ppa:ubuntu-cloud-archive/<openstack_release>-staging
457
+
458
+    This function checks that the cloud_archive_release matches the current
459
+    codename for the distro that charm is being installed on.
460
+
461
+    :param cloud_archive_release: string, codename for the release.
462
+    :param openstack_release: String, codename for the openstack release.
463
+    :raises: SourceConfigError if the cloud_archive_release doesn't match the
464
+        current version of the os.
465
+    """
466
+    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
467
+    ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release)
468
+    cmd = 'add-apt-repository -y {}'.format(ppa)
469
+    _run_with_retries(cmd.split(' '))
470
+
471
+
472
+def _add_cloud_distro_check(cloud_archive_release, openstack_release):
473
+    """Add the cloud pocket, but also check the cloud_archive_release against
474
+    the current distro, and use the openstack_release as the full lookup.
475
+
476
+    This just calls _add_cloud_pocket() with the openstack_release as pocket
477
+    to get the correct cloud-archive.list for dpkg to work with.
478
+
479
+    :param cloud_archive_release: String, codename for the distro release.
480
+    :param openstack_release: String, spec for the release to look up in the
481
+        CLOUD_ARCHIVE_POCKETS
482
+    :raises: SourceConfigError if this is the wrong distro, or the pocket spec
483
+        doesn't exist.
484
+    """
485
+    _verify_is_ubuntu_rel(cloud_archive_release, openstack_release)
486
+    _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release))
487
+
488
+
489
+def _verify_is_ubuntu_rel(release, os_release):
490
+    """Verify that the release is in the same as the current ubuntu release.
491
+
492
+    :param release: String, lowercase for the release.
493
+    :param os_release: String, the os_release being asked for
494
+    :raises: SourceConfigError if the release is not the same as the ubuntu
495
+        release.
496
+    """
497
+    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
498
+    if release != ubuntu_rel:
499
+        raise SourceConfigError(
500
+            'Invalid Cloud Archive release specified: {}-{} on this Ubuntu '
501
+            'version ({})'.format(release, os_release, ubuntu_rel))
304 502
 
305 503
 
306 504
 def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
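The new add_source() dispatches on an ordered mapping of regular expressions, first match wins; a reduced standalone sketch of the same pattern (the handler names and the example source string are illustrative):

import re
from collections import OrderedDict

def classify(source):
    handlers = OrderedDict([
        (r"^distro$", 'noop'),
        (r"^(?:proposed|distro-proposed)$", 'proposed'),
        (r"^ppa:(.*)$", 'apt-repository'),
        (r"^cloud:(.*)-(.*)$", 'cloud-archive'),
    ])
    for pattern, name in handlers.items():
        m = re.match(pattern, source or '')
        if m:
            return name, m.groups()          # captured groups feed the handler
    raise ValueError("Unknown source: {!r}".format(source))

print(classify('cloud:xenial-ocata'))        # ('cloud-archive', ('xenial', 'ocata'))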
@@ -316,9 +514,12 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
316 514
     :param: cmd_env: dict: Environment variables to add to the command run.
317 515
     """
318 516
 
319
-    env = os.environ.copy()
517
+    env = None
518
+    kwargs = {}
320 519
     if cmd_env:
520
+        env = os.environ.copy()
321 521
         env.update(cmd_env)
522
+        kwargs['env'] = env
322 523
 
323 524
     if not retry_message:
324 525
         retry_message = "Failed executing '{}'".format(" ".join(cmd))
@@ -330,7 +531,8 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
330 531
     retry_results = (None,) + retry_exitcodes
331 532
     while result in retry_results:
332 533
         try:
333
-            result = subprocess.check_call(cmd, env=env)
534
+            # result = subprocess.check_call(cmd, env=env)
535
+            result = subprocess.check_call(cmd, **kwargs)
334 536
         except subprocess.CalledProcessError as e:
335 537
             retry_count = retry_count + 1
336 538
             if retry_count > max_retries:
@@ -343,6 +545,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
343 545
 def _run_apt_command(cmd, fatal=False):
344 546
     """Run an apt command with optional retries.
345 547
 
548
+    :param: cmd: list: The apt command to run.
346 549
     :param: fatal: bool: Whether the command's output should be checked and
347 550
         retried.
348 551
     """

+ 61
- 0
tests/charmhelpers/__init__.py View File

@@ -14,6 +14,11 @@
14 14
 
15 15
 # Bootstrap charm-helpers, installing its dependencies if necessary using
16 16
 # only standard libraries.
17
+from __future__ import print_function
18
+from __future__ import absolute_import
19
+
20
+import functools
21
+import inspect
17 22
 import subprocess
18 23
 import sys
19 24
 
@@ -34,3 +39,59 @@ except ImportError:
34 39
     else:
35 40
         subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
36 41
     import yaml  # flake8: noqa
42
+
43
+
44
+# Holds a mapping of mangled function names that have been deprecated
45
+# using the @deprecate decorator below.  This is so that the warning is only
46
+# printed once for each usage of the function.
47
+__deprecated_functions = {}
48
+
49
+
50
+def deprecate(warning, date=None, log=None):
51
+    """Add a deprecation warning the first time the function is used.
52
+    The date, which is a string in semi-ISO8601 format, indicates the year-month
53
+    that the function is officially going to be removed.
54
+
55
+    usage:
56
+
57
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
58
+    def contributed_add_source_thing(...):
59
+        ...
60
+
61
+    And it then prints to the log ONCE that the function is deprecated.
62
+    The reason for passing the logging function (log) is so that hookenv.log
63
+    can be used for a charm if needed.
64
+
65
+    :param warning:  String to indicate where it has moved to.
66
+    :param date: optional string, in YYYY-MM format to indicate when the
67
+                 function will definitely (probably) be removed.
68
+    :param log: The log function to call to log.  If not provided, logs to stdout
69
+    """
70
+    def wrap(f):
71
+
72
+        @functools.wraps(f)
73
+        def wrapped_f(*args, **kwargs):
74
+            try:
75
+                module = inspect.getmodule(f)
76
+                file = inspect.getsourcefile(f)
77
+                lines = inspect.getsourcelines(f)
78
+                f_name = "{}-{}-{}..{}-{}".format(
79
+                    module.__name__, file, lines[0], lines[-1], f.__name__)
80
+            except (IOError, TypeError):
81
+                # assume it was local, so just use the name of the function
82
+                f_name = f.__name__
83
+            if f_name not in __deprecated_functions:
84
+                __deprecated_functions[f_name] = True
85
+                s = "DEPRECATION WARNING: Function {} is being removed".format(
86
+                    f.__name__)
87
+                if date:
88
+                    s = "{} on/around {}".format(s, date)
89
+                if warning:
90
+                    s = "{} : {}".format(s, warning)
91
+                if log:
92
+                    log(s)
93
+                else:
94
+                    print(s)
95
+            return f(*args, **kwargs)
96
+        return wrapped_f
97
+    return wrap
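A minimal sketch of the decorator in use, assuming it runs in a hook context where hookenv.log is usable; the function being deprecated and the replacement named in the message are illustrative:

from charmhelpers import deprecate
from charmhelpers.core.hookenv import log

@deprecate('use charmhelpers.fetch.add_source() instead', '2017-12', log=log)
def legacy_add_source(source):
    return source

legacy_add_source('distro')   # logs the DEPRECATION WARNING once, then runs normally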

+ 68
- 35
tests/charmhelpers/contrib/openstack/amulet/utils.py View File

@@ -25,9 +25,12 @@ import urlparse
25 25
 import cinderclient.v1.client as cinder_client
26 26
 import glanceclient.v1.client as glance_client
27 27
 import heatclient.v1.client as heat_client
28
-import keystoneclient.v2_0 as keystone_client
29
-from keystoneclient.auth.identity import v3 as keystone_id_v3
30
-from keystoneclient import session as keystone_session
28
+from keystoneclient.v2_0 import client as keystone_client
29
+from keystoneauth1.identity import (
30
+    v3,
31
+    v2,
32
+)
33
+from keystoneauth1 import session as keystone_session
31 34
 from keystoneclient.v3 import client as keystone_client_v3
32 35
 from novaclient import exceptions
33 36
 
@@ -368,12 +371,20 @@ class OpenStackAmuletUtils(AmuletUtils):
368 371
                                         port)
369 372
         if not api_version or api_version == 2:
370 373
             ep = base_ep + "/v2.0"
371
-            return keystone_client.Client(username=username, password=password,
372
-                                          tenant_name=project_name,
373
-                                          auth_url=ep)
374
+            auth = v2.Password(
375
+                username=username,
376
+                password=password,
377
+                tenant_name=project_name,
378
+                auth_url=ep
379
+            )
380
+            sess = keystone_session.Session(auth=auth)
381
+            client = keystone_client.Client(session=sess)
382
+            # This populates the client.service_catalog
383
+            client.auth_ref = auth.get_access(sess)
384
+            return client
374 385
         else:
375 386
             ep = base_ep + "/v3"
376
-            auth = keystone_id_v3.Password(
387
+            auth = v3.Password(
377 388
                 user_domain_name=user_domain_name,
378 389
                 username=username,
379 390
                 password=password,
@@ -382,36 +393,45 @@ class OpenStackAmuletUtils(AmuletUtils):
382 393
                 project_name=project_name,
383 394
                 auth_url=ep
384 395
             )
385
-            return keystone_client_v3.Client(
386
-                session=keystone_session.Session(auth=auth)
387
-            )
396
+            sess = keystone_session.Session(auth=auth)
397
+            client = keystone_client_v3.Client(session=sess)
398
+            # This populates the client.service_catalog
399
+            client.auth_ref = auth.get_access(sess)
400
+            return client
388 401
 
389 402
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
390 403
                                     tenant=None, api_version=None,
391
-                                    keystone_ip=None):
404
+                                    keystone_ip=None, user_domain_name=None,
405
+                                    project_domain_name=None,
406
+                                    project_name=None):
392 407
         """Authenticates admin user with the keystone admin endpoint."""
393 408
         self.log.debug('Authenticating keystone admin...')
394 409
         if not keystone_ip:
395 410
             keystone_ip = keystone_sentry.info['public-address']
396 411
 
397
-        user_domain_name = None
398
-        domain_name = None
399
-        if api_version == 3:
412
+        # To support backward compatibility usage of this function
413
+        if not project_name:
414
+            project_name = tenant
415
+        if api_version == 3 and not user_domain_name:
400 416
             user_domain_name = 'admin_domain'
401
-            domain_name = user_domain_name
402
-
403
-        return self.authenticate_keystone(keystone_ip, user, password,
404
-                                          project_name=tenant,
405
-                                          api_version=api_version,
406
-                                          user_domain_name=user_domain_name,
407
-                                          domain_name=domain_name,
408
-                                          admin_port=True)
417
+        if api_version == 3 and not project_domain_name:
418
+            project_domain_name = 'admin_domain'
419
+        if api_version == 3 and not project_name:
420
+            project_name = 'admin'
421
+
422
+        return self.authenticate_keystone(
423
+            keystone_ip, user, password,
424
+            api_version=api_version,
425
+            user_domain_name=user_domain_name,
426
+            project_domain_name=project_domain_name,
427
+            project_name=project_name,
428
+            admin_port=True)
409 429
 
410 430
     def authenticate_keystone_user(self, keystone, user, password, tenant):
411 431
         """Authenticates a regular user with the keystone public endpoint."""
412 432
         self.log.debug('Authenticating keystone user ({})...'.format(user))
413 433
         ep = keystone.service_catalog.url_for(service_type='identity',
414
-                                              endpoint_type='publicURL')
434
+                                              interface='publicURL')
415 435
         keystone_ip = urlparse.urlparse(ep).hostname
416 436
 
417 437
         return self.authenticate_keystone(keystone_ip, user, password,
@@ -421,22 +441,32 @@ class OpenStackAmuletUtils(AmuletUtils):
421 441
         """Authenticates admin user with glance."""
422 442
         self.log.debug('Authenticating glance admin...')
423 443
         ep = keystone.service_catalog.url_for(service_type='image',
424
-                                              endpoint_type='adminURL')
425
-        return glance_client.Client(ep, token=keystone.auth_token)
444
+                                              interface='adminURL')
445
+        if keystone.session:
446
+            return glance_client.Client(ep, session=keystone.session)
447
+        else:
448
+            return glance_client.Client(ep, token=keystone.auth_token)
426 449
 
427 450
     def authenticate_heat_admin(self, keystone):
428 451
         """Authenticates the admin user with heat."""
429 452
         self.log.debug('Authenticating heat admin...')
430 453
         ep = keystone.service_catalog.url_for(service_type='orchestration',
431
-                                              endpoint_type='publicURL')
432
-        return heat_client.Client(endpoint=ep, token=keystone.auth_token)
454
+                                              interface='publicURL')
455
+        if keystone.session:
456
+            return heat_client.Client(endpoint=ep, session=keystone.session)
457
+        else:
458
+            return heat_client.Client(endpoint=ep, token=keystone.auth_token)
433 459
 
434 460
     def authenticate_nova_user(self, keystone, user, password, tenant):
435 461
         """Authenticates a regular user with nova-api."""
436 462
         self.log.debug('Authenticating nova user ({})...'.format(user))
437 463
         ep = keystone.service_catalog.url_for(service_type='identity',
438
-                                              endpoint_type='publicURL')
439
-        if novaclient.__version__[0] >= "7":
464
+                                              interface='publicURL')
465
+        if keystone.session:
466
+            return nova_client.Client(NOVA_CLIENT_VERSION,
467
+                                      session=keystone.session,
468
+                                      auth_url=ep)
469
+        elif novaclient.__version__[0] >= "7":
440 470
             return nova_client.Client(NOVA_CLIENT_VERSION,
441 471
                                       username=user, password=password,
442 472
                                       project_name=tenant, auth_url=ep)
@@ -449,12 +479,15 @@ class OpenStackAmuletUtils(AmuletUtils):
449 479
         """Authenticates a regular user with swift api."""
450 480
         self.log.debug('Authenticating swift user ({})...'.format(user))
451 481
         ep = keystone.service_catalog.url_for(service_type='identity',
452
-                                              endpoint_type='publicURL')
453
-        return swiftclient.Connection(authurl=ep,
454
-                                      user=user,
455
-                                      key=password,
456
-                                      tenant_name=tenant,
457
-                                      auth_version='2.0')
482
+                                              interface='publicURL')
483
+        if keystone.session:
484
+            return swiftclient.Connection(session=keystone.session)
485
+        else:
486
+            return swiftclient.Connection(authurl=ep,
487
+                                          user=user,
488
+                                          key=password,
489
+                                          tenant_name=tenant,
490
+                                          auth_version='2.0')
458 491
 
459 492
     def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto",
460 493
                       ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
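The updated helpers switch to keystoneauth1 session-based authentication; a condensed sketch of the v3 flow they use (the endpoint, credentials and domain names below are placeholders):

from keystoneauth1.identity import v3
from keystoneauth1 import session as keystone_session
from keystoneclient.v3 import client as keystone_client_v3

auth = v3.Password(
    username='admin',
    password='secret',
    user_domain_name='admin_domain',
    project_domain_name='admin_domain',
    project_name='admin',
    auth_url='http://10.0.0.1:35357/v3',
)
sess = keystone_session.Session(auth=auth)
keystone = keystone_client_v3.Client(session=sess)
# Populating auth_ref makes keystone.service_catalog usable, as in the helper.
keystone.auth_ref = auth.get_access(sess)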

+ 40
- 0
tests/charmhelpers/core/hookenv.py View File

@@ -43,6 +43,7 @@ ERROR = "ERROR"
43 43
 WARNING = "WARNING"
44 44
 INFO = "INFO"
45 45
 DEBUG = "DEBUG"
46
+TRACE = "TRACE"
46 47
 MARKER = object()
47 48
 
48 49
 cache = {}
@@ -202,6 +203,27 @@ def service_name():
202 203
     return local_unit().split('/')[0]
203 204
 
204 205
 
206
+def principal_unit():
207
+    """Returns the principal unit of this unit, otherwise None"""
208
+    # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
209
+    principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
210
+    # If it's empty, then this unit is the principal
211
+    if principal_unit == '':
212
+        return os.environ['JUJU_UNIT_NAME']
213
+    elif principal_unit is not None:
214
+        return principal_unit
215
+    # For Juju 2.1 and below, let's try to work out the principal unit by
216
+    # the various charms' metadata.yaml.
217
+    for reltype in relation_types():
218
+        for rid in relation_ids(reltype):
219
+            for unit in related_units(rid):
220
+                md = _metadata_unit(unit)
221
+                subordinate = md.pop('subordinate', None)
222
+                if not subordinate:
223
+                    return unit
224
+    return None
225
+
226
+
205 227
 @cached
206 228
 def remote_service_name(relid=None):
207 229
     """The remote service name for a given relation-id (or the current relation)"""
@@ -478,6 +500,21 @@ def metadata():
478 500
         return yaml.safe_load(md)
479 501
 
480 502
 
503
+def _metadata_unit(unit):
504
+    """Given the name of a unit (e.g. apache2/0), get the unit charm's
505
+    metadata.yaml. Very similar to metadata() but allows us to inspect
506
+    other units. Unit needs to be co-located, such as a subordinate or
507
+    principal/primary.
508
+
509
+    :returns: metadata.yaml as a python object.
510
+
511
+    """
512
+    basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
513
+    unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
514
+    with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md:
515
+        return yaml.safe_load(md)
516
+
517
+
481 518
 @cached
482 519
 def relation_types():
483 520
     """Get a list of relation types supported by this charm"""
@@ -753,6 +790,9 @@ class Hooks(object):
753 790
 
754 791
 def charm_dir():
755 792
     """Return the root directory of the current charm"""
793
+    d = os.environ.get('JUJU_CHARM_DIR')
794
+    if d is not None:
795
+        return d
756 796
     return os.environ.get('CHARM_DIR')
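To make the path handling in _metadata_unit() and charm_dir() concrete, a small sketch of the derivation (the agent directory and unit names are illustrative):

import os

charm_dir = '/var/lib/juju/agents/unit-myapp-0/charm'   # what charm_dir() returns
unit = 'apache2/0'                                       # a co-located unit

basedir = os.sep.join(charm_dir.split(os.sep)[:-2])      # /var/lib/juju/agents
unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))    # unit-apache2-0
print(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml'))
# /var/lib/juju/agents/unit-apache2-0/charm/metadata.yaml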
757 797
 
758 798
 

+ 32
- 6
tests/charmhelpers/core/host.py View File

@@ -34,7 +34,7 @@ import six
34 34
 
35 35
 from contextlib import contextmanager
36 36
 from collections import OrderedDict
37
-from .hookenv import log
37
+from .hookenv import log, DEBUG
38 38
 from .fstab import Fstab
39 39
 from charmhelpers.osplatform import get_platform
40 40
 
@@ -191,6 +191,7 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
191 191
     upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
192 192
     sysv_file = os.path.join(initd_dir, service_name)
193 193
     if init_is_systemd():
194
+        service('disable', service_name)
194 195
         service('mask', service_name)
195 196
     elif os.path.exists(upstart_file):
196 197
         override_path = os.path.join(
@@ -225,6 +226,7 @@ def service_resume(service_name, init_dir="/etc/init",
225 226
     sysv_file = os.path.join(initd_dir, service_name)
226 227
     if init_is_systemd():
227 228
         service('unmask', service_name)
229
+        service('enable', service_name)
228 230
     elif os.path.exists(upstart_file):
229 231
         override_path = os.path.join(
230 232
             init_dir, '{}.override'.format(service_name))
@@ -485,13 +487,37 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False):
485 487
 
486 488
 def write_file(path, content, owner='root', group='root', perms=0o444):
487 489
     """Create or overwrite a file with the contents of a byte string."""
488
-    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
489 490
     uid = pwd.getpwnam(owner).pw_uid
490 491
     gid = grp.getgrnam(group).gr_gid
491
-    with open(path, 'wb') as target:
492
-        os.fchown(target.fileno(), uid, gid)
493
-        os.fchmod(target.fileno(), perms)
494
-        target.write(content)
492
+    # let's see if we can grab the file and compare the contents, to avoid doing
493
+    # a write.
494
+    existing_content = None
495
+    existing_uid, existing_gid = None, None
496
+    try:
497
+        with open(path, 'rb') as target:
498
+            existing_content = target.read()
499
+        stat = os.stat(path)
500
+        existing_uid, existing_gid = stat.st_uid, stat.st_gid
501
+    except (IOError, OSError):
502
+        pass
503
+    if content != existing_content:
504
+        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
505
+            level=DEBUG)
506
+        with open(path, 'wb') as target:
507
+            os.fchown(target.fileno(), uid, gid)
508
+            os.fchmod(target.fileno(), perms)
509
+            target.write(content)
510
+        return
511
+    # the contents were the same, but we might still need to change the
512
+    # ownership.
513
+    if existing_uid != uid:
514
+        log("Changing uid on already existing content: {} -> {}"
515
+            .format(existing_uid, uid), level=DEBUG)
516
+        os.chown(path, uid, -1)
517
+    if existing_gid != gid:
518
+        log("Changing gid on already existing content: {} -> {}"
519
+            .format(existing_gid, gid), level=DEBUG)
520
+        os.chown(path, -1, gid)
495 521
 
496 522
 
497 523
 def fstab_remove(mp):
