
Enable Bionic as a gate test

Change bionic test from dev to gate for 18.05.

Change-Id: I4d82f73b7b83d5bb597147663d424554d34d7e76
changes/83/566983/7
David Ames 1 year ago
parent commit bb4e4912c7
30 changed files with 1078 additions and 337 deletions

 1. .gitignore (+1, -0)
 2. hooks/charmhelpers/contrib/hahelpers/apache.py (+3, -2)
 3. hooks/charmhelpers/contrib/hahelpers/cluster.py (+11, -3)
 4. hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+5, -5)
 5. hooks/charmhelpers/contrib/openstack/amulet/utils.py (+200, -25)
 6. hooks/charmhelpers/contrib/openstack/context.py (+6, -4)
 7. hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware (+5, -0)
 8. hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications (+3, -0)
 9. hooks/charmhelpers/contrib/openstack/utils.py (+2, -2)
 10. hooks/charmhelpers/contrib/openstack/vaultlocker.py (+126, -0)
 11. hooks/charmhelpers/contrib/storage/linux/ceph.py (+41, -2)
 12. hooks/charmhelpers/contrib/storage/linux/lvm.py (+29, -0)
 13. hooks/charmhelpers/contrib/storage/linux/utils.py (+16, -0)
 14. hooks/charmhelpers/core/hookenv.py (+104, -34)
 15. hooks/charmhelpers/core/host.py (+9, -2)
 16. hooks/charmhelpers/core/services/base.py (+18, -7)
 17. hooks/charmhelpers/core/sysctl.py (+11, -7)
 18. hooks/charmhelpers/core/unitdata.py (+7, -2)
 19. hooks/charmhelpers/fetch/ubuntu.py (+1, -0)
 20. tests/basic_deployment.py (+100, -159)
 21. tests/charmhelpers/contrib/openstack/amulet/deployment.py (+5, -5)
 22. tests/charmhelpers/contrib/openstack/amulet/utils.py (+200, -25)
 23. tests/charmhelpers/core/hookenv.py (+104, -34)
 24. tests/charmhelpers/core/host.py (+9, -2)
 25. tests/charmhelpers/core/services/base.py (+18, -7)
 26. tests/charmhelpers/core/sysctl.py (+11, -7)
 27. tests/charmhelpers/core/unitdata.py (+7, -2)
 28. tests/gate-basic-bionic-queens (+0, -0)
 29. tests/gate-basic-xenial-queens (+25, -0)
 30. tox.ini (+1, -1)

.gitignore (+1, -0)

@@ -5,3 +5,4 @@ bin
 *.sw[nop]
 *.py[oc]
 *.pyc
+func-results.json

hooks/charmhelpers/contrib/hahelpers/apache.py (+3, -2)

@@ -65,7 +65,8 @@ def get_ca_cert():
     if ca_cert is None:
         log("Inspecting identity-service relations for CA SSL certificate.",
             level=INFO)
-        for r_id in relation_ids('identity-service'):
+        for r_id in (relation_ids('identity-service') +
+                     relation_ids('identity-credentials')):
             for unit in relation_list(r_id):
                 if ca_cert is None:
                     ca_cert = relation_get('ca_cert',
@@ -76,7 +77,7 @@ def get_ca_cert():
 def retrieve_ca_cert(cert_file):
     cert = None
     if os.path.isfile(cert_file):
-        with open(cert_file, 'r') as crt:
+        with open(cert_file, 'rb') as crt:
             cert = crt.read()
     return cert

hooks/charmhelpers/contrib/hahelpers/cluster.py (+11, -3)

@@ -371,6 +371,7 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
    ''' Distribute operations by waiting based on modulo_distribution

    If modulo and or wait are not set, check config_get for those values.
+    If config values are not set, default to modulo=3 and wait=30.

    :param modulo: int The modulo number creates the group distribution
    :param wait: int The constant time wait value
@@ -382,10 +383,17 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
    :side effect: Calls time.sleep()
    '''
    if modulo is None:
-        modulo = config_get('modulo-nodes')
+        modulo = config_get('modulo-nodes') or 3
    if wait is None:
-        wait = config_get('known-wait')
-    calculated_wait = modulo_distribution(modulo=modulo, wait=wait)
+        wait = config_get('known-wait') or 30
+    if juju_is_leader():
+        # The leader should never wait
+        calculated_wait = 0
+    else:
+        # non_zero_wait=True guarantees the non-leader who gets modulo 0
+        # will still wait
+        calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
+                                              non_zero_wait=True)
    msg = "Waiting {} seconds for {} ...".format(calculated_wait,
                                                 operation_name)
    log(msg, DEBUG)

hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+5, -5)

@@ -21,6 +21,9 @@ from collections import OrderedDict
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
+from charmhelpers.contrib.openstack.amulet.utils import (
+    OPENSTACK_RELEASES_PAIRS
+)

 DEBUG = logging.DEBUG
 ERROR = logging.ERROR
@@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
           release.
           """
         # Must be ordered by OpenStack release (not by Ubuntu release):
-        (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
-         self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
-         self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
-         self.xenial_pike, self.artful_pike, self.xenial_queens,
-         self.bionic_queens,) = range(13)
+        for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
+            setattr(self, os_pair, i)

         releases = {
             ('trusty', None): self.trusty_icehouse,
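
A standalone sketch (not part of this commit) of what the enumerate/setattr loop buys: each release pair becomes an ordered integer attribute, so appending a new pair to OPENSTACK_RELEASES_PAIRS is the only change needed for future releases.

    # Copy of OPENSTACK_RELEASES_PAIRS from the diff below.
    OPENSTACK_RELEASES_PAIRS = [
        'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
        'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
        'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
        'xenial_pike', 'artful_pike', 'xenial_queens',
        'bionic_queens']

    class Releases(object):
        def __init__(self):
            # Pair names become integer attributes ordered by OpenStack
            # release, replacing the hand-numbered tuple unpacking.
            for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
                setattr(self, os_pair, i)

    r = Releases()
    assert r.bionic_queens == 12
    assert r.xenial_queens < r.bionic_queens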

hooks/charmhelpers/contrib/openstack/amulet/utils.py (+200, -25)

@@ -50,6 +50,13 @@ ERROR = logging.ERROR

 NOVA_CLIENT_VERSION = "2"

+OPENSTACK_RELEASES_PAIRS = [
+    'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
+    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
+    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
+    'xenial_pike', 'artful_pike', 'xenial_queens',
+    'bionic_queens']
+

 class OpenStackAmuletUtils(AmuletUtils):
     """OpenStack amulet utilities.
@@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils):
         super(OpenStackAmuletUtils, self).__init__(log_level)

     def validate_endpoint_data(self, endpoints, admin_port, internal_port,
-                               public_port, expected):
+                               public_port, expected, openstack_release=None):
+        """Validate endpoint data. Pick the correct validator based on
+           OpenStack release. Expected data should be in the v2 format:
+           {
+               'id': id,
+               'region': region,
+               'adminurl': adminurl,
+               'internalurl': internalurl,
+               'publicurl': publicurl,
+               'service_id': service_id}
+
+           """
+        validation_function = self.validate_v2_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+            validation_function = self.validate_v3_endpoint_data
+            expected = {
+                'id': expected['id'],
+                'region': expected['region'],
+                'region_id': 'RegionOne',
+                'url': self.valid_url,
+                'interface': self.not_null,
+                'service_id': expected['service_id']}
+        return validation_function(endpoints, admin_port, internal_port,
+                                   public_port, expected)
+
+    def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
+                                  public_port, expected):
         """Validate endpoint data.

            Validate actual endpoint data vs expected endpoint data. The ports
@@ -141,7 +175,86 @@ class OpenStackAmuletUtils(AmuletUtils):
         if len(found) != expected_num_eps:
             return 'Unexpected number of endpoints found'

-    def validate_svc_catalog_endpoint_data(self, expected, actual):
+    def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
+        """Convert v2 endpoint data into v3.
+
+           {
+               'service_name1': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+               'service_name2': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+           }
+          """
+        self.log.warn("Endpoint ID and Region ID validation is limited to not "
+                      "null checks after v2 to v3 conversion")
+        for svc in ep_data.keys():
+            assert len(ep_data[svc]) == 1, "Unknown data format"
+            svc_ep_data = ep_data[svc][0]
+            ep_data[svc] = [
+                {
+                    'url': svc_ep_data['adminURL'],
+                    'interface': 'admin',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['publicURL'],
+                    'interface': 'public',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['internalURL'],
+                    'interface': 'internal',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null}]
+        return ep_data
+
+    def validate_svc_catalog_endpoint_data(self, expected, actual,
+                                           openstack_release=None):
+        """Validate service catalog endpoint data. Pick the correct validator
+           for the OpenStack version. Expected data should be in the v2 format:
+           {
+               'service_name1': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+               'service_name2': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+           }
+
+           """
+        validation_function = self.validate_v2_svc_catalog_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+            validation_function = self.validate_v3_svc_catalog_endpoint_data
+            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
+        return validation_function(expected, actual)
+
+    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
         """Validate service catalog endpoint data.

            Validate a list of actual service catalog endpoints vs a list of
@@ -328,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             if rel.get('api_version') != str(api_version):
                 raise Exception("api_version not propagated through relation"
                                 " data yet ('{}' != '{}')."
-                                "".format(rel['api_version'], api_version))
+                                "".format(rel.get('api_version'), api_version))

     def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
                                        api_version):
@@ -350,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils):
         deployment._auto_wait_for_status()
         self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)

-    def authenticate_cinder_admin(self, keystone_sentry, username,
-                                  password, tenant, api_version=2):
+    def authenticate_cinder_admin(self, keystone, api_version=2):
         """Authenticates admin user with cinder."""
-        # NOTE(beisner): cinder python client doesn't accept tokens.
-        keystone_ip = keystone_sentry.info['public-address']
-        ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
+        self.log.debug('Authenticating cinder admin...')
         _clients = {
             1: cinder_client.Client,
             2: cinder_clientv2.Client}
-        return _clients[api_version](username, password, tenant, ept)
+        return _clients[api_version](session=keystone.session)

     def authenticate_keystone(self, keystone_ip, username, password,
                               api_version=False, admin_port=False,
@@ -367,13 +477,36 @@ class OpenStackAmuletUtils(AmuletUtils):
                               project_domain_name=None, project_name=None):
         """Authenticate with Keystone"""
         self.log.debug('Authenticating with keystone...')
-        port = 5000
-        if admin_port:
-            port = 35357
-        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
-                                        port)
-        if not api_version or api_version == 2:
-            ep = base_ep + "/v2.0"
+        if not api_version:
+            api_version = 2
+        sess, auth = self.get_keystone_session(
+            keystone_ip=keystone_ip,
+            username=username,
+            password=password,
+            api_version=api_version,
+            admin_port=admin_port,
+            user_domain_name=user_domain_name,
+            domain_name=domain_name,
+            project_domain_name=project_domain_name,
+            project_name=project_name
+        )
+        if api_version == 2:
+            client = keystone_client.Client(session=sess)
+        else:
+            client = keystone_client_v3.Client(session=sess)
+        # This populates the client.service_catalog
+        client.auth_ref = auth.get_access(sess)
+        return client
+
+    def get_keystone_session(self, keystone_ip, username, password,
+                             api_version=False, admin_port=False,
+                             user_domain_name=None, domain_name=None,
+                             project_domain_name=None, project_name=None):
+        """Return a keystone session object"""
+        ep = self.get_keystone_endpoint(keystone_ip,
+                                        api_version=api_version,
+                                        admin_port=admin_port)
+        if api_version == 2:
             auth = v2.Password(
                 username=username,
                 password=password,
@@ -381,12 +514,7 @@ class OpenStackAmuletUtils(AmuletUtils):
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-            client = keystone_client.Client(session=sess)
-            # This populates the client.service_catalog
-            client.auth_ref = auth.get_access(sess)
-            return client
         else:
-            ep = base_ep + "/v3"
             auth = v3.Password(
                 user_domain_name=user_domain_name,
                 username=username,
@@ -397,10 +525,57 @@ class OpenStackAmuletUtils(AmuletUtils):
                 auth_url=ep
             )
             sess = keystone_session.Session(auth=auth)
-            client = keystone_client_v3.Client(session=sess)
-            # This populates the client.service_catalog
-            client.auth_ref = auth.get_access(sess)
-            return client
+        return (sess, auth)
+
+    def get_keystone_endpoint(self, keystone_ip, api_version=None,
+                              admin_port=False):
+        """Return keystone endpoint"""
+        port = 5000
+        if admin_port:
+            port = 35357
+        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
+                                        port)
+        if api_version == 2:
+            ep = base_ep + "/v2.0"
+        else:
+            ep = base_ep + "/v3"
+        return ep
+
+    def get_default_keystone_session(self, keystone_sentry,
+                                     openstack_release=None):
+        """Return a keystone session object and client object assuming standard
+           default settings
+
+           Example call in amulet tests:
+               self.keystone_session, self.keystone = u.get_default_keystone_session(
+                   self.keystone_sentry,
+                   openstack_release=self._get_openstack_release())
+
+           The session can then be used to auth other clients:
+               neutronclient.Client(session=session)
+               aodh_client.Client(session=session)
+               etc.
+        """
+        self.log.debug('Authenticating keystone admin...')
+        api_version = 2
+        client_class = keystone_client.Client
+        # 11 => xenial_queens
+        if openstack_release and openstack_release >= 11:
+            api_version = 3
+            client_class = keystone_client_v3.Client
+        keystone_ip = keystone_sentry.info['public-address']
+        session, auth = self.get_keystone_session(
+            keystone_ip,
+            api_version=api_version,
+            username='admin',
+            password='openstack',
+            project_name='admin',
+            user_domain_name='admin_domain',
+            project_domain_name='admin_domain')
+        client = client_class(session=session)
+        # This populates the client.service_catalog
+        client.auth_ref = auth.get_access(session)
+        return session, client

     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant=None, api_version=None,
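
A standalone illustration (not part of this commit) of the v2-to-v3 catalog conversion above: one v2 record fans out into three v3 endpoint records, one per interface, with id/region_id reduced to not-null checks. The URLs below are hypothetical.

    NOT_NULL = object()  # stand-in for the framework's u.not_null matcher

    def convert(svc_ep_data):
        return [
            {'url': svc_ep_data[url_key], 'interface': interface,
             'region': svc_ep_data['region'],
             'region_id': NOT_NULL, 'id': NOT_NULL}
            for url_key, interface in [('adminURL', 'admin'),
                                       ('publicURL', 'public'),
                                       ('internalURL', 'internal')]]

    v2 = {'adminURL': 'http://10.0.0.1:35357',
          'publicURL': 'http://10.0.0.1:5000',
          'internalURL': 'http://10.0.0.1:5000',
          'region': 'RegionOne', 'id': 'abc'}
    assert [ep['interface'] for ep in convert(v2)] == ['admin', 'public', 'internal']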

hooks/charmhelpers/contrib/openstack/context.py (+6, -4)

@@ -384,6 +384,7 @@ class IdentityServiceContext(OSContextGenerator):
                     # so a missing value just indicates keystone needs
                     # upgrading
                     ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
+                    ctxt['admin_domain_id'] = rdata.get('service_domain_id')
                     return ctxt

         return {}
@@ -796,9 +797,9 @@ class ApacheSSLContext(OSContextGenerator):
             key_filename = 'key'

         write_file(path=os.path.join(ssl_dir, cert_filename),
-                   content=b64decode(cert))
+                   content=b64decode(cert), perms=0o640)
         write_file(path=os.path.join(ssl_dir, key_filename),
-                   content=b64decode(key))
+                   content=b64decode(key), perms=0o640)

     def configure_ca(self):
         ca_cert = get_ca_cert()
@@ -1872,10 +1873,11 @@ class EnsureDirContext(OSContextGenerator):
     context is needed to do that before rendering a template.
    '''

-    def __init__(self, dirname):
+    def __init__(self, dirname, **kwargs):
         '''Used merely to ensure that a given directory exists.'''
         self.dirname = dirname
+        self.kwargs = kwargs

     def __call__(self):
-        mkdir(self.dirname)
+        mkdir(self.dirname, **self.kwargs)
         return {}
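
A hedged usage sketch (hypothetical directory name; assumes charmhelpers is importable): the new **kwargs are forwarded verbatim to charmhelpers.core.host.mkdir(), so a charm can now control the permissions of the ensured directory.

    from charmhelpers.contrib.openstack.context import EnsureDirContext

    # perms (and owner/group) pass straight through to host.mkdir()
    ctxt_gen = EnsureDirContext('/var/lib/my-charm', perms=0o750)
    ctxt_gen()  # creates the directory if missing, returns {}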

hooks/charmhelpers/contrib/openstack/templates/section-oslo-middleware (+5, -0)

@@ -0,0 +1,5 @@
+[oslo_middleware]
+
+# Bug #1758675
+enable_proxy_headers_parsing = true
+

hooks/charmhelpers/contrib/openstack/templates/section-oslo-notifications (+3, -0)

@@ -5,4 +5,7 @@ transport_url = {{ transport_url }}
 {% if notification_topics -%}
 topics = {{ notification_topics }}
 {% endif -%}
+{% if notification_format -%}
+notification_format = {{ notification_format }}
+{% endif -%}
 {% endif -%}

hooks/charmhelpers/contrib/openstack/utils.py (+2, -2)

@@ -182,7 +182,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('pike',
         ['2.13.0', '2.15.0']),
     ('queens',
-        ['2.16.0']),
+        ['2.16.0', '2.17.0']),
 ])

 # >= Liberty version->codename mapping
@@ -306,7 +306,7 @@ def get_os_codename_install_source(src):

     if src.startswith('cloud:'):
         ca_rel = src.split(':')[1]
-        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
+        ca_rel = ca_rel.split('-')[1].split('/')[0]
         return ca_rel

     # Best guess match based on deb string provided
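
A standalone restatement (not part of this commit) of the relaxed cloud-archive parsing: the codename is now taken from the pocket name itself, so the source string no longer has to contain the running system's Ubuntu series.

    def get_ca_release(src):
        # 'cloud:xenial-queens/proposed' -> 'xenial-queens/proposed' -> 'queens'
        ca_rel = src.split(':')[1]
        return ca_rel.split('-')[1].split('/')[0]

    assert get_ca_release('cloud:xenial-queens') == 'queens'
    assert get_ca_release('cloud:bionic-queens/proposed') == 'queens'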

hooks/charmhelpers/contrib/openstack/vaultlocker.py (+126, -0)

@@ -0,0 +1,126 @@
+# Copyright 2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import charmhelpers.contrib.openstack.alternatives as alternatives
+import charmhelpers.contrib.openstack.context as context
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as host
+import charmhelpers.core.templating as templating
+import charmhelpers.core.unitdata as unitdata
+
+VAULTLOCKER_BACKEND = 'charm-vaultlocker'
+
+
+class VaultKVContext(context.OSContextGenerator):
+    """Vault KV context for interaction with vault-kv interfaces"""
+    interfaces = ['secrets-storage']
+
+    def __init__(self, secret_backend=None):
+        super(context.OSContextGenerator, self).__init__()
+        self.secret_backend = (
+            secret_backend or 'charm-{}'.format(hookenv.service_name())
+        )
+
+    def __call__(self):
+        db = unitdata.kv()
+        last_token = db.get('last-token')
+        secret_id = db.get('secret-id')
+        for relation_id in hookenv.relation_ids(self.interfaces[0]):
+            for unit in hookenv.related_units(relation_id):
+                data = hookenv.relation_get(unit=unit,
+                                            rid=relation_id)
+                vault_url = data.get('vault_url')
+                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
+                token = data.get('{}_token'.format(hookenv.local_unit()))
+
+                if all([vault_url, role_id, token]):
+                    token = json.loads(token)
+                    vault_url = json.loads(vault_url)
+
+                    # Tokens may change when secret_id's are being
+                    # reissued - if so use token to get new secret_id
+                    if token != last_token:
+                        secret_id = retrieve_secret_id(
+                            url=vault_url,
+                            token=token
+                        )
+                        db.set('secret-id', secret_id)
+                        db.set('last-token', token)
+                        db.flush()
+
+                    ctxt = {
+                        'vault_url': vault_url,
+                        'role_id': json.loads(role_id),
+                        'secret_id': secret_id,
+                        'secret_backend': self.secret_backend,
+                    }
+                    vault_ca = data.get('vault_ca')
+                    if vault_ca:
+                        ctxt['vault_ca'] = json.loads(vault_ca)
+                    self.complete = True
+                    return ctxt
+        return {}
+
+
+def write_vaultlocker_conf(context, priority=100):
+    """Write vaultlocker configuration to disk and install alternative
+
+    :param context: Dict of data from vault-kv relation
+    :ptype: context: dict
+    :param priority: Priority of alternative configuration
+    :ptype: priority: int"""
+    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
+        hookenv.service_name()
+    )
+    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
+    templating.render(source='vaultlocker.conf.j2',
+                      target=charm_vl_path,
+                      context=context, perms=0o600)
+    alternatives.install_alternative('vaultlocker.conf',
+                                     '/etc/vaultlocker/vaultlocker.conf',
+                                     charm_vl_path, priority)
+
+
+def vault_relation_complete(backend=None):
+    """Determine whether vault relation is complete
+
+    :param backend: Name of secrets backend requested
+    :ptype backend: string
+    :returns: whether the relation to vault is complete
+    :rtype: bool"""
+    vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
+    vault_kv()
+    return vault_kv.complete
+
+
+# TODO: contrib a high level unwrap method to hvac that works
+def retrieve_secret_id(url, token):
+    """Retrieve a response-wrapped secret_id from Vault
+
+    :param url: URL to Vault Server
+    :ptype url: str
+    :param token: One shot Token to use
+    :ptype token: str
+    :returns: secret_id to use for Vault Access
+    :rtype: str"""
+    import hvac
+    client = hvac.Client(url=url, token=token)
+    response = client._post('/v1/sys/wrapping/unwrap')
+    if response.status_code == 200:
+        data = response.json()
+        return data['data']['secret_id']
+ 41
- 2
hooks/charmhelpers/contrib/storage/linux/ceph.py View File

@@ -291,7 +291,7 @@ class Pool(object):
291 291
 
292 292
 class ReplicatedPool(Pool):
293 293
     def __init__(self, service, name, pg_num=None, replicas=2,
294
-                 percent_data=10.0):
294
+                 percent_data=10.0, app_name=None):
295 295
         super(ReplicatedPool, self).__init__(service=service, name=name)
296 296
         self.replicas = replicas
297 297
         if pg_num:
@@ -301,6 +301,10 @@ class ReplicatedPool(Pool):
301 301
             self.pg_num = min(pg_num, max_pgs)
302 302
         else:
303 303
             self.pg_num = self.get_pgs(self.replicas, percent_data)
304
+        if app_name:
305
+            self.app_name = app_name
306
+        else:
307
+            self.app_name = 'unknown'
304 308
 
305 309
     def create(self):
306 310
         if not pool_exists(self.service, self.name):
@@ -313,6 +317,12 @@ class ReplicatedPool(Pool):
313 317
                 update_pool(client=self.service,
314 318
                             pool=self.name,
315 319
                             settings={'size': str(self.replicas)})
320
+                try:
321
+                    set_app_name_for_pool(client=self.service,
322
+                                          pool=self.name,
323
+                                          name=self.app_name)
324
+                except CalledProcessError:
325
+                    log('Could not set app name for pool {}'.format(self.name, level=WARNING))
316 326
             except CalledProcessError:
317 327
                 raise
318 328
 
@@ -320,10 +330,14 @@ class ReplicatedPool(Pool):
320 330
 # Default jerasure erasure coded pool
321 331
 class ErasurePool(Pool):
322 332
     def __init__(self, service, name, erasure_code_profile="default",
323
-                 percent_data=10.0):
333
+                 percent_data=10.0, app_name=None):
324 334
         super(ErasurePool, self).__init__(service=service, name=name)
325 335
         self.erasure_code_profile = erasure_code_profile
326 336
         self.percent_data = percent_data
337
+        if app_name:
338
+            self.app_name = app_name
339
+        else:
340
+            self.app_name = 'unknown'
327 341
 
328 342
     def create(self):
329 343
         if not pool_exists(self.service, self.name):
@@ -355,6 +369,12 @@ class ErasurePool(Pool):
355 369
                    'erasure', self.erasure_code_profile]
356 370
             try:
357 371
                 check_call(cmd)
372
+                try:
373
+                    set_app_name_for_pool(client=self.service,
374
+                                          pool=self.name,
375
+                                          name=self.app_name)
376
+                except CalledProcessError:
377
+                    log('Could not set app name for pool {}'.format(self.name, level=WARNING))
358 378
             except CalledProcessError:
359 379
                 raise
360 380
 
@@ -778,6 +798,25 @@ def update_pool(client, pool, settings):
778 798
     check_call(cmd)
779 799
 
780 800
 
801
+def set_app_name_for_pool(client, pool, name):
802
+    """
803
+    Calls `osd pool application enable` for the specified pool name
804
+
805
+    :param client: Name of the ceph client to use
806
+    :type client: str
807
+    :param pool: Pool to set app name for
808
+    :type pool: str
809
+    :param name: app name for the specified pool
810
+    :type name: str
811
+
812
+    :raises: CalledProcessError if ceph call fails
813
+    """
814
+    if ceph_version() >= '12.0.0':
815
+        cmd = ['ceph', '--id', client, 'osd', 'pool',
816
+               'application', 'enable', pool, name]
817
+        check_call(cmd)
818
+
819
+
781 820
 def create_pool(service, name, replicas=3, pg_num=None):
782 821
     """Create a new RADOS pool."""
783 822
     if pool_exists(service, name):
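
A hedged usage sketch (hypothetical client and pool names; needs a working ceph keyring): charm-created pools can now carry an application tag, which Ceph Luminous (12.x) warns about when missing.

    from charmhelpers.contrib.storage.linux.ceph import ReplicatedPool

    pool = ReplicatedPool(service='cinder-ceph', name='cinder-ceph',
                          replicas=3, app_name='rbd')
    pool.create()  # also runs 'ceph osd pool application enable' on >= 12.0.0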

hooks/charmhelpers/contrib/storage/linux/lvm.py (+29, -0)

@@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device):
     '''
     cmd = ['lvextend', lv_name, block_device]
     check_call(cmd)
+
+
+def create_logical_volume(lv_name, volume_group, size=None):
+    '''
+    Create a new logical volume in an existing volume group
+
+    :param lv_name: str: name of logical volume to be created.
+    :param volume_group: str: Name of volume group to use for the new volume.
+    :param size: str: Size of logical volume to create (100% if not supplied)
+    :raises subprocess.CalledProcessError: in the event that the lvcreate fails.
+    '''
+    if size:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-L',
+            '{}'.format(size),
+            '-n', lv_name, volume_group
+        ])
+    # create the lv with all the space available, this is needed because the
+    # system call is different for LVM
+    else:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-l',
+            '100%FREE',
+            '-n', lv_name, volume_group
+        ])
+ 16
- 0
hooks/charmhelpers/contrib/storage/linux/utils.py View File

@@ -67,3 +67,19 @@ def is_device_mounted(device):
67 67
     except Exception:
68 68
         return False
69 69
     return bool(re.search(r'MOUNTPOINT=".+"', out))
70
+
71
+
72
+def mkfs_xfs(device, force=False):
73
+    """Format device with XFS filesystem.
74
+
75
+    By default this should fail if the device already has a filesystem on it.
76
+    :param device: Full path to device to format
77
+    :ptype device: tr
78
+    :param force: Force operation
79
+    :ptype: force: boolean"""
80
+    cmd = ['mkfs.xfs']
81
+    if force:
82
+        cmd.append("-f")
83
+
84
+    cmd += ['-i', 'size=1024', device]
85
+    check_call(cmd)
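
A hedged usage sketch (hypothetical device path; destructive, needs root and xfsprogs installed):

    from charmhelpers.contrib.storage.linux.utils import mkfs_xfs

    mkfs_xfs('/dev/vdb')              # refuses to clobber an existing filesystem
    mkfs_xfs('/dev/vdb', force=True)  # adds -f to force the format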

hooks/charmhelpers/core/hookenv.py (+104, -34)

@@ -27,6 +27,7 @@ import glob
 import os
 import json
 import yaml
+import re
 import subprocess
 import sys
 import errno
@@ -67,7 +68,7 @@ def cached(func):
     @wraps(func)
     def wrapper(*args, **kwargs):
         global cache
-        key = str((func, args, kwargs))
+        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
         try:
             return cache[key]
         except KeyError:
@@ -289,7 +290,7 @@ class Config(dict):
         self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
-        if os.path.exists(self.path):
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
             self.load_previous()
         atexit(self._implicit_save)

@@ -309,7 +310,11 @@ class Config(dict):
         """
         self.path = path or self.path
         with open(self.path) as f:
-            self._prev_dict = json.load(f)
+            try:
+                self._prev_dict = json.load(f)
+            except ValueError as e:
+                log('Unable to parse previous config data - {}'.format(str(e)),
+                    level=ERROR)
         for k, v in copy.deepcopy(self._prev_dict).items():
             if k not in self:
                 self[k] = v
@@ -353,22 +358,40 @@ class Config(dict):
             self.save()


-@cached
+_cache_config = None
+
+
 def config(scope=None):
-    """Juju charm configuration"""
-    config_cmd_line = ['config-get']
-    if scope is not None:
-        config_cmd_line.append(scope)
-    else:
-        config_cmd_line.append('--all')
-    config_cmd_line.append('--format=json')
+    """
+    Get the juju charm configuration (scope==None) or individual key,
+    (scope=str).  The returned value is a Python data structure loaded as
+    JSON from the Juju config command.
+
+    :param scope: If set, return the value for the specified key.
+    :type scope: Optional[str]
+    :returns: Either the whole config as a Config, or a key from it.
+    :rtype: Any
+    """
+    global _cache_config
+    config_cmd_line = ['config-get', '--all', '--format=json']
     try:
-        config_data = json.loads(
-            subprocess.check_output(config_cmd_line).decode('UTF-8'))
+        # JSON Decode Exception for Python3.5+
+        exc_json = json.decoder.JSONDecodeError
+    except AttributeError:
+        # JSON Decode Exception for Python2.7 through Python3.4
+        exc_json = ValueError
+    try:
+        if _cache_config is None:
+            config_data = json.loads(
+                subprocess.check_output(config_cmd_line).decode('UTF-8'))
+            _cache_config = Config(config_data)
         if scope is not None:
-            return config_data
-        return Config(config_data)
-    except ValueError:
+            return _cache_config.get(scope)
+        return _cache_config
+    except (exc_json, UnicodeDecodeError) as e:
+        log('Unable to parse output from config-get: config_cmd_line="{}" '
+            'message="{}"'
+            .format(config_cmd_line, str(e)), level=ERROR)
         return None


@@ -1043,7 +1066,6 @@ def juju_version():
                                    universal_newlines=True).strip()


-@cached
 def has_juju_version(minimum_version):
     """Return True if the Juju version is at least the provided version"""
     return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1103,6 +1125,8 @@ def _run_atexit():
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get_primary_address(binding):
     '''
+    Deprecated since Juju 2.3; use network_get()
+
     Retrieve the primary network address for a named binding

     :param binding: string. The name of a relation of extra-binding
@@ -1123,7 +1147,6 @@ def network_get_primary_address(binding):
     return response


-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get(endpoint, relation_id=None):
     """
     Retrieve the network details for a relation endpoint
@@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None):
     :param endpoint: string. The name of a relation endpoint
     :param relation_id: int. The ID of the relation for the current context.
     :return: dict. The loaded YAML output of the network-get query.
-    :raise: NotImplementedError if run on Juju < 2.1
+    :raise: NotImplementedError if request not supported by the Juju version.
     """
+    if not has_juju_version('2.2'):
+        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
+    if relation_id and not has_juju_version('2.3'):
+        raise NotImplementedError  # 2.3 added the -r option
+
     cmd = ['network-get', endpoint, '--format', 'yaml']
     if relation_id:
         cmd.append('-r')
         cmd.append(relation_id)
-    try:
-        response = subprocess.check_output(
-            cmd,
-            stderr=subprocess.STDOUT).decode('UTF-8').strip()
-    except CalledProcessError as e:
-        # Early versions of Juju 2.0.x required the --primary-address argument.
-        # We catch that condition here and raise NotImplementedError since
-        # the requested semantics are not available - the caller can then
-        # use the network_get_primary_address() method instead.
-        if '--primary-address is currently required' in e.output.decode('UTF-8'):
-            raise NotImplementedError
-        raise
+    response = subprocess.check_output(
+        cmd,
+        stderr=subprocess.STDOUT).decode('UTF-8').strip()
     return yaml.safe_load(response)


@@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name):

 def ingress_address(rid=None, unit=None):
     """
-    Retrieve the ingress-address from a relation when available. Otherwise,
-    return the private-address. This function is to be used on the consuming
-    side of the relation.
+    Retrieve the ingress-address from a relation when available.
+    Otherwise, return the private-address.
+
+    When used on the consuming side of the relation (unit is a remote
+    unit), the ingress-address is the IP address that this unit needs
+    to use to reach the provided service on the remote unit.
+
+    When used on the providing side of the relation (unit == local_unit()),
+    the ingress-address is the IP address that is advertised to remote
+    units on this relation. Remote units need to use this address to
+    reach the local provided service on this unit.
+
+    Note that charms may document some other method to use in
+    preference to the ingress_address(), such as an address provided
+    on a different relation attribute or a service discovery mechanism.
+    This allows charms to redirect inbound connections to their peers
+    or different applications such as load balancers.

     Usage:
     addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1220,3 +1253,40 @@
     settings = relation_get(rid=rid, unit=unit)
     return (settings.get('ingress-address') or
             settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+    """
+    Retrieve the egress-subnets from a relation.
+
+    This function is to be used on the providing side of the
+    relation, and provides the ranges of addresses that client
+    connections may come from. The result is uninteresting on
+    the consuming side of a relation (unit == local_unit()).
+
+    Returns a stable list of subnets in CIDR format.
+    eg. ['192.168.1.0/24', '2001::F00F/128']
+
+    If egress-subnets is not available, falls back to using the published
+    ingress-address, or finally private-address.
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+    """
+    def _to_range(addr):
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+            addr += '/32'
+        elif ':' in addr and '/' not in addr:  # IPv6
+            addr += '/128'
+        return addr
+
+    settings = relation_get(rid=rid, unit=unit)
+    if 'egress-subnets' in settings:
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+    if 'ingress-address' in settings:
+        return [_to_range(settings['ingress-address'])]
+    if 'private-address' in settings:
+        return [_to_range(settings['private-address'])]
+    return []  # Should never happen
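
A standalone restatement (not part of this commit) of the CIDR normalisation inside egress_subnets():

    import re

    def to_range(addr):
        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
            addr += '/32'                      # bare IPv4 host -> /32
        elif ':' in addr and '/' not in addr:  # bare IPv6 host -> /128
            addr += '/128'
        return addr                            # already CIDR: unchanged

    assert to_range('192.168.1.5') == '192.168.1.5/32'
    assert to_range('2001::F00F') == '2001::F00F/128'
    assert to_range('10.0.0.0/24') == '10.0.0.0/24'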

hooks/charmhelpers/core/host.py (+9, -2)

@@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path):
     return output


-def modulo_distribution(modulo=3, wait=30):
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
     """ Modulo distribution

     This helper uses the unit number, a modulo value and a constant wait time
@@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):

     @param modulo: int The modulo number creates the group distribution
     @param wait: int The constant time wait value
+    @param non_zero_wait: boolean Override unit % modulo == 0,
+                          return modulo * wait. Used to avoid collisions with
+                          leader nodes which are often given priority.
     @return: int Calculated time to wait for unit operation
     """
     unit_number = int(local_unit().split('/')[1])
-    return (unit_number % modulo) * wait
+    calculated_wait_time = (unit_number % modulo) * wait
+    if non_zero_wait and calculated_wait_time == 0:
+        return modulo * wait
+    else:
+        return calculated_wait_time
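
A standalone restatement (not part of this commit) of the new arithmetic, with local_unit() replaced by an explicit unit number:

    def modulo_distribution(unit_number, modulo=3, wait=30, non_zero_wait=False):
        calculated_wait_time = (unit_number % modulo) * wait
        if non_zero_wait and calculated_wait_time == 0:
            # promote the zero slot so these units never collide with the
            # leader, which distributed_wait() now lets go first
            return modulo * wait
        return calculated_wait_time

    assert [modulo_distribution(n) for n in range(4)] == [0, 30, 60, 0]
    assert [modulo_distribution(n, non_zero_wait=True)
            for n in range(4)] == [90, 30, 60, 90]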

hooks/charmhelpers/core/services/base.py (+18, -7)

@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
     """
     def __call__(self, manager, service_name, event_name):
         service = manager.get_service(service_name)
-        new_ports = service.get('ports', [])
+        # turn this generator into a list,
+        # as we'll be going over it multiple times
+        new_ports = list(service.get('ports', []))
         port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
         if os.path.exists(port_file):
             with open(port_file) as fp:
                 old_ports = fp.read().split(',')
             for old_port in old_ports:
-                if bool(old_port):
-                    old_port = int(old_port)
-                    if old_port not in new_ports:
-                        hookenv.close_port(old_port)
+                if bool(old_port) and not self.ports_contains(old_port, new_ports):
+                    hookenv.close_port(old_port)
         with open(port_file, 'w') as fp:
             fp.write(','.join(str(port) for port in new_ports))
         for port in new_ports:
+            # A port is either a number or 'ICMP'
+            protocol = 'TCP'
+            if str(port).upper() == 'ICMP':
+                protocol = 'ICMP'
             if event_name == 'start':
-                hookenv.open_port(port)
+                hookenv.open_port(port, protocol)
             elif event_name == 'stop':
-                hookenv.close_port(port)
+                hookenv.close_port(port, protocol)
+
+    def ports_contains(self, port, ports):
+        if not bool(port):
+            return False
+        if str(port).upper() != 'ICMP':
+            port = int(port)
+        return port in ports


 def service_stop(service_name):
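
A standalone restatement (not part of this commit) of the ICMP-aware membership check: entries read back from the .ports file are strings, so numeric entries are cast before comparison while the literal 'ICMP' marker is kept as-is.

    def ports_contains(port, ports):
        if not bool(port):
            return False
        if str(port).upper() != 'ICMP':
            port = int(port)
        return port in ports

    assert ports_contains('8080', [8080, 'ICMP'])  # numeric string -> int
    assert ports_contains('ICMP', [8080, 'ICMP'])  # protocol marker kept
    assert not ports_contains('', [8080])          # empty entries skipped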

hooks/charmhelpers/core/sysctl.py (+11, -7)

@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 def create(sysctl_dict, sysctl_file):
     """Creates a sysctl.conf file from a YAML associative array

-    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+    :param sysctl_dict: a dict or YAML-formatted string of sysctl
+                        options eg "{ 'kernel.max_pid': 1337 }"
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
     :returns: None
     """
-    try:
-        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
-    except yaml.YAMLError:
-        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
-            level=ERROR)
-        return
+    if type(sysctl_dict) is not dict:
+        try:
+            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+        except yaml.YAMLError:
+            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+                level=ERROR)
+            return
+    else:
+        sysctl_dict_parsed = sysctl_dict

     with open(sysctl_file, "w") as fd:
         for key, value in sysctl_dict_parsed.items():
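
A hedged usage sketch (hypothetical target file; needs root to write and apply): create() now accepts a dict directly as well as the YAML-string form.

    from charmhelpers.core.sysctl import create

    create({'kernel.pid_max': 4194303}, '/etc/sysctl.d/50-charm.conf')    # dict
    create("{'kernel.pid_max': 4194303}", '/etc/sysctl.d/50-charm.conf')  # YAML string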

hooks/charmhelpers/core/unitdata.py (+7, -2)

@@ -166,6 +166,10 @@ class Storage(object):

     To support dicts, lists, integer, floats, and booleans values
     are automatically json encoded/decoded.
+
+    Note: to facilitate unit testing, ':memory:' can be passed as the
+    path parameter which causes sqlite3 to only build the db in memory.
+    This should only be used for testing purposes.
     """
     def __init__(self, path=None):
         self.db_path = path
@@ -175,8 +179,9 @@ class Storage(object):
             else:
                 self.db_path = os.path.join(
                     os.environ.get('CHARM_DIR', ''), '.unit-state.db')
-        with open(self.db_path, 'a') as f:
-            os.fchmod(f.fileno(), 0o600)
+        if self.db_path != ':memory:':
+            with open(self.db_path, 'a') as f:
+                os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
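
A hedged usage sketch per the note above: an in-memory unit-state database for unit tests.

    from charmhelpers.core.unitdata import Storage

    db = Storage(path=':memory:')   # sqlite3 keeps the db in memory only
    db.set('secret-id', 'abc123')
    assert db.get('secret-id') == 'abc123'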

hooks/charmhelpers/fetch/ubuntu.py (+1, -0)

@@ -44,6 +44,7 @@ ARCH_TO_PROPOSED_POCKET = {
     'x86_64': PROPOSED_POCKET,
     'ppc64le': PROPOSED_PORTS_POCKET,
     'aarch64': PROPOSED_PORTS_POCKET,
+    's390x': PROPOSED_PORTS_POCKET,
 }
 CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'

tests/basic_deployment.py (+100, -159)

@@ -67,7 +67,8 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
             {'name': 'percona-cluster'},
             {'name': 'keystone'},
             {'name': 'rabbitmq-server'},
-            {'name': 'ceph', 'units': 3},
+            {'name': 'ceph-mon', 'units': 3},
+            {'name': 'ceph-osd', 'units': 3},
             {'name': 'cinder'},
             {'name': 'cinder-ceph'},
         ]
@@ -78,8 +79,9 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
         """Add all of the relations for the services."""

         relations = {
-            'cinder-backup:ceph': 'ceph:client',
-            'cinder-ceph:ceph': 'ceph:client',
+            'cinder-backup:ceph': 'ceph-mon:client',
+            'cinder-ceph:ceph': 'ceph-mon:client',
+            'ceph-osd:mon': 'ceph-mon:osd',
             'cinder:storage-backend': 'cinder-ceph:storage-backend',
             'cinder:backup-backend': 'cinder-backup:backup-backend',
             'keystone:shared-db': 'percona-cluster:shared-db',
@@ -108,10 +110,16 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
             'auth-supported': 'none',
             'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
             'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',
-            'osd-reformat': 'yes',
+        }
+
+        # Include a non-existent device as osd-devices is a whitelist,
+        # and this will catch cases where proposals attempt to change that.
+        ceph_osd_config = {
+            'osd-reformat': True,
             'ephemeral-unmount': '/mnt',
-            'osd-devices': '/dev/vdb /srv/ceph'
+            'osd-devices': '/dev/vdb /srv/ceph /dev/test-non-existent'
         }
+
         cinder_ceph_config = {
             'ceph-osd-replication-count': '3',
         }
@@ -119,7 +127,8 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
             'keystone': keystone_config,
             'percona-cluster': pxc_config,
             'cinder': cinder_config,
-            'ceph': ceph_config,
+            'ceph-mon': ceph_config,
+            'ceph-osd': ceph_osd_config,
             'cinder-ceph': cinder_ceph_config,
             'cinder-backup': cinder_ceph_config,
         }
@@ -132,9 +141,12 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
         self.keystone_sentry = self.d.sentry['keystone'][0]
         self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
         self.cinder_sentry = self.d.sentry['cinder'][0]
-        self.ceph0_sentry = self.d.sentry['ceph'][0]
-        self.ceph1_sentry = self.d.sentry['ceph'][1]
-        self.ceph2_sentry = self.d.sentry['ceph'][2]
+        self.ceph0_sentry = self.d.sentry['ceph-mon'][0]
+        self.ceph1_sentry = self.d.sentry['ceph-mon'][1]
+        self.ceph2_sentry = self.d.sentry['ceph-mon'][2]
+        self.ceph_osd0_sentry = self.d.sentry['ceph-osd'][0]
+        self.ceph_osd1_sentry = self.d.sentry['ceph-osd'][1]
+        self.ceph_osd2_sentry = self.d.sentry['ceph-osd'][2]
         self.cinder_backup_sentry = self.d.sentry['cinder-backup'][0]
         u.log.debug('openstack release val: {}'.format(
             self._get_openstack_release()))
@@ -142,147 +154,46 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
             self._get_openstack_release_string()))

         # Authenticate admin with keystone
-        self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
-                                                      user='admin',
-                                                      password='openstack',
-                                                      tenant='admin')
-        # Authenticate admin with cinder endpoint
-        self.cinder = u.authenticate_cinder_admin(self.keystone_sentry,
-                                                  username='admin',
-                                                  password='openstack',
-                                                  tenant='admin')
-
-        # Create a demo tenant/role/user
-        self.demo_tenant = 'demoTenant'
-        self.demo_role = 'demoRole'
-        self.demo_user = 'demoUser'
-        if not u.tenant_exists(self.keystone, self.demo_tenant):
-            tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
-                                                  description='demo tenant',
-                                                  enabled=True)
-            self.keystone.roles.create(name=self.demo_role)
-            self.keystone.users.create(name=self.demo_user,
-                                       password='password',
-                                       tenant_id=tenant.id,
-                                       email='demo@demo.com')
-
-        # Authenticate demo user with keystone
-        self.keystone_demo = u.authenticate_keystone_user(self.keystone,
-                                                          self.demo_user,
-                                                          'password',
-                                                          self.demo_tenant)
-
-        # Authenticate demo user with nova-api
-        self.nova_demo = u.authenticate_nova_user(self.keystone,
-                                                  self.demo_user,
-                                                  'password',
-                                                  self.demo_tenant)
-
-    def test_100_ceph_processes(self):
-        """Verify that the expected service processes are running
-        on each ceph unit."""
-
-        # Process name and quantity of processes to expect on each unit
-        ceph_processes = {
-            'ceph-mon': 1,
-            'ceph-osd': 2
-        }
+        self.keystone_session, self.keystone = u.get_default_keystone_session(
+            self.keystone_sentry,
+            openstack_release=self._get_openstack_release())

-        # Units with process names and PID quantities expected
-        expected_processes = {
-            self.ceph0_sentry: ceph_processes,
-            self.ceph1_sentry: ceph_processes,
-            self.ceph2_sentry: ceph_processes
-        }
-
-        actual_pids = u.get_unit_process_ids(expected_processes)
-        ret = u.validate_unit_process_ids(expected_processes, actual_pids)
-        if ret:
-            amulet.raise_status(amulet.FAIL, msg=ret)
+        # Authenticate admin with cinder endpoint
+        if self._get_openstack_release() >= self.xenial_pike:
+            api_version = 2
+        else:
+            api_version = 1
+        self.cinder = u.authenticate_cinder_admin(self.keystone, api_version)

     def test_102_services(self):
         """Verify the expected services are running on the service units."""
+        if self._get_openstack_release() >= self.xenial_ocata:
+            cinder_services = ['apache2',
+                               'cinder-scheduler',
+                               'cinder-volume']
+        else:
+            cinder_services = ['cinder-api',
+                               'cinder-scheduler',
+                               'cinder-volume']
         services = {
-            self.rabbitmq_sentry: ['rabbitmq-server'],
-            self.keystone_sentry: ['keystone'],
-            self.cinder_sentry: ['cinder-api',
-                                 'cinder-scheduler',
-                                 'cinder-volume'],
+            self.cinder_sentry: cinder_services,
         }

-        if self._get_openstack_release() < self.xenial_mitaka:
-            # For upstart systems only.  Ceph services under systemd
-            # are checked by process name instead.
-            ceph_services = [
-                'ceph-mon-all',
-                'ceph-mon id=`hostname`',
-                'ceph-osd-all',
-                'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
-                'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1))
-            ]
-            services[self.ceph0_sentry] = ceph_services
-            services[self.ceph1_sentry] = ceph_services
-            services[self.ceph2_sentry] = ceph_services
-
-        if self._get_openstack_release() >= self.trusty_liberty:
-            services[self.keystone_sentry] = ['apache2']
-
-        if self._get_openstack_release() >= self.xenial_ocata:
-            services[self.cinder_sentry].remove('cinder-api')
-
         ret = u.validate_services_by_name(services)
         if ret:
             amulet.raise_status(amulet.FAIL, msg=ret)

-    def test_110_users(self):
-        """Verify expected users."""
-        u.log.debug('Checking keystone users...')
-
-        if self._get_openstack_release() < self.xenial_pike:
-            expected = [{
-                'name': 'cinder_cinderv2',
-                'enabled': True,
-                'tenantId': u.not_null,
-                'id': u.not_null,
-                'email': 'juju@localhost',
-            }]
-        else:
-            expected = [{
-                'name': 'cinderv2_cinderv3',
-                'enabled': True,
-                'tenantId': u.not_null,
-                'id': u.not_null,
-                'email': 'juju@localhost',
-            }]
-
-        expected.append({
-            'name': 'admin',
260
-            'enabled': True,
261
-            'tenantId': u.not_null,
262
-            'id': u.not_null,
263
-            'email': 'juju@localhost',
264
-        })
265
-
266
-        actual = self.keystone.users.list()
267
-        ret = u.validate_user_data(expected, actual)
268
-        if ret:
269
-            amulet.raise_status(amulet.FAIL, msg=ret)
270
-
271 186
     def test_112_service_catalog(self):
272 187
         """Verify that the service catalog endpoint data"""
273 188
         u.log.debug('Checking keystone service catalog...')
274
-        endpoint_vol = {
275
-            'adminURL': u.valid_url,
276
-            'region': 'RegionOne',
277
-            'publicURL': u.valid_url,
278
-            'internalURL': u.valid_url
279
-        }
280
-        endpoint_id = {
281
-            'adminURL': u.valid_url,
282
-            'region': 'RegionOne',
283
-            'publicURL': u.valid_url,
284
-            'internalURL': u.valid_url
285
-        }
189
+        endpoint_vol = {'adminURL': u.valid_url,
190
+                        'region': 'RegionOne',
191
+                        'publicURL': u.valid_url,
192
+                        'internalURL': u.valid_url}
193
+        endpoint_id = {'adminURL': u.valid_url,
194
+                       'region': 'RegionOne',
195
+                       'publicURL': u.valid_url,
196
+                       'internalURL': u.valid_url}
286 197
         if self._get_openstack_release() >= self.trusty_icehouse:
287 198
             endpoint_vol['id'] = u.not_null
288 199
             endpoint_id['id'] = u.not_null
@@ -295,29 +206,49 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
295 206
             # Ocata and prior
296 207
             expected = {'identity': [endpoint_id],
297 208
                         'volume': [endpoint_id]}
298
-
299 209
         actual = self.keystone.service_catalog.get_endpoints()
300 210
 
301
-        ret = u.validate_svc_catalog_endpoint_data(expected, actual)
211
+        ret = u.validate_svc_catalog_endpoint_data(
212
+            expected,
213
+            actual,
214
+            openstack_release=self._get_openstack_release())
302 215
         if ret:
303 216
             amulet.raise_status(amulet.FAIL, msg=ret)
304 217
 
305 218
     def test_114_cinder_endpoint(self):
306 219
         """Verify the cinder endpoint data."""
307
-        u.log.debug('Checking cinder api endpoint data...')
220
+        u.log.debug('Checking cinder endpoint...')
308 221
         endpoints = self.keystone.endpoints.list()
309 222
         admin_port = internal_port = public_port = '8776'
310
-        expected = {
311
-            'id': u.not_null,
312
-            'region': 'RegionOne',
313
-            'adminurl': u.valid_url,
314
-            'internalurl': u.valid_url,
315
-            'publicurl': u.valid_url,
316
-            'service_id': u.not_null
317
-        }
318
-
319
-        ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
320
-                                       public_port, expected)
223
+        if self._get_openstack_release() >= self.xenial_queens:
224
+            expected = {
225
+                'id': u.not_null,
226
+                'region': 'RegionOne',
227
+                'region_id': 'RegionOne',
228
+                'url': u.valid_url,
229
+                'interface': u.not_null,
230
+                'service_id': u.not_null}
231
+            ret = u.validate_v3_endpoint_data(
232
+                endpoints,
233
+                admin_port,
234
+                internal_port,
235
+                public_port,
236
+                expected,
237
+                6)
238
+        else:
239
+            expected = {
240
+                'id': u.not_null,
241
+                'region': 'RegionOne',
242
+                'adminurl': u.valid_url,
243
+                'internalurl': u.valid_url,
244
+                'publicurl': u.valid_url,
245
+                'service_id': u.not_null}
246
+            ret = u.validate_v2_endpoint_data(
247
+                endpoints,
248
+                admin_port,
249
+                internal_port,
250
+                public_port,
251
+                expected)
321 252
         if ret:
322 253
             amulet.raise_status(amulet.FAIL,
323 254
                                 msg='cinder endpoint: {}'.format(ret))
@@ -342,7 +273,7 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
342 273
         client_unit = self.cinder_backup_sentry
343 274
         broker_req = json.loads(client_unit.relation(
344 275
             'ceph',
345
-            'ceph:client')['broker_req'])
276
+            'ceph-mon:client')['broker_req'])
346 277
         return broker_req
347 278
 
348 279
     def get_broker_response(self):
@@ -371,7 +302,7 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
371 302
         u.log.debug('Checking cinder-backup:ceph to ceph:client '
372 303
                     'relation data...')
373 304
         unit = self.cinder_backup_sentry
374
-        relation = ['ceph', 'ceph:client']
305
+        relation = ['ceph', 'ceph-mon:client']
375 306
 
376 307
         req = {
377 308
             "api-version": 1,
@@ -385,15 +316,15 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
385 316
         }
386 317
         ret = u.validate_relation_data(unit, relation, expected)
387 318
         if ret:
388
-            msg = u.relation_error('cinder-backup ceph', ret)
319
+            msg = u.relation_error('cinder-backup ceph-mon', ret)
389 320
             amulet.raise_status(amulet.FAIL, msg=msg)
390 321
         ret = self.validate_broker_req(unit, relation, req)
391 322
         if ret:
392
-            msg = u.relation_error('cinder-backup ceph', ret)
323
+            msg = u.relation_error('cinder-backup ceph-mon', ret)
393 324
             amulet.raise_status(amulet.FAIL, msg=msg)
394 325
 
395 326
     def test_201_ceph_cinderbackup_ceph_relation(self):
396
-        u.log.debug('Checking ceph:client to cinder-backup:ceph '
327
+        u.log.debug('Checking ceph-mon:client to cinder-backup:ceph '
397 328
                     'relation data...')
398 329
         ceph_unit = self.ceph0_sentry
399 330
         relation = ['client', 'cinder-backup:ceph']
@@ -816,17 +747,27 @@ class CinderBackupBasicDeployment(OpenStackAmuletDeployment):
816 747
 
817 748
         name = "demo-vol"
818 749
         vols = self.cinder.volumes.list()
819
-        cinder_vols = [v for v in vols if v.name == name]
750
+        try:
751
+            cinder_vols = [v for v in vols if v.name == name]
752
+        except AttributeError:
753
+            cinder_vols = [v for v in vols if v.display_name == name]
820 754
         if not cinder_vols:
821 755
             # NOTE(hopem): it appears that at some point cinder-backup stopped
822 756
             # restoring volume metadata properly so revert to default name if
823 757
             # original is not found
824 758
             name = "restore_backup_%s" % (vol_backup.id)
825
-            cinder_vols = [v for v in vols if v.name == name]
759
+            try:
760
+                cinder_vols = [v for v in vols if v.name == name]
761
+            except AttributeError:
762
+                cinder_vols = [v for v in vols if v.display_name == name]
826 763
 
827 764
         if not cinder_vols:
828
-            msg = ("Could not find restore vol '%s' in %s" %
829
-                   (name, [v.name for v in vols]))
765
+            try:
766
+                msg = ("Could not find restore vol '%s' in %s" %
767
+                       (name, [v.name for v in vols]))
768
+            except AttributeError:
769
+                msg = ("Could not find restore vol '%s' in %s" %
770
+                       (name, [v.display_name for v in vols]))
830 771
             u.log.error(msg)
831 772
             amulet.raise_status(amulet.FAIL, msg=msg)
832 773
 
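The try/except AttributeError pattern above bridges cinder API versions: v2+ volume objects expose a name attribute while v1 volumes only carry display_name. A minimal standalone sketch of the same fallback (the helper name is illustrative, not part of the charm):

    def volume_name(vol):
        # cinder v2+ volumes carry .name; v1 volumes carry .display_name
        try:
            return vol.name
        except AttributeError:
            return vol.display_name

    # e.g. cinder_vols = [v for v in vols if volume_name(v) == 'demo-vol']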

+ 5
- 5
tests/charmhelpers/contrib/openstack/amulet/deployment.py View File

@@ -21,6 +21,9 @@ from collections import OrderedDict
21 21
 from charmhelpers.contrib.amulet.deployment import (
22 22
     AmuletDeployment
23 23
 )
24
+from charmhelpers.contrib.openstack.amulet.utils import (
25
+    OPENSTACK_RELEASES_PAIRS
26
+)
24 27
 
25 28
 DEBUG = logging.DEBUG
26 29
 ERROR = logging.ERROR
@@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
271 274
            release.
272 275
            """
273 276
         # Must be ordered by OpenStack release (not by Ubuntu release):
274
-        (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
275
-         self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
276
-         self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
277
-         self.xenial_pike, self.artful_pike, self.xenial_queens,
278
-         self.bionic_queens,) = range(13)
277
+        for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
278
+            setattr(self, os_pair, i)
279 279
 
280 280
         releases = {
281 281
             ('trusty', None): self.trusty_icehouse,

+ 200
- 25
tests/charmhelpers/contrib/openstack/amulet/utils.py View File

@@ -50,6 +50,13 @@ ERROR = logging.ERROR
50 50
 
51 51
 NOVA_CLIENT_VERSION = "2"
52 52
 
53
+OPENSTACK_RELEASES_PAIRS = [
54
+    'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
55
+    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
56
+    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
57
+    'xenial_pike', 'artful_pike', 'xenial_queens',
58
+    'bionic_queens']
59
+
53 60
 
54 61
 class OpenStackAmuletUtils(AmuletUtils):
55 62
     """OpenStack amulet utilities.
@@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils):
63 70
         super(OpenStackAmuletUtils, self).__init__(log_level)
64 71
 
65 72
     def validate_endpoint_data(self, endpoints, admin_port, internal_port,
66
-                               public_port, expected):
73
+                               public_port, expected, openstack_release=None):
74
+        """Validate endpoint data. Pick the correct validator based on
75
+           OpenStack release. Expected data should be in the v2 format:
76
+           {
77
+               'id': id,
78
+               'region': region,
79
+               'adminurl': adminurl,
80
+               'internalurl': internalurl,
81
+               'publicurl': publicurl,
82
+               'service_id': service_id}
83
+
84
+           """
85
+        validation_function = self.validate_v2_endpoint_data
86
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
87
+        if openstack_release and openstack_release >= xenial_queens:
88
+            validation_function = self.validate_v3_endpoint_data
89
+            expected = {
90
+                'id': expected['id'],
91
+                'region': expected['region'],
92
+                'region_id': 'RegionOne',
93
+                'url': self.valid_url,
94
+                'interface': self.not_null,
95
+                'service_id': expected['service_id']}
96
+        return validation_function(endpoints, admin_port, internal_port,
97
+                                   public_port, expected)
98
+
99
+    def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
100
+                                  public_port, expected):
67 101
         """Validate endpoint data.
68 102
 
69 103
            Validate actual endpoint data vs expected endpoint data. The ports
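validate_endpoint_data is now a thin dispatcher: releases at or beyond xenial_queens get the v3 validator and a translated expectation dict, everything older falls through to the renamed v2 helper. The dispatch pattern in isolation (release list trimmed for brevity):

    RELEASES = ['xenial_pike', 'xenial_queens', 'bionic_queens']

    def pick_validator(openstack_release, v2_fn, v3_fn):
        # None/unset release conservatively keeps the v2 validator
        xenial_queens = RELEASES.index('xenial_queens')
        if openstack_release and openstack_release >= xenial_queens:
            return v3_fn
        return v2_fn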
@@ -141,7 +175,86 @@ class OpenStackAmuletUtils(AmuletUtils):
141 175
         if len(found) != expected_num_eps:
142 176
             return 'Unexpected number of endpoints found'
143 177
 
144
-    def validate_svc_catalog_endpoint_data(self, expected, actual):
178
+    def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
179
+        """Convert v2 endpoint data into v3.
180
+
181
+           {
182
+               'service_name1': [
183
+                   {
184
+                       'adminURL': adminURL,
185
+                       'id': id,
186
+                       'region': region,
187
+                       'publicURL': publicURL,
188
+                       'internalURL': internalURL
189
+                   }],
190
+               'service_name2': [
191
+                   {
192
+                       'adminURL': adminURL,
193
+                       'id': id,
194
+                       'region': region,
195
+                       'publicURL': publicURL,
196
+                       'internalURL': internalURL
197
+                   }],
198
+           }
199
+          """
200
+        self.log.warn("Endpoint ID and Region ID validation is limited to not "
201
+                      "null checks after v2 to v3 conversion")
202
+        for svc in ep_data.keys():
203
+            assert len(ep_data[svc]) == 1, "Unknown data format"
204
+            svc_ep_data = ep_data[svc][0]
205
+            ep_data[svc] = [
206
+                {
207
+                    'url': svc_ep_data['adminURL'],
208
+                    'interface': 'admin',
209
+                    'region': svc_ep_data['region'],
210
+                    'region_id': self.not_null,
211
+                    'id': self.not_null},
212
+                {
213
+                    'url': svc_ep_data['publicURL'],
214
+                    'interface': 'public',
215
+                    'region': svc_ep_data['region'],
216
+                    'region_id': self.not_null,
217
+                    'id': self.not_null},
218
+                {
219
+                    'url': svc_ep_data['internalURL'],
220
+                    'interface': 'internal',
221
+                    'region': svc_ep_data['region'],
222
+                    'region_id': self.not_null,
223
+                    'id': self.not_null}]
224
+        return ep_data
225
+
226
+    def validate_svc_catalog_endpoint_data(self, expected, actual,
227
+                                           openstack_release=None):
228
+        """Validate service catalog endpoint data. Pick the correct validator
229
+           for the OpenStack version. Expected data should be in the v2 format:
230
+           {
231
+               'service_name1': [
232
+                   {
233
+                       'adminURL': adminURL,
234
+                       'id': id,
235
+                       'region': region,
236
+                       'publicURL': publicURL,
237
+                       'internalURL': internalURL
238
+                   }],
239
+               'service_name2': [
240
+                   {
241
+                       'adminURL': adminURL,
242
+                       'id': id,
243
+                       'region': region,
244
+                       'publicURL': publicURL,
245
+                       'internalURL': internalURL
246
+                   }],
247
+           }
248
+
249
+           """
250
+        validation_function = self.validate_v2_svc_catalog_endpoint_data
251
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
252
+        if openstack_release and openstack_release >= xenial_queens:
253
+            validation_function = self.validate_v3_svc_catalog_endpoint_data
254
+            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
255
+        return validation_function(expected, actual)
256
+
257
+    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
145 258
         """Validate service catalog endpoint data.
146 259
 
147 260
            Validate a list of actual service catalog endpoints vs a list of
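convert_svc_catalog_endpoint_data_to_v3 fans each single v2 catalog record out into three v3 interface records. A worked example of the shape change (URLs are illustrative):

    v2 = {'cinder': [{'adminURL': 'http://10.5.0.1:8776/v2/t',
                      'publicURL': 'http://10.5.0.1:8776/v2/t',
                      'internalURL': 'http://10.5.0.1:8776/v2/t',
                      'id': 'abc123', 'region': 'RegionOne'}]}
    # after conversion, 'cinder' maps to three entries, one per interface:
    #   {'url': <adminURL>,    'interface': 'admin',    'region': 'RegionOne', ...}
    #   {'url': <publicURL>,   'interface': 'public',   'region': 'RegionOne', ...}
    #   {'url': <internalURL>, 'interface': 'internal', 'region': 'RegionOne', ...}
    # with 'id' and 'region_id' relaxed to not-null wildcard checks.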
@@ -328,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils):
328 441
             if rel.get('api_version') != str(api_version):
329 442
                 raise Exception("api_version not propagated through relation"
330 443
                                 " data yet ('{}' != '{}')."
331
-                                "".format(rel['api_version'], api_version))
444
+                                "".format(rel.get('api_version'), api_version))
332 445
 
333 446
     def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
334 447
                                        api_version):
@@ -350,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils):
350 463
         deployment._auto_wait_for_status()
351 464
         self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
352 465
 
353
-    def authenticate_cinder_admin(self, keystone_sentry, username,
354
-                                  password, tenant, api_version=2):
466
+    def authenticate_cinder_admin(self, keystone, api_version=2):
355 467
         """Authenticates admin user with cinder."""
356
-        # NOTE(beisner): cinder python client doesn't accept tokens.
357
-        keystone_ip = keystone_sentry.info['public-address']
358
-        ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
468
+        self.log.debug('Authenticating cinder admin...')
359 469
         _clients = {
360 470
             1: cinder_client.Client,
361 471
             2: cinder_clientv2.Client}
362
-        return _clients[api_version](username, password, tenant, ept)
472
+        return _clients[api_version](session=keystone.session)
363 473
 
364 474
     def authenticate_keystone(self, keystone_ip, username, password,
365 475
                               api_version=False, admin_port=False,
@@ -367,13 +477,36 @@ class OpenStackAmuletUtils(AmuletUtils):
367 477
                               project_domain_name=None, project_name=None):
368 478
         """Authenticate with Keystone"""
369 479
         self.log.debug('Authenticating with keystone...')
370
-        port = 5000
371
-        if admin_port:
372
-            port = 35357
373
-        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
374
-                                        port)
375
-        if not api_version or api_version == 2:
376
-            ep = base_ep + "/v2.0"
480
+        if not api_version:
481
+            api_version = 2
482
+        sess, auth = self.get_keystone_session(
483
+            keystone_ip=keystone_ip,
484
+            username=username,
485
+            password=password,
486
+            api_version=api_version,
487
+            admin_port=admin_port,
488
+            user_domain_name=user_domain_name,
489
+            domain_name=domain_name,
490
+            project_domain_name=project_domain_name,
491
+            project_name=project_name
492
+        )
493
+        if api_version == 2:
494
+            client = keystone_client.Client(session=sess)
495
+        else:
496
+            client = keystone_client_v3.Client(session=sess)
497
+        # This populates the client.service_catalog
498
+        client.auth_ref = auth.get_access(sess)
499
+        return client
500
+
501
+    def get_keystone_session(self, keystone_ip, username, password,
502
+                             api_version=False, admin_port=False,
503
+                             user_domain_name=None, domain_name=None,
504
+                             project_domain_name=None, project_name=None):
505
+        """Return a keystone session object"""
506
+        ep = self.get_keystone_endpoint(keystone_ip,
507
+                                        api_version=api_version,
508
+                                        admin_port=admin_port)
509
+        if api_version == 2:
377 510
             auth = v2.Password(
378 511
                 username=username,
379 512
                 password=password,
@@ -381,12 +514,7 @@ class OpenStackAmuletUtils(AmuletUtils):
381 514
                 auth_url=ep
382 515
             )
383 516
             sess = keystone_session.Session(auth=auth)
384
-            client = keystone_client.Client(session=sess)
385
-            # This populates the client.service_catalog
386
-            client.auth_ref = auth.get_access(sess)
387
-            return client
388 517
         else:
389
-            ep = base_ep + "/v3"
390 518
             auth = v3.Password(
391 519
                 user_domain_name=user_domain_name,
392 520
                 username=username,
@@ -397,10 +525,57 @@ class OpenStackAmuletUtils(AmuletUtils):
397 525
                 auth_url=ep
398 526
             )
399 527
             sess = keystone_session.Session(auth=auth)
400
-            client = keystone_client_v3.Client(session=sess)
401
-            # This populates the client.service_catalog
402
-            client.auth_ref = auth.get_access(sess)
403
-            return client
528
+        return (sess, auth)
529
+
530
+    def get_keystone_endpoint(self, keystone_ip, api_version=None,
531
+                              admin_port=False):
532
+        """Return keystone endpoint"""
533
+        port = 5000
534
+        if admin_port:
535
+            port = 35357
536
+        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
537
+                                        port)
538
+        if api_version == 2:
539
+            ep = base_ep + "/v2.0"
540
+        else:
541
+            ep = base_ep + "/v3"
542
+        return ep
543
+
544
+    def get_default_keystone_session(self, keystone_sentry,
545
+                                     openstack_release=None):
546
+        """Return a keystone session object and client object assuming standard
547
+           default settings
548
+
549
+           Example call in amulet tests:
550
+               self.keystone_session, self.keystone = u.get_default_keystone_session(
551
+                   self.keystone_sentry,
552
+                   openstack_release=self._get_openstack_release())
553
+
554
+           The session can then be used to auth other clients:
555
+               neutronclient.Client(session=session)
556
+               aodh_client.Client(session=session)
557
+               etc.
558
+        """
559
+        self.log.debug('Authenticating keystone admin...')
560
+        api_version = 2
561
+        client_class = keystone_client.Client
562
+        # 11 => xenial_queens
563
+        if openstack_release and openstack_release >= 11:
564
+            api_version = 3
565
+            client_class = keystone_client_v3.Client
566
+        keystone_ip = keystone_sentry.info['public-address']
567
+        session, auth = self.get_keystone_session(
568
+            keystone_ip,
569
+            api_version=api_version,
570
+            username='admin',
571
+            password='openstack',
572
+            project_name='admin',
573
+            user_domain_name='admin_domain',
574
+            project_domain_name='admin_domain')
575
+        client = client_class(session=session)
576
+        # This populates the client.service_catalog
577
+        client.auth_ref = auth.get_access(session)
578
+        return session, client
404 579
 
405 580
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
406 581
                                     tenant=None, api_version=None,
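get_keystone_session wraps the keystoneauth1 plumbing; the equivalent raw calls for a v3 admin session look roughly like this (IP, credentials and domain names are the test defaults used above, not universal values):

    from keystoneauth1.identity import v3
    from keystoneauth1 import session as keystone_session
    from keystoneclient.v3 import client as keystone_client_v3

    auth = v3.Password(username='admin', password='openstack',
                       project_name='admin',
                       user_domain_name='admin_domain',
                       project_domain_name='admin_domain',
                       auth_url='http://10.5.0.10:5000/v3')
    sess = keystone_session.Session(auth=auth)
    keystone = keystone_client_v3.Client(session=sess)
    # the same session can then back other clients,
    # e.g. cinder_clientv2.Client(session=sess)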

+ 104
- 34
tests/charmhelpers/core/hookenv.py View File

@@ -27,6 +27,7 @@ import glob
27 27
 import os
28 28
 import json
29 29
 import yaml
30
+import re
30 31
 import subprocess
31 32
 import sys
32 33
 import errno
@@ -67,7 +68,7 @@ def cached(func):
67 68
     @wraps(func)
68 69
     def wrapper(*args, **kwargs):
69 70
         global cache
70
-        key = str((func, args, kwargs))
71
+        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
71 72
         try:
72 73
             return cache[key]
73 74
         except KeyError:
@@ -289,7 +290,7 @@ class Config(dict):
289 290
         self.implicit_save = True
290 291
         self._prev_dict = None
291 292
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
292
-        if os.path.exists(self.path):
293
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
293 294
             self.load_previous()
294 295
         atexit(self._implicit_save)
295 296
 
@@ -309,7 +310,11 @@ class Config(dict):
309 310
         """
310 311
         self.path = path or self.path
311 312
         with open(self.path) as f:
312
-            self._prev_dict = json.load(f)
313
+            try:
314
+                self._prev_dict = json.load(f)
315
+            except ValueError as e:
316
+                log('Unable to parse previous config data - {}'.format(str(e)),
317
+                    level=ERROR)
313 318
         for k, v in copy.deepcopy(self._prev_dict).items():
314 319
             if k not in self:
315 320
                 self[k] = v
@@ -353,22 +358,40 @@ class Config(dict):
353 358
             self.save()
354 359
 
355 360
 
356
-@cached
361
+_cache_config = None
362
+
363
+
357 364
 def config(scope=None):
358
-    """Juju charm configuration"""
359
-    config_cmd_line = ['config-get']
360
-    if scope is not None:
361
-        config_cmd_line.append(scope)
362
-    else:
363
-        config_cmd_line.append('--all')
364
-    config_cmd_line.append('--format=json')
365
+    """
366
+    Get the juju charm configuration (scope==None) or individual key,
367
+    (scope=str).  The returned value is a Python data structure loaded as
368
+    JSON from the Juju config command.
369
+
370
+    :param scope: If set, return the value for the specified key.
371
+    :type scope: Optional[str]
372
+    :returns: Either the whole config as a Config, or a key from it.
373
+    :rtype: Any
374
+    """
375
+    global _cache_config
376
+    config_cmd_line = ['config-get', '--all', '--format=json']
365 377
     try:
366
-        config_data = json.loads(
367
-            subprocess.check_output(config_cmd_line).decode('UTF-8'))
378
+        # JSON Decode Exception for Python3.5+
379
+        exc_json = json.decoder.JSONDecodeError
380
+    except AttributeError:
381
+        # JSON Decode Exception for Python2.7 through Python3.4
382
+        exc_json = ValueError
383
+    try:
384
+        if _cache_config is None:
385
+            config_data = json.loads(
386
+                subprocess.check_output(config_cmd_line).decode('UTF-8'))
387
+            _cache_config = Config(config_data)
368 388
         if scope is not None:
369
-            return config_data
370
-        return Config(config_data)
371
-    except ValueError:
389
+            return _cache_config.get(scope)
390
+        return _cache_config
391
+    except (exc_json, UnicodeDecodeError) as e:
392
+        log('Unable to parse output from config-get: config_cmd_line="{}" '
393
+            'message="{}"'
394
+            .format(config_cmd_line, str(e)), level=ERROR)
372 395
         return None
373 396
 
374 397
 
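Caching moves from the generic @cached decorator to a module-level _cache_config, so a single config-get --all subprocess call serves every later scope lookup in the hook invocation. The memoization pattern in miniature (the _fetch lambda stands in for the subprocess call):

    _cache_config = None

    def config(scope=None, _fetch=lambda: {'debug': True, 'workers': 4}):
        global _cache_config
        if _cache_config is None:
            _cache_config = _fetch()  # the expensive call happens once
        if scope is not None:
            return _cache_config.get(scope)
        return _cache_config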
@@ -1043,7 +1066,6 @@ def juju_version():
1043 1066
                                    universal_newlines=True).strip()
1044 1067
 
1045 1068
 
1046
-@cached
1047 1069
 def has_juju_version(minimum_version):
1048 1070
     """Return True if the Juju version is at least the provided version"""
1049 1071
     return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1103,6 +1125,8 @@ def _run_atexit():
1103 1125
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1104 1126
 def network_get_primary_address(binding):
1105 1127
     '''
1128
+    Deprecated since Juju 2.3; use network_get()
1129
+
1106 1130
     Retrieve the primary network address for a named binding
1107 1131
 
1108 1132
     :param binding: string. The name of a relation of extra-binding
@@ -1123,7 +1147,6 @@ def network_get_primary_address(binding):
1123 1147
     return response
1124 1148
 
1125 1149
 
1126
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
1127 1150
 def network_get(endpoint, relation_id=None):
1128 1151
     """
1129 1152
     Retrieve the network details for a relation endpoint
@@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None):
1131 1154
     :param endpoint: string. The name of a relation endpoint
1132 1155
     :param relation_id: int. The ID of the relation for the current context.
1133 1156
     :return: dict. The loaded YAML output of the network-get query.
1134
-    :raise: NotImplementedError if run on Juju < 2.1
1157
+    :raise: NotImplementedError if request not supported by the Juju version.
1135 1158
     """
1159
+    if not has_juju_version('2.2'):
1160
+        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
1161
+    if relation_id and not has_juju_version('2.3'):
1162
+        raise NotImplementedError  # 2.3 added the -r option
1163
+
1136 1164
     cmd = ['network-get', endpoint, '--format', 'yaml']
1137 1165
     if relation_id:
1138 1166
         cmd.append('-r')
1139 1167
         cmd.append(relation_id)
1140
-    try:
1141
-        response = subprocess.check_output(
1142
-            cmd,
1143
-            stderr=subprocess.STDOUT).decode('UTF-8').strip()
1144
-    except CalledProcessError as e:
1145
-        # Early versions of Juju 2.0.x required the --primary-address argument.
1146
-        # We catch that condition here and raise NotImplementedError since
1147
-        # the requested semantics are not available - the caller can then
1148
-        # use the network_get_primary_address() method instead.
1149
-        if '--primary-address is currently required' in e.output.decode('UTF-8'):
1150
-            raise NotImplementedError
1151
-        raise
1168
+    response = subprocess.check_output(
1169
+        cmd,
1170
+        stderr=subprocess.STDOUT).decode('UTF-8').strip()
1152 1171
     return yaml.safe_load(response)
1153 1172
 
1154 1173
 
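Because network_get still raises NotImplementedError on older Jujus, callers are expected to handle the fallback themselves. A hedged usage sketch (endpoint name and result key are illustrative; 'ingress-addresses' matches Juju 2.3-era output):

    from charmhelpers.core.hookenv import (
        network_get, network_get_primary_address)

    try:
        info = network_get('shared-db')
        addr = info.get('ingress-addresses', [None])[0]
    except NotImplementedError:
        # Juju < 2.2: only the deprecated primary-address query is available
        addr = network_get_primary_address('shared-db')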
@@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name):
1204 1223
 
1205 1224
 def ingress_address(rid=None, unit=None):
1206 1225
     """
1207
-    Retrieve the ingress-address from a relation when available. Otherwise,
1208
-    return the private-address. This function is to be used on the consuming
1209
-    side of the relation.
1226
+    Retrieve the ingress-address from a relation when available.
1227
+    Otherwise, return the private-address.
1228
+
1229
+    When used on the consuming side of the relation (unit is a remote
1230
+    unit), the ingress-address is the IP address that this unit needs
1231
+    to use to reach the provided service on the remote unit.
1232
+
1233
+    When used on the providing side of the relation (unit == local_unit()),
1234
+    the ingress-address is the IP address that is advertised to remote
1235
+    units on this relation. Remote units need to use this address to
1236
+    reach the local provided service on this unit.
1237
+
1238
+    Note that charms may document some other method to use in
1239
+    preference to the ingress_address(), such as an address provided
1240
+    on a different relation attribute or a service discovery mechanism.
1241
+    This allows charms to redirect inbound connections to their peers
1242
+    or different applications such as load balancers.
1210 1243
 
1211 1244
     Usage:
1212 1245
     addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None):
1220 1253
     settings = relation_get(rid=rid, unit=unit)
1221 1254
     return (settings.get('ingress-address') or
1222 1255
             settings.get('private-address'))
1256
+
1257
+
1258
+def egress_subnets(rid=None, unit=None):
1259
+    """
1260
+    Retrieve the egress-subnets from a relation.
1261
+
1262
+    This function is to be used on the providing side of the
1263
+    relation, and provides the ranges of addresses that client
1264
+    connections may come from. The result is uninteresting on
1265
+    the consuming side of a relation (unit == local_unit()).
1266
+
1267
+    Returns a stable list of subnets in CIDR format.
1268
+    eg. ['192.168.1.0/24', '2001::F00F/128']
1269
+
1270
+    If egress-subnets is not available, falls back to using the published
1271
+    ingress-address, or finally private-address.
1272
+
1273
+    :param rid: string relation id
1274
+    :param unit: string unit name
1275
+    :side effect: calls relation_get
1276
+    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
1277
+    """
1278
+    def _to_range(addr):
1279
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
1280
+            addr += '/32'
1281
+        elif ':' in addr and '/' not in addr:  # IPv6
1282
+            addr += '/128'
1283
+        return addr
1284
+
1285
+    settings = relation_get(rid=rid, unit=unit)
1286
+    if 'egress-subnets' in settings:
1287
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
1288
+    if 'ingress-address' in settings:
1289
+        return [_to_range(settings['ingress-address'])]
1290
+    if 'private-address' in settings:
1291
+        return [_to_range(settings['private-address'])]
1292
+    return []  # Should never happen
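A worked example of the egress_subnets fallback chain for different relation settings:

    # {'egress-subnets': '192.168.1.0/24, 2001::F00F/128'}
    #     -> ['192.168.1.0/24', '2001::F00F/128']
    # {'ingress-address': '10.5.0.23'}   -> ['10.5.0.23/32']    (bare IPv4 -> /32)
    # {'private-address': '2001::F00F'}  -> ['2001::F00F/128']  (bare IPv6 -> /128)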

+ 9
- 2
tests/charmhelpers/core/host.py View File

@@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path):
993 993
     return output
994 994
 
995 995
 
996
-def modulo_distribution(modulo=3, wait=30):
996
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
997 997
     """ Modulo distribution
998 998
 
999 999
     This helper uses the unit number, a modulo value and a constant wait time
@@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):
1015 1015
 
1016 1016
     @param modulo: int The modulo number creates the group distribution
1017 1017
     @param wait: int The constant time wait value
1018
+    @param non_zero_wait: boolean Override unit % modulo == 0,
1019
+                          return modulo * wait. Used to avoid collisions with
1020
+                          leader nodes which are often given priority.
1018 1021
     @return: int Calculated time to wait for unit operation
1019 1022
     """
1020 1023
     unit_number = int(local_unit().split('/')[1])
1021
-    return (unit_number % modulo) * wait
1024
+    calculated_wait_time = (unit_number % modulo) * wait
1025
+    if non_zero_wait and calculated_wait_time == 0:
1026
+        return modulo * wait
1027
+    else:
1028
+        return calculated_wait_time
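A worked example of the new non_zero_wait behaviour with modulo=3, wait=30:

    # unit 0 -> (0 % 3) * 30 = 0   -> bumped to modulo * wait = 90
    # unit 1 -> (1 % 3) * 30 = 30
    # unit 2 -> (2 % 3) * 30 = 60
    # unit 3 -> (3 % 3) * 30 = 0   -> bumped to 90
    # with non_zero_wait=False, units 0 and 3 return 0 as before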

+ 18
- 7
tests/charmhelpers/core/services/base.py View File

@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
307 307
     """
308 308
     def __call__(self, manager, service_name, event_name):
309 309
         service = manager.get_service(service_name)
310
-        new_ports = service.get('ports', [])
310
+        # turn this generator into a list,
311
+        # as we'll be going over it multiple times
312
+        new_ports = list(service.get('ports', []))
311 313
         port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
312 314
         if os.path.exists(port_file):
313 315
             with open(port_file) as fp:
314 316
                 old_ports = fp.read().split(',')
315 317
             for old_port in old_ports:
316
-                if bool(old_port):
317
-                    old_port = int(old_port)
318
-                    if old_port not in new_ports:
319
-                        hookenv.close_port(old_port)
318
+                if bool(old_port) and not self.ports_contains(old_port, new_ports):
319
+                    hookenv.close_port(old_port)
320 320
         with open(port_file, 'w') as fp:
321 321
             fp.write(','.join(str(port) for port in new_ports))
322 322
         for port in new_ports:
323
+            # A port is either a number or 'ICMP'
324
+            protocol = 'TCP'
325
+            if str(port).upper() == 'ICMP':
326
+                protocol = 'ICMP'
323 327
             if event_name == 'start':
324
-                hookenv.open_port(port)
328
+                hookenv.open_port(port, protocol)
325 329
             elif event_name == 'stop':
326
-                hookenv.close_port(port)
330
+                hookenv.close_port(port, protocol)
331
+
332
+    def ports_contains(self, port, ports):
333
+        if not bool(port):
334
+            return False
335
+        if str(port).upper() != 'ICMP':
336
+            port = int(port)
337
+        return port in ports
327 338
 
328 339
 
329 340
 def service_stop(service_name):
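ports_contains normalizes numeric strings before the membership test so the literal 'ICMP' survives as a string. Expected behaviour per the code above:

    # ports_contains('8080', [8080, 'ICMP'])  -> True   ('8080' is cast to int)
    # ports_contains('ICMP', [8080, 'ICMP'])  -> True   (kept as a string)
    # ports_contains('',     [8080])          -> False  (empty entries skipped)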

+ 11
- 7
tests/charmhelpers/core/sysctl.py View File

@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
31 31
 def create(sysctl_dict, sysctl_file):
32 32
     """Creates a sysctl.conf file from a YAML associative array
33 33
 
34
-    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
34
+    :param sysctl_dict: a dict or YAML-formatted string of sysctl
35
+                        options eg "{ 'kernel.max_pid': 1337 }"
35 36
     :type sysctl_dict: str
36 37
     :param sysctl_file: path to the sysctl file to be saved
37 38
     :type sysctl_file: str or unicode
38 39
     :returns: None
39 40
     """
40
-    try:
41
-        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
42
-    except yaml.YAMLError:
43
-        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
44
-            level=ERROR)
45
-        return
41
+    if type(sysctl_dict) is not dict:
42
+        try:
43
+            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
44
+        except yaml.YAMLError:
45
+            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
46
+                level=ERROR)
47
+            return
48
+    else:
49
+        sysctl_dict_parsed = sysctl_dict
46 50
 
47 51
     with open(sysctl_file, "w") as fd:
48 52
         for key, value in sysctl_dict_parsed.items():
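create() now accepts a ready dict as well as a YAML string; both forms parse to the same mapping before the file is written. Equivalent calls (path is illustrative; applying sysctl settings requires root):

    # create({'kernel.pid_max': 4194303}, '/etc/sysctl.d/50-charm.conf')
    # create("{kernel.pid_max: 4194303}", '/etc/sysctl.d/50-charm.conf')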

+ 7
- 2
tests/charmhelpers/core/unitdata.py View File

@@ -166,6 +166,10 @@ class Storage(object):
166 166
 
167 167
     To support dicts, lists, integer, floats, and booleans values
168 168
     are automatically json encoded/decoded.
169
+
170
+    Note: to facilitate unit testing, ':memory:' can be passed as the
171
+    path parameter which causes sqlite3 to only build the db in memory.
172
+    This should only be used for testing purposes.
169 173
     """
170 174
     def __init__(self, path=None):
171 175
         self.db_path = path
@@ -175,8 +179,9 @@ class Storage(object):
175 179
             else:
176 180
                 self.db_path = os.path.join(
177 181
                     os.environ.get('CHARM_DIR', ''), '.unit-state.db')
178
-        with open(self.db_path, 'a') as f:
179
-            os.fchmod(f.fileno(), 0o600)
182
+        if self.db_path != ':memory:':
183
+            with open(self.db_path, 'a') as f:
184
+                os.fchmod(f.fileno(), 0o600)
180 185
         self.conn = sqlite3.connect('%s' % self.db_path)
181 186
         self.cursor = self.conn.cursor()
182 187
         self.revision = None
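The ':memory:' escape hatch keeps unit tests off the filesystem; a minimal sketch using the key/value API:

    from charmhelpers.core.unitdata import Storage

    db = Storage(path=':memory:')  # sqlite3 builds the db in RAM, no chmod
    db.set('release', 'queens')
    assert db.get('release') == 'queens'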

tests/dev-basic-bionic-queens → tests/gate-basic-bionic-queens View File


+ 25
- 0
tests/gate-basic-xenial-queens View File

@@ -0,0 +1,25 @@
1
+#!/usr/bin/env python
2
+#
3
+# Copyright 2016 Canonical Ltd
4
+#
5
+# Licensed under the Apache License, Version 2.0 (the "License");
6
+# you may not use this file except in compliance with the License.
7
+# You may obtain a copy of the License at
8
+#
9
+#  http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+# Unless required by applicable law or agreed to in writing, software
12
+# distributed under the License is distributed on an "AS IS" BASIS,
13
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+# See the License for the specific language governing permissions and
15
+# limitations under the License.
16
+
17
+"""Amulet tests on a basic cinder backup deployment on xenial-queens."""
18
+
19
+from basic_deployment import CinderBackupBasicDeployment
20
+
21
+if __name__ == '__main__':
22
+    deployment = CinderBackupBasicDeployment(series='xenial',
23
+                                             openstack='cloud:xenial-queens',
24
+                                             source='cloud:xenial-updates/queens')
25
+    deployment.run_tests()

+ 1
- 1
tox.ini View File

@@ -60,7 +60,7 @@ basepython = python2.7
60 60
 deps = -r{toxinidir}/requirements.txt
61 61
        -r{toxinidir}/test-requirements.txt
62 62
 commands =
63
-    bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy
63
+    bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy
64 64
 
65 65
 [testenv:func27-dfs]
66 66
 # Charm Functional Test
