
Liberty changes

Junaid Ali, 3 years ago. Parent commit: 1dee6f9daa
31 changed files with 3260 additions and 517 deletions
  1. hooks/charmhelpers/contrib/amulet/deployment.py (+4, -2)
  2. hooks/charmhelpers/contrib/amulet/utils.py (+367, -82)
  3. hooks/charmhelpers/contrib/charmsupport/nrpe.py (+52, -14)
  4. hooks/charmhelpers/contrib/mellanox/__init__.py (+0, -0)
  5. hooks/charmhelpers/contrib/mellanox/infiniband.py (+151, -0)
  6. hooks/charmhelpers/contrib/network/ip.py (+32, -24)
  7. hooks/charmhelpers/contrib/network/ufw.py (+5, -6)
  8. hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+133, -14)
  9. hooks/charmhelpers/contrib/openstack/amulet/utils.py (+381, -0)
  10. hooks/charmhelpers/contrib/openstack/context.py (+208, -75)
  11. hooks/charmhelpers/contrib/openstack/neutron.py (+55, -20)
  12. hooks/charmhelpers/contrib/openstack/templating.py (+30, -2)
  13. hooks/charmhelpers/contrib/openstack/utils.py (+393, -55)
  14. hooks/charmhelpers/contrib/peerstorage/__init__.py (+5, -4)
  15. hooks/charmhelpers/contrib/python/packages.py (+13, -4)
  16. hooks/charmhelpers/contrib/storage/linux/ceph.py (+645, -50)
  17. hooks/charmhelpers/contrib/storage/linux/loopback.py (+10, -0)
  18. hooks/charmhelpers/contrib/storage/linux/utils.py (+3, -2)
  19. hooks/charmhelpers/contrib/templating/jinja.py (+4, -3)
  20. hooks/charmhelpers/core/hookenv.py (+188, -12)
  21. hooks/charmhelpers/core/host.py (+240, -61)
  22. hooks/charmhelpers/core/hugepage.py (+71, -0)
  23. hooks/charmhelpers/core/kernel.py (+68, -0)
  24. hooks/charmhelpers/core/services/helpers.py (+30, -5)
  25. hooks/charmhelpers/core/strutils.py (+30, -0)
  26. hooks/charmhelpers/core/templating.py (+21, -8)
  27. hooks/charmhelpers/core/unitdata.py (+61, -17)
  28. hooks/charmhelpers/fetch/__init__.py (+18, -2)
  29. hooks/charmhelpers/fetch/archiveurl.py (+1, -1)
  30. hooks/charmhelpers/fetch/bzrurl.py (+22, -32)
  31. hooks/charmhelpers/fetch/giturl.py (+19, -22)

hooks/charmhelpers/contrib/amulet/deployment.py (+4, -2)

@@ -51,7 +51,8 @@ class AmuletDeployment(object):
         if 'units' not in this_service:
             this_service['units'] = 1

-        self.d.add(this_service['name'], units=this_service['units'])
+        self.d.add(this_service['name'], units=this_service['units'],
+                   constraints=this_service.get('constraints'))

         for svc in other_services:
             if 'location' in svc:
@@ -64,7 +65,8 @@ class AmuletDeployment(object):
             if 'units' not in svc:
                 svc['units'] = 1

-            self.d.add(svc['name'], charm=branch_location, units=svc['units'],
+            self.d.add(svc['name'], charm=branch_location, units=svc['units'],
+                       constraints=svc.get('constraints'))

     def _add_relations(self, relations):
         """Add all of the relations for the services."""

hooks/charmhelpers/contrib/amulet/utils.py (+367, -82)

@@ -14,17 +14,25 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

-import amulet
-import ConfigParser
-import distro_info
 import io
+import json
 import logging
 import os
 import re
-import six
+import socket
+import subprocess
 import sys
 import time
-import urlparse
+import uuid
+
+import amulet
+import distro_info
+import six
+from six.moves import configparser
+if six.PY3:
+    from urllib import parse as urlparse
+else:
+    import urlparse


 class AmuletUtils(object):
@@ -108,7 +116,7 @@ class AmuletUtils(object):
         # /!\ DEPRECATION WARNING (beisner):
         # New and existing tests should be rewritten to use
         # validate_services_by_name() as it is aware of init systems.
-        self.log.warn('/!\\ DEPRECATION WARNING:  use '
+        self.log.warn('DEPRECATION WARNING:  use '
                       'validate_services_by_name instead of validate_services '
                       'due to init system differences.')

@@ -142,19 +150,23 @@ class AmuletUtils(object):

             for service_name in services_list:
                 if (self.ubuntu_releases.index(release) >= systemd_switch or
-                        service_name == "rabbitmq-server"):
-                    # init is systemd
+                        service_name in ['rabbitmq-server', 'apache2']):
+                    # init is systemd (or regular sysv)
                     cmd = 'sudo service {} status'.format(service_name)
+                    output, code = sentry_unit.run(cmd)
+                    service_running = code == 0
                 elif self.ubuntu_releases.index(release) < systemd_switch:
                     # init is upstart
                     cmd = 'sudo status {}'.format(service_name)
+                    output, code = sentry_unit.run(cmd)
+                    service_running = code == 0 and "start/running" in output

-                output, code = sentry_unit.run(cmd)
                 self.log.debug('{} `{}` returned '
                                '{}'.format(sentry_unit.info['unit_name'],
                                            cmd, code))
-                if code != 0:
-                    return "command `{}` returned {}".format(cmd, str(code))
+                if not service_running:
+                    return u"command `{}` returned {} {}".format(
+                        cmd, output, str(code))
         return None

     def _get_config(self, unit, filename):
@@ -164,7 +176,7 @@ class AmuletUtils(object):
         # NOTE(beisner):  by default, ConfigParser does not handle options
         # with no value, such as the flags used in the mysql my.cnf file.
         # https://bugs.python.org/issue7005
-        config = ConfigParser.ConfigParser(allow_no_value=True)
+        config = configparser.ConfigParser(allow_no_value=True)
         config.readfp(io.StringIO(file_contents))
         return config

@@ -259,33 +271,52 @@ class AmuletUtils(object):
         """Get last modification time of directory."""
         return sentry_unit.directory_stat(directory)['mtime']

-    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
-        """Get process' start time.
+    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
+        """Get start time of a process based on the last modification time
+           of the /proc/pid directory.

-           Determine start time of the process based on the last modification
-           time of the /proc/pid directory. If pgrep_full is True, the process
-           name is matched against the full command line.
-           """
-        if pgrep_full:
-            cmd = 'pgrep -o -f {}'.format(service)
-        else:
-            cmd = 'pgrep -o {}'.format(service)
-        cmd = cmd + '  | grep  -v pgrep || exit 0'
-        cmd_out = sentry_unit.run(cmd)
-        self.log.debug('CMDout: ' + str(cmd_out))
-        if cmd_out[0]:
-            self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
-            proc_dir = '/proc/{}'.format(cmd_out[0].strip())
-            return self._get_dir_mtime(sentry_unit, proc_dir)
+        :sentry_unit:  The sentry unit to check for the service on
+        :service:  service name to look for in process table
+        :pgrep_full:  [Deprecated] Use full command line search mode with pgrep
+        :returns:  epoch time of service process start
+        :param commands:  list of bash commands
+        :param sentry_units:  list of sentry unit pointers
+        :returns:  None if successful; Failure message otherwise
+        """
+        if pgrep_full is not None:
+            # /!\ DEPRECATION WARNING (beisner):
+            # No longer implemented, as pidof is now used instead of pgrep.
+            # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+            self.log.warn('DEPRECATION WARNING:  pgrep_full bool is no '
+                          'longer implemented re: lp 1474030.')
+
+        pid_list = self.get_process_id_list(sentry_unit, service)
+        pid = pid_list[0]
+        proc_dir = '/proc/{}'.format(pid)
+        self.log.debug('Pid for {} on {}: {}'.format(
+            service, sentry_unit.info['unit_name'], pid))
+
+        return self._get_dir_mtime(sentry_unit, proc_dir)

     def service_restarted(self, sentry_unit, service, filename,
-                          pgrep_full=False, sleep_time=20):
+                          pgrep_full=None, sleep_time=20):
         """Check if service was restarted.

           Compare a service's start time vs a file's last modification time
           (such as a config file for that service) to determine if the service
           has been restarted.
           """
+        # /!\ DEPRECATION WARNING (beisner):
+        # This method is prone to races in that no before-time is known.
+        # Use validate_service_config_changed instead.
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep.  pgrep_full is still passed through to ensure
+        # deprecation WARNS.  lp1474030
+        self.log.warn('DEPRECATION WARNING:  use '
+                      'validate_service_config_changed instead of '
+                      'service_restarted due to known races.')
+
         time.sleep(sleep_time)
         if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
                 self._get_file_mtime(sentry_unit, filename)):
@@ -294,78 +325,122 @@ class AmuletUtils(object):
             return False

     def service_restarted_since(self, sentry_unit, mtime, service,
-                                pgrep_full=False, sleep_time=20,
-                                retry_count=2):
+                                pgrep_full=None, sleep_time=20,
+                                retry_count=30, retry_sleep_time=10):
         """Check if service was been started after a given time.

         Args:
           sentry_unit (sentry): The sentry unit to check for the service on
           mtime (float): The epoch time to check against
           service (string): service name to look for in process table
-          pgrep_full (boolean): Use full command line search mode with pgrep
-          sleep_time (int): Seconds to sleep before looking for process
-          retry_count (int): If service is not found, how many times to retry
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry

         Returns:
           bool: True if service found and its start time it newer than mtime,
                 False if service is older than mtime or if service was
                 not found.
         """
-        self.log.debug('Checking %s restarted since %s' % (service, mtime))
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep.  pgrep_full is still passed through to ensure
+        # deprecation WARNS.  lp1474030
+
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s service restarted since %s on '
+                       '%s' % (service, mtime, unit_name))
         time.sleep(sleep_time)
-        proc_start_time = self._get_proc_start_time(sentry_unit, service,
-                                                    pgrep_full)
-        while retry_count > 0 and not proc_start_time:
-            self.log.debug('No pid file found for service %s, will retry %i '
-                           'more times' % (service, retry_count))
-            time.sleep(30)
-            proc_start_time = self._get_proc_start_time(sentry_unit, service,
-                                                        pgrep_full)
-            retry_count = retry_count - 1
+        proc_start_time = None
+        tries = 0
+        while tries <= retry_count and not proc_start_time:
+            try:
+                proc_start_time = self._get_proc_start_time(sentry_unit,
+                                                            service,
+                                                            pgrep_full)
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'OK'.format(tries, service, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, proc may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'failed\n{}'.format(tries, service,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1

         if not proc_start_time:
             self.log.warn('No proc start time found, assuming service did '
                           'not start')
             return False
         if proc_start_time >= mtime:
-            self.log.debug('proc start time is newer than provided mtime'
-                           '(%s >= %s)' % (proc_start_time, mtime))
+            self.log.debug('Proc start time is newer than provided mtime'
+                           '(%s >= %s) on %s (OK)' % (proc_start_time,
+                                                      mtime, unit_name))
             return True
         else:
-            self.log.warn('proc start time (%s) is older than provided mtime '
-                          '(%s), service did not restart' % (proc_start_time,
-                                                             mtime))
+            self.log.warn('Proc start time (%s) is older than provided mtime '
+                          '(%s) on %s, service did not '
+                          'restart' % (proc_start_time, mtime, unit_name))
             return False

     def config_updated_since(self, sentry_unit, filename, mtime,
-                             sleep_time=20):
+                             sleep_time=20, retry_count=30,
+                             retry_sleep_time=10):
         """Check if file was modified after a given time.

         Args:
           sentry_unit (sentry): The sentry unit to check the file mtime on
           filename (string): The file to check mtime of
           mtime (float): The epoch time to check against
-          sleep_time (int): Seconds to sleep before looking for process
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry

         Returns:
           bool: True if file was modified more recently than mtime, False if
-                file was modified before mtime,
+                file was modified before mtime, or if file not found.
         """
-        self.log.debug('Checking %s updated since %s' % (filename, mtime))
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s updated since %s on '
+                       '%s' % (filename, mtime, unit_name))
         time.sleep(sleep_time)
-        file_mtime = self._get_file_mtime(sentry_unit, filename)
+        file_mtime = None
+        tries = 0
+        while tries <= retry_count and not file_mtime:
+            try:
+                file_mtime = self._get_file_mtime(sentry_unit, filename)
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'OK'.format(tries, filename, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, file may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'failed\n{}'.format(tries, filename,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not file_mtime:
+            self.log.warn('Could not determine file mtime, assuming '
+                          'file does not exist')
+            return False
+
         if file_mtime >= mtime:
             self.log.debug('File mtime is newer than provided mtime '
-                           '(%s >= %s)' % (file_mtime, mtime))
+                           '(%s >= %s) on %s (OK)' % (file_mtime,
+                                                      mtime, unit_name))
             return True
         else:
-            self.log.warn('File mtime %s is older than provided mtime %s'
-                          % (file_mtime, mtime))
+            self.log.warn('File mtime is older than provided mtime'
+                          '(%s < on %s) on %s' % (file_mtime,
+                                                  mtime, unit_name))
             return False

     def validate_service_config_changed(self, sentry_unit, mtime, service,
-                                        filename, pgrep_full=False,
-                                        sleep_time=20, retry_count=2):
+                                        filename, pgrep_full=None,
+                                        sleep_time=20, retry_count=30,
+                                        retry_sleep_time=10):
         """Check service and file were updated after mtime

         Args:
@@ -373,9 +448,10 @@ class AmuletUtils(object):
           mtime (float): The epoch time to check against
           service (string): service name to look for in process table
           filename (string): The file to check mtime of
-          pgrep_full (boolean): Use full command line search mode with pgrep
-          sleep_time (int): Seconds to sleep before looking for process
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep in seconds to pass to test helpers
           retry_count (int): If service is not found, how many times to retry
+          retry_sleep_time (int): Time in seconds to wait between retries

         Typical Usage:
             u = OpenStackAmuletUtils(ERROR)
@@ -392,15 +468,27 @@ class AmuletUtils(object):
                 mtime, False if service is older than mtime or if service was
                 not found or if filename was modified before mtime.
         """
-        self.log.debug('Checking %s restarted since %s' % (service, mtime))
-        time.sleep(sleep_time)
-        service_restart = self.service_restarted_since(sentry_unit, mtime,
-                                                       service,
-                                                       pgrep_full=pgrep_full,
-                                                       sleep_time=0,
-                                                       retry_count=retry_count)
-        config_update = self.config_updated_since(sentry_unit, filename, mtime,
-                                                  sleep_time=0)
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep.  pgrep_full is still passed through to ensure
+        # deprecation WARNS.  lp1474030
+
+        service_restart = self.service_restarted_since(
+            sentry_unit, mtime,
+            service,
+            pgrep_full=pgrep_full,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
+        config_update = self.config_updated_since(
+            sentry_unit,
+            filename,
+            mtime,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
         return service_restart and config_update

     def get_sentry_time(self, sentry_unit):
@@ -418,7 +506,6 @@ class AmuletUtils(object):
         """Return a list of all Ubuntu releases in order of release."""
         _d = distro_info.UbuntuDistroInfo()
         _release_list = _d.all
-        self.log.debug('Ubuntu release list: {}'.format(_release_list))
         return _release_list

     def file_to_url(self, file_rel_path):
@@ -450,15 +537,20 @@ class AmuletUtils(object):
                                         cmd, code, output))
         return None

-    def get_process_id_list(self, sentry_unit, process_name):
+    def get_process_id_list(self, sentry_unit, process_name,
+                            expect_success=True):
         """Get a list of process ID(s) from a single sentry juju unit
         for a single process name.

-        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :param sentry_unit: Amulet sentry instance (juju unit)
         :param process_name: Process name
+        :param expect_success: If False, expect the PID to be missing,
+            raise if it is present.
         :returns: List of process IDs
         """
-        cmd = 'pidof {}'.format(process_name)
+        cmd = 'pidof -x {}'.format(process_name)
+        if not expect_success:
+            cmd += " || exit 0 && exit 1"
         output, code = sentry_unit.run(cmd)
         if code != 0:
             msg = ('{} `{}` returned {} '
@@ -467,14 +559,23 @@ class AmuletUtils(object):
             amulet.raise_status(amulet.FAIL, msg=msg)
         return str(output).split()

-    def get_unit_process_ids(self, unit_processes):
+    def get_unit_process_ids(self, unit_processes, expect_success=True):
         """Construct a dict containing unit sentries, process names, and
-        process IDs."""
+        process IDs.
+
+        :param unit_processes: A dictionary of Amulet sentry instance
+            to list of process names.
+        :param expect_success: if False expect the processes to not be
+            running, raise if they are.
+        :returns: Dictionary of Amulet sentry instance to dictionary
+            of process names to PIDs.
+        """
         pid_dict = {}
-        for sentry_unit, process_list in unit_processes.iteritems():
+        for sentry_unit, process_list in six.iteritems(unit_processes):
             pid_dict[sentry_unit] = {}
             for process in process_list:
-                pids = self.get_process_id_list(sentry_unit, process)
+                pids = self.get_process_id_list(
+                    sentry_unit, process, expect_success=expect_success)
                 pid_dict[sentry_unit].update({process: pids})
         return pid_dict

@@ -488,7 +589,7 @@ class AmuletUtils(object):
             return ('Unit count mismatch.  expected, actual: {}, '
                     '{} '.format(len(expected), len(actual)))

-        for (e_sentry, e_proc_names) in expected.iteritems():
+        for (e_sentry, e_proc_names) in six.iteritems(expected):
             e_sentry_name = e_sentry.info['unit_name']
             if e_sentry in actual.keys():
                 a_proc_names = actual[e_sentry]
@@ -507,11 +608,23 @@ class AmuletUtils(object):
                             '{}'.format(e_proc_name, a_proc_name))

                 a_pids_length = len(a_pids)
-                if e_pids_length != a_pids_length:
-                    return ('PID count mismatch. {} ({}) expected, actual: '
+                fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
                             '{}, {} ({})'.format(e_sentry_name, e_proc_name,
                                                  e_pids_length, a_pids_length,
                                                  a_pids))
+
+                # If expected is not bool, ensure PID quantities match
+                if not isinstance(e_pids_length, bool) and \
+                        a_pids_length != e_pids_length:
+                    return fail_msg
+                # If expected is bool True, ensure 1 or more PIDs exist
+                elif isinstance(e_pids_length, bool) and \
+                        e_pids_length is True and a_pids_length < 1:
+                    return fail_msg
+                # If expected is bool False, ensure 0 PIDs exist
+                elif isinstance(e_pids_length, bool) and \
+                        e_pids_length is False and a_pids_length != 0:
+                    return fail_msg
                 else:
                     self.log.debug('PID check OK: {} {} {}: '
                                    '{}'.format(e_sentry_name, e_proc_name,
@@ -531,3 +644,175 @@ class AmuletUtils(object):
             return 'Dicts within list are not identical'

         return None
+
+    def validate_sectionless_conf(self, file_contents, expected):
+        """A crude conf parser.  Useful to inspect configuration files which
+        do not have section headers (as would be necessary in order to use
+        the configparser).  Such as openstack-dashboard or rabbitmq confs."""
+        for line in file_contents.split('\n'):
+            if '=' in line:
+                args = line.split('=')
+                if len(args) <= 1:
+                    continue
+                key = args[0].strip()
+                value = args[1].strip()
+                if key in expected.keys():
+                    if expected[key] != value:
+                        msg = ('Config mismatch.  Expected, actual:  {}, '
+                               '{}'.format(expected[key], value))
+                        amulet.raise_status(amulet.FAIL, msg=msg)
+
+    def get_unit_hostnames(self, units):
+        """Return a dict of juju unit names to hostnames."""
+        host_names = {}
+        for unit in units:
+            host_names[unit.info['unit_name']] = \
+                str(unit.file_contents('/etc/hostname').strip())
+        self.log.debug('Unit host names: {}'.format(host_names))
+        return host_names
+
+    def run_cmd_unit(self, sentry_unit, cmd):
+        """Run a command on a unit, return the output and exit code."""
+        output, code = sentry_unit.run(cmd)
+        if code == 0:
+            self.log.debug('{} `{}` command returned {} '
+                           '(OK)'.format(sentry_unit.info['unit_name'],
+                                         cmd, code))
+        else:
+            msg = ('{} `{}` command returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+        return str(output), code
+
+    def file_exists_on_unit(self, sentry_unit, file_name):
+        """Check if a file exists on a unit."""
+        try:
+            sentry_unit.file_stat(file_name)
+            return True
+        except IOError:
+            return False
+        except Exception as e:
+            msg = 'Error checking file {}: {}'.format(file_name, e)
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+    def file_contents_safe(self, sentry_unit, file_name,
+                           max_wait=60, fatal=False):
+        """Get file contents from a sentry unit.  Wrap amulet file_contents
+        with retry logic to address races where a file checks as existing,
+        but no longer exists by the time file_contents is called.
+        Return None if file not found. Optionally raise if fatal is True."""
+        unit_name = sentry_unit.info['unit_name']
+        file_contents = False
+        tries = 0
+        while not file_contents and tries < (max_wait / 4):
+            try:
+                file_contents = sentry_unit.file_contents(file_name)
+            except IOError:
+                self.log.debug('Attempt {} to open file {} from {} '
+                               'failed'.format(tries, file_name,
+                                               unit_name))
+                time.sleep(4)
+                tries += 1
+
+        if file_contents:
+            return file_contents
+        elif not fatal:
+            return None
+        elif fatal:
+            msg = 'Failed to get file contents from unit.'
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def port_knock_tcp(self, host="localhost", port=22, timeout=15):
+        """Open a TCP socket to check for a listening sevice on a host.
+
+        :param host: host name or IP address, default to localhost
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :returns: True if successful, False if connect failed
+        """
+
+        # Resolve host name if possible
+        try:
+            connect_host = socket.gethostbyname(host)
+            host_human = "{} ({})".format(connect_host, host)
+        except socket.error as e:
+            self.log.warn('Unable to resolve address: '
+                          '{} ({}) Trying anyway!'.format(host, e))
+            connect_host = host
+            host_human = connect_host
+
+        # Attempt socket connection
+        try:
+            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            knock.settimeout(timeout)
+            knock.connect((connect_host, port))
+            knock.close()
+            self.log.debug('Socket connect OK for host '
+                           '{} on port {}.'.format(host_human, port))
+            return True
+        except socket.error as e:
+            self.log.debug('Socket connect FAIL for'
+                           ' {} port {} ({})'.format(host_human, port, e))
+            return False
+
+    def port_knock_units(self, sentry_units, port=22,
+                         timeout=15, expect_success=True):
+        """Open a TCP socket to check for a listening sevice on each
+        listed juju unit.
+
+        :param sentry_units: list of sentry unit pointers
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :expect_success: True by default, set False to invert logic
+        :returns: None if successful, Failure message otherwise
+        """
+        for unit in sentry_units:
+            host = unit.info['public-address']
+            connected = self.port_knock_tcp(host, port, timeout)
+            if not connected and expect_success:
+                return 'Socket connect failed.'
+            elif connected and not expect_success:
+                return 'Socket connected unexpectedly.'
+
+    def get_uuid_epoch_stamp(self):
+        """Returns a stamp string based on uuid4 and epoch time.  Useful in
+        generating test messages which need to be unique-ish."""
+        return '[{}-{}]'.format(uuid.uuid4(), time.time())
+
+# amulet juju action helpers:
+    def run_action(self, unit_sentry, action,
+                   _check_output=subprocess.check_output):
+        """Run the named action on a given unit sentry.
+
+        _check_output parameter is used for dependency injection.
+
+        @return action_id.
+        """
+        unit_id = unit_sentry.info["unit_name"]
+        command = ["juju", "action", "do", "--format=json", unit_id, action]
+        self.log.info("Running command: %s\n" % " ".join(command))
+        output = _check_output(command, universal_newlines=True)
+        data = json.loads(output)
+        action_id = data[u'Action queued with id']
+        return action_id
+
+    def wait_on_action(self, action_id, _check_output=subprocess.check_output):
+        """Wait for a given action, returning if it completed or not.
+
+        _check_output parameter is used for dependency injection.
+        """
+        command = ["juju", "action", "fetch", "--format=json", "--wait=0",
+                   action_id]
+        output = _check_output(command, universal_newlines=True)
+        data = json.loads(output)
+        return data.get(u"status") == "completed"
+
+    def status_get(self, unit):
+        """Return the current service status of this unit."""
+        raw_status, return_code = unit.run(
+            "status-get --format=json --include-data")
+        if return_code != 0:
+            return ("unknown", "")
+        status = json.loads(raw_status)
+        return (status["status"], status["message"])
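The new action helpers shell out to the juju 1.x CLI ('juju action do' / 'juju action fetch'). A rough usage sketch inside a test, assuming u is an AmuletUtils instance, sentry is an amulet sentry unit, and 'pause' is an action the charm actually exposes (illustrative, not from this commit):

    # Sketch only: queue a named action on a unit, wait for it, read status.
    u = AmuletUtils(logging.DEBUG)
    action_id = u.run_action(sentry, 'pause')           # returns the queued action id
    assert u.wait_on_action(action_id), 'pause action did not complete'
    status, message = u.status_get(sentry)              # e.g. ('maintenance', 'Paused')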

hooks/charmhelpers/contrib/charmsupport/nrpe.py (+52, -14)

@@ -148,6 +148,13 @@ define service {{
         self.description = description
         self.check_cmd = self._locate_cmd(check_cmd)

+    def _get_check_filename(self):
+        return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
+
+    def _get_service_filename(self, hostname):
+        return os.path.join(NRPE.nagios_exportdir,
+                            'service__{}_{}.cfg'.format(hostname, self.command))
+
     def _locate_cmd(self, check_cmd):
         search_path = (
             '/usr/lib/nagios/plugins',
@@ -163,9 +170,21 @@ define service {{
         log('Check command not found: {}'.format(parts[0]))
         return ''

+    def _remove_service_files(self):
+        if not os.path.exists(NRPE.nagios_exportdir):
+            return
+        for f in os.listdir(NRPE.nagios_exportdir):
+            if f.endswith('_{}.cfg'.format(self.command)):
+                os.remove(os.path.join(NRPE.nagios_exportdir, f))
+
+    def remove(self, hostname):
+        nrpe_check_file = self._get_check_filename()
+        if os.path.exists(nrpe_check_file):
+            os.remove(nrpe_check_file)
+        self._remove_service_files()
+
     def write(self, nagios_context, hostname, nagios_servicegroups):
-        nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
-            self.command)
+        nrpe_check_file = self._get_check_filename()
         with open(nrpe_check_file, 'w') as nrpe_check_config:
             nrpe_check_config.write("# check {}\n".format(self.shortname))
             nrpe_check_config.write("command[{}]={}\n".format(
@@ -180,9 +199,7 @@ define service {{

     def write_service_config(self, nagios_context, hostname,
                              nagios_servicegroups):
-        for f in os.listdir(NRPE.nagios_exportdir):
-            if re.search('.*{}.cfg'.format(self.command), f):
-                os.remove(os.path.join(NRPE.nagios_exportdir, f))
+        self._remove_service_files()

         templ_vars = {
             'nagios_hostname': hostname,
@@ -192,8 +209,7 @@ define service {{
             'command': self.command,
         }
         nrpe_service_text = Check.service_template.format(**templ_vars)
-        nrpe_service_file = '{}/service__{}_{}.cfg'.format(
-            NRPE.nagios_exportdir, hostname, self.command)
+        nrpe_service_file = self._get_service_filename(hostname)
         with open(nrpe_service_file, 'w') as nrpe_service_config:
             nrpe_service_config.write(str(nrpe_service_text))

@@ -218,12 +234,32 @@ class NRPE(object):
         if hostname:
             self.hostname = hostname
         else:
-            self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
+            nagios_hostname = get_nagios_hostname()
+            if nagios_hostname:
+                self.hostname = nagios_hostname
+            else:
+                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
         self.checks = []

     def add_check(self, *args, **kwargs):
         self.checks.append(Check(*args, **kwargs))

+    def remove_check(self, *args, **kwargs):
+        if kwargs.get('shortname') is None:
+            raise ValueError('shortname of check must be specified')
+
+        # Use sensible defaults if they're not specified - these are not
+        # actually used during removal, but they're required for constructing
+        # the Check object; check_disk is chosen because it's part of the
+        # nagios-plugins-basic package.
+        if kwargs.get('check_cmd') is None:
+            kwargs['check_cmd'] = 'check_disk'
+        if kwargs.get('description') is None:
+            kwargs['description'] = ''
+
+        check = Check(*args, **kwargs)
+        check.remove(self.hostname)
+
     def write(self):
         try:
             nagios_uid = pwd.getpwnam('nagios').pw_uid
@@ -260,7 +296,7 @@ def get_nagios_hostcontext(relation_name='nrpe-external-master'):
     :param str relation_name: Name of relation nrpe sub joined to
     """
     for rel in relations_of_type(relation_name):
-        if 'nagios_hostname' in rel:
+        if 'nagios_host_context' in rel:
             return rel['nagios_host_context']


@@ -301,11 +337,13 @@ def add_init_service_checks(nrpe, services, unit_name):
         upstart_init = '/etc/init/%s.conf' % svc
         sysv_init = '/etc/init.d/%s' % svc
         if os.path.exists(upstart_init):
-            nrpe.add_check(
-                shortname=svc,
-                description='process check {%s}' % unit_name,
-                check_cmd='check_upstart_job %s' % svc
-            )
+            # Don't add a check for these services from neutron-gateway
+            if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
+                nrpe.add_check(
+                    shortname=svc,
+                    description='process check {%s}' % unit_name,
+                    check_cmd='check_upstart_job %s' % svc
+                )
         elif os.path.exists(sysv_init):
             cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
             cron_file = ('*/5 * * * * root '
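remove_check() is the counterpart to add_check(): only shortname is required, and it removes both the nrpe.d check file and any exported service__*.cfg definitions for that command. A hedged example of how a charm hook might drop a check it no longer wants (the shortname is illustrative):

    from charmhelpers.contrib.charmsupport import nrpe

    hostname = nrpe.get_nagios_hostname()
    checks = nrpe.NRPE(hostname=hostname)
    checks.remove_check(shortname='ext-port')   # delete the stale check files
    checks.write()                              # re-write the remaining checks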

hooks/charmhelpers/contrib/mellanox/__init__.py (+0, -0)


hooks/charmhelpers/contrib/mellanox/infiniband.py (+151, -0)

@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+from charmhelpers.fetch import (
+    apt_install,
+    apt_update,
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+
+try:
+    from netifaces import interfaces as network_interfaces
+except ImportError:
+    apt_install('python-netifaces')
+    from netifaces import interfaces as network_interfaces
+
+import os
+import re
+import subprocess
+
+from charmhelpers.core.kernel import modprobe
+
+REQUIRED_MODULES = (
+    "mlx4_ib",
+    "mlx4_en",
+    "mlx4_core",
+    "ib_ipath",
+    "ib_mthca",
+    "ib_srpt",
+    "ib_srp",
+    "ib_ucm",
+    "ib_isert",
+    "ib_iser",
+    "ib_ipoib",
+    "ib_cm",
+    "ib_uverbs"
+    "ib_umad",
+    "ib_sa",
+    "ib_mad",
+    "ib_core",
+    "ib_addr",
+    "rdma_ucm",
+)
+
+REQUIRED_PACKAGES = (
+    "ibutils",
+    "infiniband-diags",
+    "ibverbs-utils",
+)
+
+IPOIB_DRIVERS = (
+    "ib_ipoib",
+)
+
+ABI_VERSION_FILE = "/sys/class/infiniband_mad/abi_version"
+
+
+class DeviceInfo(object):
+    pass
+
+
+def install_packages():
+    apt_update()
+    apt_install(REQUIRED_PACKAGES, fatal=True)
+
+
+def load_modules():
+    for module in REQUIRED_MODULES:
+        modprobe(module, persist=True)
+
+
+def is_enabled():
+    """Check if infiniband is loaded on the system"""
+    return os.path.exists(ABI_VERSION_FILE)
+
+
+def stat():
+    """Return full output of ibstat"""
+    return subprocess.check_output(["ibstat"])
+
+
+def devices():
+    """Returns a list of IB enabled devices"""
+    return subprocess.check_output(['ibstat', '-l']).splitlines()
+
+
+def device_info(device):
+    """Returns a DeviceInfo object with the current device settings"""
+
+    status = subprocess.check_output([
+        'ibstat', device, '-s']).splitlines()
+
+    regexes = {
+        "CA type: (.*)": "device_type",
+        "Number of ports: (.*)": "num_ports",
+        "Firmware version: (.*)": "fw_ver",
+        "Hardware version: (.*)": "hw_ver",
+        "Node GUID: (.*)": "node_guid",
+        "System image GUID: (.*)": "sys_guid",
+    }
+
+    device = DeviceInfo()
+
+    for line in status:
+        for expression, key in regexes.items():
+            matches = re.search(expression, line)
+            if matches:
+                setattr(device, key, matches.group(1))
+
+    return device
+
+
+def ipoib_interfaces():
+    """Return a list of IPOIB capable ethernet interfaces"""
+    interfaces = []
+
+    for interface in network_interfaces():
+        try:
+            driver = re.search('^driver: (.+)$', subprocess.check_output([
+                'ethtool', '-i',
+                interface]), re.M).group(1)
+
+            if driver in IPOIB_DRIVERS:
+                interfaces.append(interface)
+        except:
+            log("Skipping interface %s" % interface, level=INFO)
+            continue
+
+    return interfaces
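A sketch of how a charm might drive the new module. This usage is an assumption about intent, not code from the commit; the functions are the ones defined above:

    from charmhelpers.contrib.mellanox import infiniband

    infiniband.install_packages()      # ibutils, infiniband-diags, ibverbs-utils
    infiniband.load_modules()          # modprobe + persist each REQUIRED_MODULES entry
    if infiniband.is_enabled():
        for dev in infiniband.devices():
            info = infiniband.device_info(dev)
            print(dev, getattr(info, 'num_ports', '?'))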

hooks/charmhelpers/contrib/network/ip.py (+32, -24)

@@ -23,7 +23,7 @@ import socket
 from functools import partial

 from charmhelpers.core.hookenv import unit_get
-from charmhelpers.fetch import apt_install
+from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import (
     log,
     WARNING,
@@ -32,13 +32,15 @@ from charmhelpers.core.hookenv import (
 try:
     import netifaces
 except ImportError:
-    apt_install('python-netifaces')
+    apt_update(fatal=True)
+    apt_install('python-netifaces', fatal=True)
     import netifaces

 try:
     import netaddr
 except ImportError:
-    apt_install('python-netaddr')
+    apt_update(fatal=True)
+    apt_install('python-netaddr', fatal=True)
     import netaddr


@@ -51,7 +53,7 @@ def _validate_cidr(network):


 def no_ip_found_error_out(network):
-    errmsg = ("No IP address found in network: %s" % network)
+    errmsg = ("No IP address found in network(s): %s" % network)
     raise ValueError(errmsg)


@@ -59,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
     """Get an IPv4 or IPv6 address within the network from the host.

     :param network (str): CIDR presentation format. For example,
-        '192.168.1.0/24'.
+        '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
     :param fallback (str): If no address is found, return fallback.
     :param fatal (boolean): If no address is found, fallback is not
         set and fatal is True then exit(1).
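With this change the network argument may carry several CIDRs separated by spaces; the hunk below then rewrites the lookup loop to try each network in turn and return the first local interface address that falls inside one of them. A minimal sketch (the addresses are illustrative):

    from charmhelpers.contrib.network.ip import get_address_in_network

    # A single CIDR still works; a space-delimited list is now also accepted.
    addr = get_address_in_network('10.0.0.0/24 192.168.1.0/24',
                                  fallback='127.0.0.1')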
@@ -73,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False):
         else:
             return None

-    _validate_cidr(network)
-    network = netaddr.IPNetwork(network)
-    for iface in netifaces.interfaces():
-        addresses = netifaces.ifaddresses(iface)
-        if network.version == 4 and netifaces.AF_INET in addresses:
-            addr = addresses[netifaces.AF_INET][0]['addr']
-            netmask = addresses[netifaces.AF_INET][0]['netmask']
-            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
-            if cidr in network:
-                return str(cidr.ip)
-
-        if network.version == 6 and netifaces.AF_INET6 in addresses:
-            for addr in addresses[netifaces.AF_INET6]:
-                if not addr['addr'].startswith('fe80'):
-                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
-                                                        addr['netmask']))
-                    if cidr in network:
-                        return str(cidr.ip)
+    networks = network.split() or [network]
+    for network in networks:
+        _validate_cidr(network)
+        network = netaddr.IPNetwork(network)
+        for iface in netifaces.interfaces():
+            addresses = netifaces.ifaddresses(iface)
+            if network.version == 4 and netifaces.AF_INET in addresses:
+                addr = addresses[netifaces.AF_INET][0]['addr']
+                netmask = addresses[netifaces.AF_INET][0]['netmask']
+                cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+                if cidr in network:
+                    return str(cidr.ip)
+
+            if network.version == 6 and netifaces.AF_INET6 in addresses:
+                for addr in addresses[netifaces.AF_INET6]:
+                    if not addr['addr'].startswith('fe80'):
+                        cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+                                                            addr['netmask']))
+                        if cidr in network:
+                            return str(cidr.ip)

     if fallback is not None:
         return fallback
@@ -435,8 +439,12 @@ def get_hostname(address, fqdn=True):

         rev = dns.reversename.from_address(address)
         result = ns_query(rev)
+
         if not result:
-            return None
+            try:
+                result = socket.gethostbyaddr(address)[0]
+            except:
+                return None
     else:
         result = address


hooks/charmhelpers/contrib/network/ufw.py (+5, -6)

@@ -40,7 +40,9 @@ Examples:
 import re
 import os
 import subprocess
+
 from charmhelpers.core import hookenv
+from charmhelpers.core.kernel import modprobe, is_module_loaded

 __author__ = "Felipe Reyes <felipe.reyes@canonical.com>"

@@ -82,14 +84,11 @@ def is_ipv6_ok(soft_fail=False):
     # do we have IPv6 in the machine?
     if os.path.isdir('/proc/sys/net/ipv6'):
         # is ip6tables kernel module loaded?
-        lsmod = subprocess.check_output(['lsmod'], universal_newlines=True)
-        matches = re.findall('^ip6_tables[ ]+', lsmod, re.M)
-        if len(matches) == 0:
+        if not is_module_loaded('ip6_tables'):
             # ip6tables support isn't complete, let's try to load it
             try:
-                subprocess.check_output(['modprobe', 'ip6_tables'],
-                                        universal_newlines=True)
-                # great, we could load the module
+                modprobe('ip6_tables')
+                # great, we can load the module
                 return True
             except subprocess.CalledProcessError as ex:
                 hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,

hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+133, -14)

@@ -14,12 +14,18 @@
14 14
 # You should have received a copy of the GNU Lesser General Public License
15 15
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
16 16
 
17
+import logging
18
+import re
19
+import sys
17 20
 import six
18 21
 from collections import OrderedDict
19 22
 from charmhelpers.contrib.amulet.deployment import (
20 23
     AmuletDeployment
21 24
 )
22 25
 
26
+DEBUG = logging.DEBUG
27
+ERROR = logging.ERROR
28
+
23 29
 
24 30
 class OpenStackAmuletDeployment(AmuletDeployment):
25 31
     """OpenStack amulet deployment.
@@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
28 34
        that is specifically for use by OpenStack charms.
29 35
        """
30 36
 
31
-    def __init__(self, series=None, openstack=None, source=None, stable=True):
37
+    def __init__(self, series=None, openstack=None, source=None,
38
+                 stable=True, log_level=DEBUG):
32 39
         """Initialize the deployment environment."""
33 40
         super(OpenStackAmuletDeployment, self).__init__(series)
41
+        self.log = self.get_logger(level=log_level)
42
+        self.log.info('OpenStackAmuletDeployment:  init')
34 43
         self.openstack = openstack
35 44
         self.source = source
36 45
         self.stable = stable
@@ -38,26 +47,55 @@ class OpenStackAmuletDeployment(AmuletDeployment):
38 47
         # out.
39 48
         self.current_next = "trusty"
40 49
 
50
+    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
51
+        """Get a logger object that will log to stdout."""
52
+        log = logging
53
+        logger = log.getLogger(name)
54
+        fmt = log.Formatter("%(asctime)s %(funcName)s "
55
+                            "%(levelname)s: %(message)s")
56
+
57
+        handler = log.StreamHandler(stream=sys.stdout)
58
+        handler.setLevel(level)
59
+        handler.setFormatter(fmt)
60
+
61
+        logger.addHandler(handler)
62
+        logger.setLevel(level)
63
+
64
+        return logger
65
+
41 66
     def _determine_branch_locations(self, other_services):
42 67
         """Determine the branch locations for the other services.
43 68
 
44 69
            Determine if the local branch being tested is derived from its
45 70
            stable or next (dev) branch, and based on this, use the corresonding
46 71
            stable or next branches for the other_services."""
47
-        base_charms = ['mysql', 'mongodb']
72
+
73
+        self.log.info('OpenStackAmuletDeployment:  determine branch locations')
74
+
75
+        # Charms outside the lp:~openstack-charmers namespace
76
+        base_charms = ['mysql', 'mongodb', 'nrpe']
77
+
78
+        # Force these charms to current series even when using an older series.
79
+        # ie. Use trusty/nrpe even when series is precise, as the P charm
80
+        # does not possess the necessary external master config and hooks.
81
+        force_series_current = ['nrpe']
48 82
 
49 83
         if self.series in ['precise', 'trusty']:
50 84
             base_series = self.series
51 85
         else:
52 86
             base_series = self.current_next
53 87
 
54
-        if self.stable:
55
-            for svc in other_services:
88
+        for svc in other_services:
89
+            if svc['name'] in force_series_current:
90
+                base_series = self.current_next
91
+            # If a location has been explicitly set, use it
92
+            if svc.get('location'):
93
+                continue
94
+            if self.stable:
56 95
                 temp = 'lp:charms/{}/{}'
57 96
                 svc['location'] = temp.format(base_series,
58 97
                                               svc['name'])
59
-        else:
60
-            for svc in other_services:
98
+            else:
61 99
                 if svc['name'] in base_charms:
62 100
                     temp = 'lp:charms/{}/{}'
63 101
                     svc['location'] = temp.format(base_series,
@@ -66,10 +104,13 @@ class OpenStackAmuletDeployment(AmuletDeployment):
66 104
                     temp = 'lp:~openstack-charmers/charms/{}/{}/next'
67 105
                     svc['location'] = temp.format(self.current_next,
68 106
                                                   svc['name'])
107
+
69 108
         return other_services
70 109
 
71 110
     def _add_services(self, this_service, other_services):
72 111
         """Add services to the deployment and set openstack-origin/source."""
112
+        self.log.info('OpenStackAmuletDeployment:  adding services')
113
+
73 114
         other_services = self._determine_branch_locations(other_services)
74 115
 
75 116
         super(OpenStackAmuletDeployment, self)._add_services(this_service,
@@ -77,29 +118,103 @@ class OpenStackAmuletDeployment(AmuletDeployment):
77 118
 
78 119
         services = other_services
79 120
         services.append(this_service)
121
+
122
+        # Charms which should use the source config option
80 123
         use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
81
-                      'ceph-osd', 'ceph-radosgw']
82
-        # Most OpenStack subordinate charms do not expose an origin option
83
-        # as that is controlled by the principle.
84
-        ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
124
+                      'ceph-osd', 'ceph-radosgw', 'ceph-mon']
125
+
126
+        # Charms which can not use openstack-origin, ie. many subordinates
127
+        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
128
+                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
129
+                     'cinder-backup']
85 130
 
86 131
         if self.openstack:
87 132
             for svc in services:
88
-                if svc['name'] not in use_source + ignore:
133
+                if svc['name'] not in use_source + no_origin:
89 134
                     config = {'openstack-origin': self.openstack}
90 135
                     self.d.configure(svc['name'], config)
91 136
 
92 137
         if self.source:
93 138
             for svc in services:
94
-                if svc['name'] in use_source and svc['name'] not in ignore:
139
+                if svc['name'] in use_source and svc['name'] not in no_origin:
95 140
                     config = {'source': self.source}
96 141
                     self.d.configure(svc['name'], config)
97 142
 
98 143
     def _configure_services(self, configs):
99 144
         """Configure all of the services."""
145
+        self.log.info('OpenStackAmuletDeployment:  configure services')
100 146
         for service, config in six.iteritems(configs):
101 147
             self.d.configure(service, config)
102 148
 
149
+    def _auto_wait_for_status(self, message=None, exclude_services=None,
150
+                              include_only=None, timeout=1800):
151
+        """Wait for all units to have a specific extended status, except
152
+        for any defined as excluded.  Unless specified via message, any
153
+        status containing any case of 'ready' will be considered a match.
154
+
155
+        Examples of message usage:
156
+
157
+          Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
158
+              message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
159
+
160
+          Wait for all units to reach this status (exact match):
161
+              message = re.compile('^Unit is ready and clustered$')
162
+
163
+          Wait for all units to reach any one of these (exact match):
164
+              message = re.compile('Unit is ready|OK|Ready')
165
+
166
+          Wait for at least one unit to reach this status (exact match):
167
+              message = {'ready'}
168
+
169
+        See Amulet's sentry.wait_for_messages() for message usage detail.
170
+        https://github.com/juju/amulet/blob/master/amulet/sentry.py
171
+
172
+        :param message: Expected status match
173
+        :param exclude_services: List of juju service names to ignore,
174
+            not to be used in conjuction with include_only.
175
+        :param include_only: List of juju service names to exclusively check,
176
+            not to be used in conjuction with exclude_services.
177
+        :param timeout: Maximum time in seconds to wait for status match
178
+        :returns: None.  Raises if timeout is hit.
179
+        """
180
+        self.log.info('Waiting for extended status on units...')
181
+
182
+        all_services = self.d.services.keys()
183
+
184
+        if exclude_services and include_only:
185
+            raise ValueError('exclude_services can not be used '
186
+                             'with include_only')
187
+
188
+        if message:
189
+            if isinstance(message, re._pattern_type):
190
+                match = message.pattern
191
+            else:
192
+                match = message
193
+
194
+            self.log.debug('Custom extended status wait match: '
195
+                           '{}'.format(match))
196
+        else:
197
+            self.log.debug('Default extended status wait match:  contains '
198
+                           'READY (case-insensitive)')
199
+            message = re.compile('.*ready.*', re.IGNORECASE)
200
+
201
+        if exclude_services:
202
+            self.log.debug('Excluding services from extended status match: '
203
+                           '{}'.format(exclude_services))
204
+        else:
205
+            exclude_services = []
206
+
207
+        if include_only:
208
+            services = include_only
209
+        else:
210
+            services = list(set(all_services) - set(exclude_services))
211
+
212
+        self.log.debug('Waiting up to {}s for extended status on services: '
213
+                       '{}'.format(timeout, services))
214
+        service_messages = {service: message for service in services}
215
+        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
216
+        self.log.info('OK')
217
+
103 218
     def _get_openstack_release(self):
104 219
         """Get openstack release.
105 220
 
@@ -111,7 +226,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
111 226
          self.precise_havana, self.precise_icehouse,
112 227
          self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
113 228
          self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
114
-         self.wily_liberty) = range(12)
229
+         self.wily_liberty, self.trusty_mitaka,
230
+         self.xenial_mitaka) = range(14)
115 231
 
116 232
         releases = {
117 233
             ('precise', None): self.precise_essex,
@@ -123,9 +239,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
123 239
             ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
124 240
             ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
125 241
             ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
242
+            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
126 243
             ('utopic', None): self.utopic_juno,
127 244
             ('vivid', None): self.vivid_kilo,
128
-            ('wily', None): self.wily_liberty}
245
+            ('wily', None): self.wily_liberty,
246
+            ('xenial', None): self.xenial_mitaka}
129 247
         return releases[(self.series, self.openstack)]
130 248
 
131 249
     def _get_openstack_release_string(self):
@@ -142,6 +260,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
142 260
             ('utopic', 'juno'),
143 261
             ('vivid', 'kilo'),
144 262
             ('wily', 'liberty'),
263
+            ('xenial', 'mitaka'),
145 264
         ])
146 265
         if self.openstack:
147 266
             os_origin = self.openstack.split(':')[1]

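As a rough sketch of how the new _auto_wait_for_status helper might be used by a charm's amulet tests (the subclass and service names below are illustrative, not taken from this change):

import re

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment,
)


class ExampleDeployment(OpenStackAmuletDeployment):
    """Hypothetical deployment subclass exercising the status helper."""

    def wait_until_ready(self):
        # Block until every unit except nrpe reports a 'ready'-style
        # workload status message, raising if 900s pass without a match.
        self._auto_wait_for_status(
            message=re.compile('.*ready.*', re.IGNORECASE),
            exclude_services=['nrpe'],
            timeout=900)
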
+ 381  - 0   hooks/charmhelpers/contrib/openstack/amulet/utils.py

@@ -18,6 +18,7 @@ import amulet
18 18
 import json
19 19
 import logging
20 20
 import os
21
+import re
21 22
 import six
22 23
 import time
23 24
 import urllib
@@ -27,6 +28,7 @@ import glanceclient.v1.client as glance_client
27 28
 import heatclient.v1.client as heat_client
28 29
 import keystoneclient.v2_0 as keystone_client
29 30
 import novaclient.v1_1.client as nova_client
31
+import pika
30 32
 import swiftclient
31 33
 
32 34
 from charmhelpers.contrib.amulet.utils import (
@@ -602,3 +604,382 @@ class OpenStackAmuletUtils(AmuletUtils):
602 604
             self.log.debug('Ceph {} samples (OK): '
603 605
                            '{}'.format(sample_type, samples))
604 606
             return None
607
+
608
+    # rabbitmq/amqp specific helpers:
609
+
610
+    def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
611
+        """Wait for rmq units extended status to show cluster readiness,
612
+        after an optional initial sleep period.  Initial sleep is likely
613
+        necessary to be effective following a config change, as status
614
+        message may not instantly update to non-ready."""
615
+
616
+        if init_sleep:
617
+            time.sleep(init_sleep)
618
+
619
+        message = re.compile('^Unit is ready and clustered$')
620
+        deployment._auto_wait_for_status(message=message,
621
+                                         timeout=timeout,
622
+                                         include_only=['rabbitmq-server'])
623
+
624
+    def add_rmq_test_user(self, sentry_units,
625
+                          username="testuser1", password="changeme"):
626
+        """Add a test user via the first rmq juju unit, check connection as
627
+        the new user against all sentry units.
628
+
629
+        :param sentry_units: list of sentry unit pointers
630
+        :param username: amqp user name, default to testuser1
631
+        :param password: amqp user password
632
+        :returns: None if successful.  Raise on error.
633
+        """
634
+        self.log.debug('Adding rmq user ({})...'.format(username))
635
+
636
+        # Check that user does not already exist
637
+        cmd_user_list = 'rabbitmqctl list_users'
638
+        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
639
+        if username in output:
640
+            self.log.warning('User ({}) already exists, returning '
641
+                             'gracefully.'.format(username))
642
+            return
643
+
644
+        perms = '".*" ".*" ".*"'
645
+        cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
646
+                'rabbitmqctl set_permissions {} {}'.format(username, perms)]
647
+
648
+        # Add user via first unit
649
+        for cmd in cmds:
650
+            output, _ = self.run_cmd_unit(sentry_units[0], cmd)
651
+
652
+        # Check connection against the other sentry_units
653
+        self.log.debug('Checking user connect against units...')
654
+        for sentry_unit in sentry_units:
655
+            connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
656
+                                                   username=username,
657
+                                                   password=password)
658
+            connection.close()
659
+
660
+    def delete_rmq_test_user(self, sentry_units, username="testuser1"):
661
+        """Delete a rabbitmq user via the first rmq juju unit.
662
+
663
+        :param sentry_units: list of sentry unit pointers
664
+        :param username: amqp user name, default to testuser1
665
+        :param password: amqp user password
666
+        :returns: None if successful or no such user.
667
+        """
668
+        self.log.debug('Deleting rmq user ({})...'.format(username))
669
+
670
+        # Check that the user exists
671
+        cmd_user_list = 'rabbitmqctl list_users'
672
+        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
673
+
674
+        if username not in output:
675
+            self.log.warning('User ({}) does not exist, returning '
676
+                             'gracefully.'.format(username))
677
+            return
678
+
679
+        # Delete the user
680
+        cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
681
+        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
682
+
683
+    def get_rmq_cluster_status(self, sentry_unit):
684
+        """Execute rabbitmq cluster status command on a unit and return
685
+        the full output.
686
+
687
+        :param sentry_unit: sentry unit
688
+        :returns: String containing console output of cluster status command
689
+        """
690
+        cmd = 'rabbitmqctl cluster_status'
691
+        output, _ = self.run_cmd_unit(sentry_unit, cmd)
692
+        self.log.debug('{} cluster_status:\n{}'.format(
693
+            sentry_unit.info['unit_name'], output))
694
+        return str(output)
695
+
696
+    def get_rmq_cluster_running_nodes(self, sentry_unit):
697
+        """Parse rabbitmqctl cluster_status output string, return list of
698
+        running rabbitmq cluster nodes.
699
+
700
+        :param sentry_unit: sentry unit
701
+        :returns: List containing node names of running nodes
702
+        """
703
+        # NOTE(beisner): rabbitmqctl cluster_status output is not
704
+        # json-parsable, do string chop foo, then json.loads that.
705
+        str_stat = self.get_rmq_cluster_status(sentry_unit)
706
+        if 'running_nodes' in str_stat:
707
+            pos_start = str_stat.find("{running_nodes,") + 15
708
+            pos_end = str_stat.find("]},", pos_start) + 1
709
+            str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
710
+            run_nodes = json.loads(str_run_nodes)
711
+            return run_nodes
712
+        else:
713
+            return []
714
+
715
+    def validate_rmq_cluster_running_nodes(self, sentry_units):
716
+        """Check that all rmq unit hostnames are represented in the
717
+        cluster_status output of all units.
718
+
719
+        :param host_names: dict of juju unit names to host names
720
+        :param units: list of sentry unit pointers (all rmq units)
721
+        :returns: None if successful, otherwise return error message
722
+        """
723
+        host_names = self.get_unit_hostnames(sentry_units)
724
+        errors = []
725
+
726
+        # Query every unit for cluster_status running nodes
727
+        for query_unit in sentry_units:
728
+            query_unit_name = query_unit.info['unit_name']
729
+            running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
730
+
731
+            # Confirm that every unit is represented in the queried unit's
732
+            # cluster_status running nodes output.
733
+            for validate_unit in sentry_units:
734
+                val_host_name = host_names[validate_unit.info['unit_name']]
735
+                val_node_name = 'rabbit@{}'.format(val_host_name)
736
+
737
+                if val_node_name not in running_nodes:
738
+                    errors.append('Cluster member check failed on {}: {} not '
739
+                                  'in {}\n'.format(query_unit_name,
740
+                                                   val_node_name,
741
+                                                   running_nodes))
742
+        if errors:
743
+            return ''.join(errors)
744
+
745
+    def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
746
+        """Check a single juju rmq unit for ssl and port in the config file."""
747
+        host = sentry_unit.info['public-address']
748
+        unit_name = sentry_unit.info['unit_name']
749
+
750
+        conf_file = '/etc/rabbitmq/rabbitmq.config'
751
+        conf_contents = str(self.file_contents_safe(sentry_unit,
752
+                                                    conf_file, max_wait=16))
753
+        # Checks
754
+        conf_ssl = 'ssl' in conf_contents
755
+        conf_port = str(port) in conf_contents
756
+
757
+        # Port explicitly checked in config
758
+        if port and conf_port and conf_ssl:
759
+            self.log.debug('SSL is enabled  @{}:{} '
760
+                           '({})'.format(host, port, unit_name))
761
+            return True
762
+        elif port and not conf_port and conf_ssl:
763
+            self.log.debug('SSL is enabled @{} but not on port {} '
764
+                           '({})'.format(host, port, unit_name))
765
+            return False
766
+        # Port not checked (useful when checking that ssl is disabled)
767
+        elif not port and conf_ssl:
768
+            self.log.debug('SSL is enabled  @{}:{} '
769
+                           '({})'.format(host, port, unit_name))
770
+            return True
771
+        elif not conf_ssl:
772
+            self.log.debug('SSL not enabled @{}:{} '
773
+                           '({})'.format(host, port, unit_name))
774
+            return False
775
+        else:
776
+            msg = ('Unknown condition when checking SSL status @{}:{} '
777
+                   '({})'.format(host, port, unit_name))
778
+            amulet.raise_status(amulet.FAIL, msg)
779
+
780
+    def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
781
+        """Check that ssl is enabled on rmq juju sentry units.
782
+
783
+        :param sentry_units: list of all rmq sentry units
784
+        :param port: optional ssl port override to validate
785
+        :returns: None if successful, otherwise return error message
786
+        """
787
+        for sentry_unit in sentry_units:
788
+            if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
789
+                return ('Unexpected condition:  ssl is disabled on unit '
790
+                        '({})'.format(sentry_unit.info['unit_name']))
791
+        return None
792
+
793
+    def validate_rmq_ssl_disabled_units(self, sentry_units):
794
+        """Check that ssl is enabled on listed rmq juju sentry units.
795
+
796
+        :param sentry_units: list of all rmq sentry units
797
+        :returns: None if successful, otherwise return error message
798
+        """
799
+        for sentry_unit in sentry_units:
800
+            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
801
+                return ('Unexpected condition:  ssl is enabled on unit '
802
+                        '({})'.format(sentry_unit.info['unit_name']))
803
+        return None
804
+
805
+    def configure_rmq_ssl_on(self, sentry_units, deployment,
806
+                             port=None, max_wait=60):
807
+        """Turn ssl charm config option on, with optional non-default
808
+        ssl port specification.  Confirm that it is enabled on every
809
+        unit.
810
+
811
+        :param sentry_units: list of sentry units
812
+        :param deployment: amulet deployment object pointer
813
+        :param port: amqp port, use defaults if None
814
+        :param max_wait: maximum time to wait in seconds to confirm
815
+        :returns: None if successful.  Raise on error.
816
+        """
817
+        self.log.debug('Setting ssl charm config option:  on')
818
+
819
+        # Enable RMQ SSL
820
+        config = {'ssl': 'on'}
821
+        if port:
822
+            config['ssl_port'] = port
823
+
824
+        deployment.d.configure('rabbitmq-server', config)
825
+
826
+        # Wait for unit status
827
+        self.rmq_wait_for_cluster(deployment)
828
+
829
+        # Confirm
830
+        tries = 0
831
+        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
832
+        while ret and tries < (max_wait / 4):
833
+            time.sleep(4)
834
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
835
+            ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
836
+            tries += 1
837
+
838
+        if ret:
839
+            amulet.raise_status(amulet.FAIL, ret)
840
+
841
+    def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
842
+        """Turn ssl charm config option off, confirm that it is disabled
843
+        on every unit.
844
+
845
+        :param sentry_units: list of sentry units
846
+        :param deployment: amulet deployment object pointer
847
+        :param max_wait: maximum time to wait in seconds to confirm
848
+        :returns: None if successful.  Raise on error.
849
+        """
850
+        self.log.debug('Setting ssl charm config option:  off')
851
+
852
+        # Disable RMQ SSL
853
+        config = {'ssl': 'off'}
854
+        deployment.d.configure('rabbitmq-server', config)
855
+
856
+        # Wait for unit status
857
+        self.rmq_wait_for_cluster(deployment)
858
+
859
+        # Confirm
860
+        tries = 0
861
+        ret = self.validate_rmq_ssl_disabled_units(sentry_units)
862
+        while ret and tries < (max_wait / 4):
863
+            time.sleep(4)
864
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
865
+            ret = self.validate_rmq_ssl_disabled_units(sentry_units)
866
+            tries += 1
867
+
868
+        if ret:
869
+            amulet.raise_status(amulet.FAIL, ret)
870
+
871
+    def connect_amqp_by_unit(self, sentry_unit, ssl=False,
872
+                             port=None, fatal=True,
873
+                             username="testuser1", password="changeme"):
874
+        """Establish and return a pika amqp connection to the rabbitmq service
875
+        running on a rmq juju unit.
876
+
877
+        :param sentry_unit: sentry unit pointer
878
+        :param ssl: boolean, default to False
879
+        :param port: amqp port, use defaults if None
880
+        :param fatal: boolean, default to True (raises on connect error)
881
+        :param username: amqp user name, default to testuser1
882
+        :param password: amqp user password
883
+        :returns: pika amqp connection pointer or None if failed and non-fatal
884
+        """
885
+        host = sentry_unit.info['public-address']
886
+        unit_name = sentry_unit.info['unit_name']
887
+
888
+        # Default port logic if port is not specified
889
+        if ssl and not port:
890
+            port = 5671
891
+        elif not ssl and not port:
892
+            port = 5672
893
+
894
+        self.log.debug('Connecting to amqp on {}:{} ({}) as '
895
+                       '{}...'.format(host, port, unit_name, username))
896
+
897
+        try:
898
+            credentials = pika.PlainCredentials(username, password)
899
+            parameters = pika.ConnectionParameters(host=host, port=port,
900
+                                                   credentials=credentials,
901
+                                                   ssl=ssl,
902
+                                                   connection_attempts=3,
903
+                                                   retry_delay=5,
904
+                                                   socket_timeout=1)
905
+            connection = pika.BlockingConnection(parameters)
906
+            assert connection.server_properties['product'] == 'RabbitMQ'
907
+            self.log.debug('Connect OK')
908
+            return connection
909
+        except Exception as e:
910
+            msg = ('amqp connection failed to {}:{} as '
911
+                   '{} ({})'.format(host, port, username, str(e)))
912
+            if fatal:
913
+                amulet.raise_status(amulet.FAIL, msg)
914
+            else:
915
+                self.log.warn(msg)
916
+                return None
917
+
918
+    def publish_amqp_message_by_unit(self, sentry_unit, message,
919
+                                     queue="test", ssl=False,
920
+                                     username="testuser1",
921
+                                     password="changeme",
922
+                                     port=None):
923
+        """Publish an amqp message to a rmq juju unit.
924
+
925
+        :param sentry_unit: sentry unit pointer
926
+        :param message: amqp message string
927
+        :param queue: message queue, default to test
928
+        :param username: amqp user name, default to testuser1
929
+        :param password: amqp user password
930
+        :param ssl: boolean, default to False
931
+        :param port: amqp port, use defaults if None
932
+        :returns: None.  Raises exception if publish failed.
933
+        """
934
+        self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
935
+                                                                    message))
936
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
937
+                                               port=port,
938
+                                               username=username,
939
+                                               password=password)
940
+
941
+        # NOTE(beisner): extra debug here re: pika hang potential:
942
+        #   https://github.com/pika/pika/issues/297
943
+        #   https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
944
+        self.log.debug('Defining channel...')
945
+        channel = connection.channel()
946
+        self.log.debug('Declaring queue...')
947
+        channel.queue_declare(queue=queue, auto_delete=False, durable=True)
948
+        self.log.debug('Publishing message...')
949
+        channel.basic_publish(exchange='', routing_key=queue, body=message)
950
+        self.log.debug('Closing channel...')
951
+        channel.close()
952
+        self.log.debug('Closing connection...')
953
+        connection.close()
954
+
955
+    def get_amqp_message_by_unit(self, sentry_unit, queue="test",
956
+                                 username="testuser1",
957
+                                 password="changeme",
958
+                                 ssl=False, port=None):
959
+        """Get an amqp message from a rmq juju unit.
960
+
961
+        :param sentry_unit: sentry unit pointer
962
+        :param queue: message queue, default to test
963
+        :param username: amqp user name, default to testuser1
964
+        :param password: amqp user password
965
+        :param ssl: boolean, default to False
966
+        :param port: amqp port, use defaults if None
967
+        :returns: amqp message body as string.  Raise if get fails.
968
+        """
969
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
970
+                                               port=port,
971
+                                               username=username,
972
+                                               password=password)
973
+        channel = connection.channel()
974
+        method_frame, _, body = channel.basic_get(queue)
975
+
976
+        if method_frame:
977
+            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
978
+                                                                         body))
979
+            channel.basic_ack(method_frame.delivery_tag)
980
+            channel.close()
981
+            connection.close()
982
+            return body
983
+        else:
984
+            msg = 'No message retrieved.'
985
+            amulet.raise_status(amulet.FAIL, msg)

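A rough usage sketch for the RabbitMQ helpers added above; the sentry unit list is assumed to come from an amulet deployment and is passed in here rather than looked up:

from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils

u = OpenStackAmuletUtils()


def check_amqp_round_trip(sentry_units):
    """Exercise the rmq test-user and publish/get helpers against a list
    of rabbitmq-server sentry units."""
    # Create a test user and confirm it can connect on every unit.
    u.add_rmq_test_user(sentry_units)

    # Publish to the default 'test' queue on the first unit, then read
    # the message back and compare.
    u.publish_amqp_message_by_unit(sentry_units[0], 'hello world')
    assert u.get_amqp_message_by_unit(sentry_units[0]) == 'hello world'

    # Remove the test user again.
    u.delete_rmq_test_user(sentry_units)
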
+ 208  - 75  hooks/charmhelpers/contrib/openstack/context.py

@@ -14,6 +14,7 @@
14 14
 # You should have received a copy of the GNU Lesser General Public License
15 15
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
16 16
 
17
+import glob
17 18
 import json
18 19
 import os
19 20
 import re
@@ -50,10 +51,13 @@ from charmhelpers.core.sysctl import create as sysctl_create
50 51
 from charmhelpers.core.strutils import bool_from_string
51 52
 
52 53
 from charmhelpers.core.host import (
54
+    get_bond_master,
55
+    is_phy_iface,
53 56
     list_nics,
54 57
     get_nic_hwaddr,
55 58
     mkdir,
56 59
     write_file,
60
+    pwgen,
57 61
 )
58 62
 from charmhelpers.contrib.hahelpers.cluster import (
59 63
     determine_apache_port,
@@ -84,6 +88,14 @@ from charmhelpers.contrib.network.ip import (
84 88
     is_bridge_member,
85 89
 )
86 90
 from charmhelpers.contrib.openstack.utils import get_host_ip
91
+from charmhelpers.core.unitdata import kv
92
+
93
+try:
94
+    import psutil
95
+except ImportError:
96
+    apt_install('python-psutil', fatal=True)
97
+    import psutil
98
+
87 99
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
88 100
 ADDRESS_TYPES = ['admin', 'internal', 'public']
89 101
 
@@ -192,10 +204,50 @@ def config_flags_parser(config_flags):
192 204
 class OSContextGenerator(object):
193 205
     """Base class for all context generators."""
194 206
     interfaces = []
207
+    related = False
208
+    complete = False
209
+    missing_data = []
195 210
 
196 211
     def __call__(self):
197 212
         raise NotImplementedError
198 213
 
214
+    def context_complete(self, ctxt):
215
+        """Check for missing data for the required context data.
216
+        Set self.missing_data if it exists and return False.
217
+        Set self.complete if no missing data and return True.
218
+        """
219
+        # Fresh start
220
+        self.complete = False
221
+        self.missing_data = []
222
+        for k, v in six.iteritems(ctxt):
223
+            if v is None or v == '':
224
+                if k not in self.missing_data:
225
+                    self.missing_data.append(k)
226
+
227
+        if self.missing_data:
228
+            self.complete = False
229
+            log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
230
+        else:
231
+            self.complete = True
232
+        return self.complete
233
+
234
+    def get_related(self):
235
+        """Check if any of the context interfaces have relation ids.
236
+        Set self.related and return True if one of the interfaces
237
+        has relation ids.
238
+        """
239
+        # Fresh start
240
+        self.related = False
241
+        try:
242
+            for interface in self.interfaces:
243
+                if relation_ids(interface):
244
+                    self.related = True
245
+            return self.related
246
+        except AttributeError as e:
247
+            log("{} {}"
248
+                "".format(self, e), 'INFO')
249
+            return self.related
250
+
199 251
 
200 252
 class SharedDBContext(OSContextGenerator):
201 253
     interfaces = ['shared-db']
@@ -211,6 +263,7 @@ class SharedDBContext(OSContextGenerator):
211 263
         self.database = database
212 264
         self.user = user
213 265
         self.ssl_dir = ssl_dir
266
+        self.rel_name = self.interfaces[0]
214 267
 
215 268
     def __call__(self):
216 269
         self.database = self.database or config('database')
@@ -244,6 +297,7 @@ class SharedDBContext(OSContextGenerator):
244 297
             password_setting = self.relation_prefix + '_password'
245 298
 
246 299
         for rid in relation_ids(self.interfaces[0]):
300
+            self.related = True
247 301
             for unit in related_units(rid):
248 302
                 rdata = relation_get(rid=rid, unit=unit)
249 303
                 host = rdata.get('db_host')
@@ -255,7 +309,7 @@ class SharedDBContext(OSContextGenerator):
255 309
                     'database_password': rdata.get(password_setting),
256 310
                     'database_type': 'mysql'
257 311
                 }
258
-                if context_complete(ctxt):
312
+                if self.context_complete(ctxt):
259 313
                     db_ssl(rdata, ctxt, self.ssl_dir)
260 314
                     return ctxt
261 315
         return {}
@@ -276,6 +330,7 @@ class PostgresqlDBContext(OSContextGenerator):
276 330
 
277 331
         ctxt = {}
278 332
         for rid in relation_ids(self.interfaces[0]):
333
+            self.related = True
279 334
             for unit in related_units(rid):
280 335
                 rel_host = relation_get('host', rid=rid, unit=unit)
281 336
                 rel_user = relation_get('user', rid=rid, unit=unit)
@@ -285,7 +340,7 @@ class PostgresqlDBContext(OSContextGenerator):
285 340
                         'database_user': rel_user,
286 341
                         'database_password': rel_passwd,
287 342
                         'database_type': 'postgresql'}
288
-                if context_complete(ctxt):
343
+                if self.context_complete(ctxt):
289 344
                     return ctxt
290 345
 
291 346
         return {}
@@ -346,6 +401,7 @@ class IdentityServiceContext(OSContextGenerator):
346 401
             ctxt['signing_dir'] = cachedir
347 402
 
348 403
         for rid in relation_ids(self.rel_name):
404
+            self.related = True
349 405
             for unit in related_units(rid):
350 406
                 rdata = relation_get(rid=rid, unit=unit)
351 407
                 serv_host = rdata.get('service_host')
@@ -364,7 +420,7 @@ class IdentityServiceContext(OSContextGenerator):
364 420
                              'service_protocol': svc_protocol,
365 421
                              'auth_protocol': auth_protocol})
366 422
 
367
-                if context_complete(ctxt):
423
+                if self.context_complete(ctxt):
368 424
                     # NOTE(jamespage) this is required for >= icehouse
369 425
                     # so a missing value just indicates keystone needs
370 426
                     # upgrading
@@ -403,6 +459,7 @@ class AMQPContext(OSContextGenerator):
403 459
         ctxt = {}
404 460
         for rid in relation_ids(self.rel_name):
405 461
             ha_vip_only = False
462
+            self.related = True
406 463
             for unit in related_units(rid):
407 464
                 if relation_get('clustered', rid=rid, unit=unit):
408 465
                     ctxt['clustered'] = True
@@ -435,7 +492,7 @@ class AMQPContext(OSContextGenerator):
435 492
                 ha_vip_only = relation_get('ha-vip-only',
436 493
                                            rid=rid, unit=unit) is not None
437 494
 
438
-                if context_complete(ctxt):
495
+                if self.context_complete(ctxt):
439 496
                     if 'rabbit_ssl_ca' in ctxt:
440 497
                         if not self.ssl_dir:
441 498
                             log("Charm not setup for ssl support but ssl ca "
@@ -467,7 +524,7 @@ class AMQPContext(OSContextGenerator):
467 524
             ctxt['oslo_messaging_flags'] = config_flags_parser(
468 525
                 oslo_messaging_flags)
469 526
 
470
-        if not context_complete(ctxt):
527
+        if not self.complete:
471 528
             return {}
472 529
 
473 530
         return ctxt
@@ -483,13 +540,15 @@ class CephContext(OSContextGenerator):
483 540
 
484 541
         log('Generating template context for ceph', level=DEBUG)
485 542
         mon_hosts = []
486
-        auth = None
487
-        key = None
488
-        use_syslog = str(config('use-syslog')).lower()
543
+        ctxt = {
544
+            'use_syslog': str(config('use-syslog')).lower()
545
+        }
489 546
         for rid in relation_ids('ceph'):
490 547
             for unit in related_units(rid):
491
-                auth = relation_get('auth', rid=rid, unit=unit)
492
-                key = relation_get('key', rid=rid, unit=unit)
548
+                if not ctxt.get('auth'):
549
+                    ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
550
+                if not ctxt.get('key'):
551
+                    ctxt['key'] = relation_get('key', rid=rid, unit=unit)
493 552
                 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
494 553
                                              unit=unit)
495 554
                 unit_priv_addr = relation_get('private-address', rid=rid,
@@ -498,15 +557,12 @@ class CephContext(OSContextGenerator):
498 557
                 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
499 558
                 mon_hosts.append(ceph_addr)
500 559
 
501
-        ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
502
-                'auth': auth,
503
-                'key': key,
504
-                'use_syslog': use_syslog}
560
+        ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
505 561
 
506 562
         if not os.path.isdir('/etc/ceph'):
507 563
             os.mkdir('/etc/ceph')
508 564
 
509
-        if not context_complete(ctxt):
565
+        if not self.context_complete(ctxt):
510 566
             return {}
511 567
 
512 568
         ensure_packages(['ceph-common'])
@@ -579,15 +635,28 @@ class HAProxyContext(OSContextGenerator):
579 635
         if config('haproxy-client-timeout'):
580 636
             ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
581 637
 
638
+        if config('haproxy-queue-timeout'):
639
+            ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
640
+
641
+        if config('haproxy-connect-timeout'):
642
+            ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
643
+
582 644
         if config('prefer-ipv6'):
583 645
             ctxt['ipv6'] = True
584 646
             ctxt['local_host'] = 'ip6-localhost'
585 647
             ctxt['haproxy_host'] = '::'
586
-            ctxt['stat_port'] = ':::8888'
587 648
         else:
588 649
             ctxt['local_host'] = '127.0.0.1'
589 650
             ctxt['haproxy_host'] = '0.0.0.0'
590
-            ctxt['stat_port'] = ':8888'
651
+
652
+        ctxt['stat_port'] = '8888'
653
+
654
+        db = kv()
655
+        ctxt['stat_password'] = db.get('stat-password')
656
+        if not ctxt['stat_password']:
657
+            ctxt['stat_password'] = db.set('stat-password',
658
+                                           pwgen(32))
659
+            db.flush()
591 660
 
592 661
         for frontend in cluster_hosts:
593 662
             if (len(cluster_hosts[frontend]['backends']) > 1 or
@@ -878,19 +947,6 @@ class NeutronContext(OSContextGenerator):
878 947
 
879 948
         return calico_ctxt
880 949
 
881
-    def pg_ctxt(self):
882
-        driver = neutron_plugin_attribute(self.plugin, 'driver',
883
-                                          self.network_manager)
884
-        config = neutron_plugin_attribute(self.plugin, 'config',
885
-                                          self.network_manager)
886
-        ovs_ctxt = {'core_plugin': driver,
887
-                    'neutron_plugin': 'plumgrid',
888
-                    'neutron_security_groups': self.neutron_security_groups,
889
-                    'local_ip': unit_private_ip(),
890
-                    'config': config}
891
-
892
-        return ovs_ctxt
893
-
894 950
     def neutron_ctxt(self):
895 951
         if https():
896 952
             proto = 'https'
@@ -906,6 +962,31 @@ class NeutronContext(OSContextGenerator):
906 962
                 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
907 963
         return ctxt
908 964
 
965
+    def pg_ctxt(self):
966
+        driver = neutron_plugin_attribute(self.plugin, 'driver',
967
+                                          self.network_manager)
968
+        config = neutron_plugin_attribute(self.plugin, 'config',
969
+                                          self.network_manager)
970
+        ovs_ctxt = {'core_plugin': driver,
971
+                    'neutron_plugin': 'plumgrid',
972
+                    'neutron_security_groups': self.neutron_security_groups,
973
+                    'local_ip': unit_private_ip(),
974
+                    'config': config}
975
+        return ovs_ctxt
976
+
977
+    def midonet_ctxt(self):
978
+        driver = neutron_plugin_attribute(self.plugin, 'driver',
979
+                                          self.network_manager)
980
+        midonet_config = neutron_plugin_attribute(self.plugin, 'config',
981
+                                                  self.network_manager)
982
+        mido_ctxt = {'core_plugin': driver,
983
+                     'neutron_plugin': 'midonet',
984
+                     'neutron_security_groups': self.neutron_security_groups,
985
+                     'local_ip': unit_private_ip(),
986
+                     'config': midonet_config}
987
+
988
+        return mido_ctxt
989
+
909 990
     def __call__(self):
910 991
         if self.network_manager not in ['quantum', 'neutron']:
911 992
             return {}
@@ -927,6 +1008,8 @@ class NeutronContext(OSContextGenerator):
927 1008
             ctxt.update(self.nuage_ctxt())
928 1009
         elif self.plugin == 'plumgrid':
929 1010
             ctxt.update(self.pg_ctxt())
1011
+        elif self.plugin == 'midonet':
1012
+            ctxt.update(self.midonet_ctxt())
930 1013
 
931 1014
         alchemy_flags = config('neutron-alchemy-flags')
932 1015
         if alchemy_flags:
@@ -938,7 +1021,6 @@ class NeutronContext(OSContextGenerator):
938 1021
 
939 1022
 
940 1023
 class NeutronPortContext(OSContextGenerator):
941
-    NIC_PREFIXES = ['eth', 'bond']
942 1024
 
943 1025
     def resolve_ports(self, ports):
944 1026
         """Resolve NICs not yet bound to bridge(s)
@@ -950,7 +1032,18 @@ class NeutronPortContext(OSContextGenerator):
950 1032
 
951 1033
         hwaddr_to_nic = {}
952 1034
         hwaddr_to_ip = {}
953
-        for nic in list_nics(self.NIC_PREFIXES):
1035
+        for nic in list_nics():
1036
+            # Ignore virtual interfaces (bond masters will be identified from
1037
+            # their slaves)
1038
+            if not is_phy_iface(nic):
1039
+                continue
1040
+
1041
+            _nic = get_bond_master(nic)
1042
+            if _nic:
1043
+                log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
1044
+                    level=DEBUG)
1045
+                nic = _nic
1046
+
954 1047
             hwaddr = get_nic_hwaddr(nic)
955 1048
             hwaddr_to_nic[hwaddr] = nic
956 1049
             addresses = get_ipv4_addr(nic, fatal=False)
@@ -976,7 +1069,8 @@ class NeutronPortContext(OSContextGenerator):
976 1069
                 # trust it to be the real external network).
977 1070
                 resolved.append(entry)
978 1071
 
979
-        return resolved
1072
+        # Ensure no duplicates
1073
+        return list(set(resolved))
980 1074
 
981 1075
 
982 1076
 class OSConfigFlagContext(OSContextGenerator):
@@ -1016,6 +1110,20 @@ class OSConfigFlagContext(OSContextGenerator):
1016 1110
                 config_flags_parser(config_flags)}
1017 1111
 
1018 1112
 
1113
+class LibvirtConfigFlagsContext(OSContextGenerator):
1114
+    """
1115
+    This context provides support for extending
1116
+    the libvirt section through user-defined flags.
1117
+    """
1118
+    def __call__(self):
1119
+        ctxt = {}
1120
+        libvirt_flags = config('libvirt-flags')
1121
+        if libvirt_flags:
1122
+            ctxt['libvirt_flags'] = config_flags_parser(
1123
+                libvirt_flags)
1124
+        return ctxt
1125
+
1126
+
1019 1127
 class SubordinateConfigContext(OSContextGenerator):
1020 1128
 
1021 1129
     """
@@ -1048,7 +1156,7 @@ class SubordinateConfigContext(OSContextGenerator):
1048 1156
 
1049 1157
         ctxt = {
1050 1158
             ... other context ...
1051
-            'subordinate_config': {
1159
+            'subordinate_configuration': {
1052 1160
                 'DEFAULT': {
1053 1161
                     'key1': 'value1',
1054 1162
                 },
@@ -1066,13 +1174,22 @@ class SubordinateConfigContext(OSContextGenerator):
1066 1174
         :param config_file : Service's config file to query sections
1067 1175
         :param interface   : Subordinate interface to inspect
1068 1176
         """
1069
-        self.service = service
1070 1177
         self.config_file = config_file
1071
-        self.interface = interface
1178
+        if isinstance(service, list):
1179
+            self.services = service
1180
+        else:
1181
+            self.services = [service]
1182
+        if isinstance(interface, list):
1183
+            self.interfaces = interface
1184
+        else:
1185
+            self.interfaces = [interface]
1072 1186
 
1073 1187
     def __call__(self):
1074 1188
         ctxt = {'sections': {}}
1075
-        for rid in relation_ids(self.interface):
1189
+        rids = []
1190
+        for interface in self.interfaces:
1191
+            rids.extend(relation_ids(interface))
1192
+        for rid in rids:
1076 1193
             for unit in related_units(rid):
1077 1194
                 sub_config = relation_get('subordinate_configuration',
1078 1195
                                           rid=rid, unit=unit)
@@ -1080,33 +1197,37 @@ class SubordinateConfigContext(OSContextGenerator):
1080 1197
                     try:
1081 1198
                         sub_config = json.loads(sub_config)
1082 1199
                     except:
1083
-                        log('Could not parse JSON from subordinate_config '
1084
-                            'setting from %s' % rid, level=ERROR)
1200
+                        log('Could not parse JSON from '
1201
+                            'subordinate_configuration setting from %s'
1202
+                            % rid, level=ERROR)
1085 1203
                         continue
1086 1204
 
1087
-                    if self.service not in sub_config:
1088
-                        log('Found subordinate_config on %s but it contained'
1089
-                            'nothing for %s service' % (rid, self.service),
1090
-                            level=INFO)
1091
-                        continue
1092
-
1093
-                    sub_config = sub_config[self.service]
1094
-                    if self.config_file not in sub_config:
1095
-                        log('Found subordinate_config on %s but it contained'
1096
-                            'nothing for %s' % (rid, self.config_file),
1097
-                            level=INFO)
1098
-                        continue
1099
-
1100
-                    sub_config = sub_config[self.config_file]
1101
-                    for k, v in six.iteritems(sub_config):
1102
-                        if k == 'sections':
1103
-                            for section, config_dict in six.iteritems(v):
1104
-                                log("adding section '%s'" % (section),
1105
-                                    level=DEBUG)
1106
-                                ctxt[k][section] = config_dict
1107
-                        else:
1108
-                            ctxt[k] = v
1109
-
1205
+                    for service in self.services:
1206
+                        if service not in sub_config:
1207
+                            log('Found subordinate_configuration on %s but it '
1208
+                                'contained nothing for %s service'
1209
+                                % (rid, service), level=INFO)
1210
+                            continue
1211
+
1212
+                        sub_config = sub_config[service]
1213
+                        if self.config_file not in sub_config:
1214
+                            log('Found subordinate_configuration on %s but it '
1215
+                                'contained nothing for %s'
1216
+                                % (rid, self.config_file), level=INFO)
1217
+                            continue
1218
+
1219
+                        sub_config = sub_config[self.config_file]
1220
+                        for k, v in six.iteritems(sub_config):
1221
+                            if k == 'sections':
1222
+                                for section, config_list in six.iteritems(v):
1223
+                                    log("adding section '%s'" % (section),
1224
+                                        level=DEBUG)
1225
+                                    if ctxt[k].get(section):
1226
+                                        ctxt[k][section].extend(config_list)
1227
+                                    else:
1228
+                                        ctxt[k][section] = config_list
1229
+                            else:
1230
+                                ctxt[k] = v
1110 1231
         log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1111 1232
         return ctxt
1112 1233
 
@@ -1143,13 +1264,11 @@ class WorkerConfigContext(OSContextGenerator):
1143 1264
 
1144 1265
     @property
1145 1266
     def num_cpus(self):
1146
-        try:
1147
-            from psutil import NUM_CPUS
1148
-        except ImportError:
1149
-            apt_install('python-psutil', fatal=True)
1150
-            from psutil import NUM_CPUS
1151
-
1152
-        return NUM_CPUS
1267
+        # NOTE: use cpu_count if present (16.04 support)
1268
+        if hasattr(psutil, 'cpu_count'):
1269
+            return psutil.cpu_count()
1270
+        else:
1271
+            return psutil.NUM_CPUS
1153 1272
 
1154 1273
     def __call__(self):
1155 1274
         multiplier = config('worker-multiplier') or 0
@@ -1283,15 +1402,19 @@ class DataPortContext(NeutronPortContext):
1283 1402
     def __call__(self):
1284 1403
         ports = config('data-port')
1285 1404
         if ports:
1405
+            # Map of {port/mac:bridge}
1286 1406
             portmap = parse_data_port_mappings(ports)
1287
-            ports = portmap.values()
1407
+            ports = portmap.keys()
1408
+            # Resolve provided ports or mac addresses and filter out those
1409
+            # already attached to a bridge.
1288 1410
             resolved = self.resolve_ports(ports)
1411
+            # FIXME: is this necessary?
1289 1412
             normalized = {get_nic_hwaddr(port): port for port in resolved
1290 1413
                           if port not in ports}
1291 1414
             normalized.update({port: port for port in resolved
1292 1415
                                if port in ports})
1293 1416
             if resolved:
1294
-                return {bridge: normalized[port] for bridge, port in
1417
+                return {normalized[port]: bridge for port, bridge in
1295 1418
                         six.iteritems(portmap) if port in normalized.keys()}
1296 1419
 
1297 1420
         return None
@@ -1302,12 +1425,22 @@ class PhyNICMTUContext(DataPortContext):
1302 1425
     def __call__(self):
1303 1426
         ctxt = {}
1304 1427
         mappings = super(PhyNICMTUContext, self).__call__()
1305
-        if mappings and mappings.values():
1306
-            ports = mappings.values()
1428
+        if mappings and mappings.keys():
1429
+            ports = sorted(mappings.keys())
1307 1430
             napi_settings = NeutronAPIContext()()
1308 1431
             mtu = napi_settings.get('network_device_mtu')
1432
+            all_ports = set()
1433
+            # If any of ports is a vlan device, its underlying device must have
1434
+            # mtu applied first.
1435
+            for port in ports:
1436
+                for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
1437
+                    lport = os.path.basename(lport)
1438
+                    all_ports.add(lport.split('_')[1])
1439
+
1440
+            all_ports = list(all_ports)
1441
+            all_ports.extend(ports)
1309 1442
             if mtu:
1310
-                ctxt["devs"] = '\\n'.join(ports)
1443
+                ctxt["devs"] = '\\n'.join(all_ports)
1311 1444
                 ctxt['mtu'] = mtu
1312 1445
 
1313 1446
         return ctxt
@@ -1339,6 +1472,6 @@ class NetworkServiceContext(OSContextGenerator):
1339 1472
                     'auth_protocol':
1340 1473
                     rdata.get('auth_protocol') or 'http',
1341 1474
                 }
1342
-                if context_complete(ctxt):
1475
+                if self.context_complete(ctxt):
1343 1476
                     return ctxt
1344 1477
         return {}

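The context_complete() and get_related() additions move completeness tracking onto the context instances themselves. A minimal sketch of a custom context written against the new pattern (the 'example-backend' interface and its relation keys are made up for illustration):

from charmhelpers.contrib.openstack.context import OSContextGenerator
from charmhelpers.core.hookenv import (
    relation_get,
    relation_ids,
    related_units,
)


class ExampleBackendContext(OSContextGenerator):
    interfaces = ['example-backend']

    def __call__(self):
        for rid in relation_ids(self.interfaces[0]):
            self.related = True
            for unit in related_units(rid):
                ctxt = {
                    'backend_host': relation_get('hostname', rid=rid,
                                                 unit=unit),
                    'backend_port': relation_get('port', rid=rid, unit=unit),
                }
                # Unlike the old module-level context_complete(), this
                # records any empty keys on self.missing_data and sets
                # self.complete for later inspection.
                if self.context_complete(ctxt):
                    return ctxt
        return {}
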
+ 55  - 20  hooks/charmhelpers/contrib/openstack/neutron.py

@@ -50,7 +50,7 @@ def determine_dkms_package():
50 50
     if kernel_version() >= (3, 13):
51 51
         return []
52 52
     else:
53
-        return ['openvswitch-datapath-dkms']
53
+        return [headers_package(), 'openvswitch-datapath-dkms']
54 54
 
55 55
 
56 56
 # legacy
@@ -70,7 +70,7 @@ def quantum_plugins():
70 70
                                         relation_prefix='neutron',
71 71
                                         ssl_dir=QUANTUM_CONF_DIR)],
72 72
             'services': ['quantum-plugin-openvswitch-agent'],
73
-            'packages': [[headers_package()] + determine_dkms_package(),
73
+            'packages': [determine_dkms_package(),
74 74
                          ['quantum-plugin-openvswitch-agent']],
75 75
             'server_packages': ['quantum-server',
76 76
                                 'quantum-plugin-openvswitch'],
@@ -111,7 +111,7 @@ def neutron_plugins():
111 111
                                         relation_prefix='neutron',
112 112
                                         ssl_dir=NEUTRON_CONF_DIR)],
113 113
             'services': ['neutron-plugin-openvswitch-agent'],
114
-            'packages': [[headers_package()] + determine_dkms_package(),
114
+            'packages': [determine_dkms_package(),
115 115
                          ['neutron-plugin-openvswitch-agent']],
116 116
             'server_packages': ['neutron-server',
117 117
                                 'neutron-plugin-openvswitch'],
@@ -155,7 +155,7 @@ def neutron_plugins():
155 155
                                         relation_prefix='neutron',
156 156
                                         ssl_dir=NEUTRON_CONF_DIR)],
157 157
             'services': [],
158
-            'packages': [[headers_package()] + determine_dkms_package(),
158
+            'packages': [determine_dkms_package(),
159 159
                          ['neutron-plugin-cisco']],
160 160
             'server_packages': ['neutron-server',
161 161
                                 'neutron-plugin-cisco'],
@@ -174,7 +174,7 @@ def neutron_plugins():
174 174
                          'neutron-dhcp-agent',
175 175
                          'nova-api-metadata',
176 176
                          'etcd'],
177
-            'packages': [[headers_package()] + determine_dkms_package(),
177
+            'packages': [determine_dkms_package(),
178 178
                          ['calico-compute',
179 179
                           'bird',
180 180
                           'neutron-dhcp-agent',
@@ -209,6 +209,20 @@ def neutron_plugins():
209 209
             'server_packages': ['neutron-server',
210 210
                                 'neutron-plugin-plumgrid'],
211 211
             'server_services': ['neutron-server']
212
+        },
213
+        'midonet': {
214
+            'config': '/etc/neutron/plugins/midonet/midonet.ini',
215
+            'driver': 'midonet.neutron.plugin.MidonetPluginV2',
216
+            'contexts': [
217
+                context.SharedDBContext(user=config('neutron-database-user'),
218
+                                        database=config('neutron-database'),
219
+                                        relation_prefix='neutron',
220
+                                        ssl_dir=NEUTRON_CONF_DIR)],
221
+            'services': [],
222
+            'packages': [determine_dkms_package()],
223
+            'server_packages': ['neutron-server',
224
+                                'python-neutron-plugin-midonet'],
225
+            'server_services': ['neutron-server']
212 226
         }
213 227
     }
214 228
     if release >= 'icehouse':
@@ -219,6 +233,14 @@ def neutron_plugins():
219 233
                                              'neutron-plugin-ml2']
220 234
         # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
221 235
         plugins['nvp'] = plugins['nsx']
236
+    if release >= 'kilo':
237
+        plugins['midonet']['driver'] = (
238
+            'neutron.plugins.midonet.plugin.MidonetPluginV2')
239
+    if release >= 'liberty':
240
+        midonet_origin = config('midonet-origin')
241
+        if midonet_origin is not None and midonet_origin[4:5] == '1':
242
+            plugins['midonet']['driver'] = (
243
+                'midonet.neutron.plugin_v1.MidonetPluginV2')
222 244
     return plugins
223 245
 
224 246
 
@@ -269,17 +291,30 @@ def network_manager():
269 291
         return 'neutron'
270 292
 
271 293
 
272
-def parse_mappings(mappings):
294
+def parse_mappings(mappings, key_rvalue=False):
295
+    """By default mappings are lvalue keyed.
296
+
297
+    If key_rvalue is True, the mapping will be reversed to allow multiple
298
+    configs for the same lvalue.
299
+    """
273 300
     parsed = {}
274 301
     if mappings:
275 302
         mappings = mappings.split()
276 303
         for m in mappings:
277 304
             p = m.partition(':')
278
-            key = p[0].strip()
279
-            if p[1]:
280
-                parsed[key] = p[2].strip()
305
+
306
+            if key_rvalue:
307
+                key_index = 2
308
+                val_index = 0
309
+                # if there is no rvalue skip to next
310
+                if not p[1]:
311
+                    continue
281 312
             else:
282
-                parsed[key] = ''
313
+                key_index = 0
314
+                val_index = 2
315
+
316
+            key = p[key_index].strip()
317
+            parsed[key] = p[val_index].strip()
283 318
 
284 319
     return parsed
285 320
 
@@ -297,25 +332,25 @@ def parse_bridge_mappings(mappings):
297 332
 def parse_data_port_mappings(mappings, default_bridge='br-data'):
298 333
     """Parse data port mappings.
299 334
 
300
-    Mappings must be a space-delimited list of bridge:port mappings.
335
+    Mappings must be a space-delimited list of bridge:port.
301 336
 
302
-    Returns dict of the form {bridge:port}.
337
+    Returns dict of the form {port:bridge} where ports may be mac addresses or
338
+    interface names.
303 339
     """
304
-    _mappings = parse_mappings(mappings)
340
+
341
+    # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
342
+    # proposed for <port> since it may be a mac address which will differ
343
+    # across units, thus allowing first-known-good to be chosen.
344
+    _mappings = parse_mappings(mappings, key_rvalue=True)
305 345
     if not _mappings or list(_mappings.values()) == ['']:
306 346
         if not mappings:
307 347
             return {}
308 348
 
309 349
         # For backwards-compatibility we need to support port-only provided in
310 350
         # config.
311
-        _mappings = {default_bridge: mappings.split()[0]}
312
-
313
-    bridges = _mappings.keys()
314
-    ports = _mappings.values()
315
-    if len(set(bridges)) != len(bridges):
316
-        raise Exception("It is not allowed to have more than one port "
317
-                        "configured on the same bridge")
351
+        _mappings = {mappings.split()[0]: default_bridge}
318 352
 
353
+    ports = _mappings.keys()
319 354
     if len(set(ports)) != len(ports):
320 355
         raise Exception("It is not allowed to have the same port configured "
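For illustration, the reworked parser now returns {port: bridge}, accepts MAC addresses as ports, and keeps the legacy port-only form (values other than the 'br-data' default are examples only):

    parse_data_port_mappings('br-data:eth0 br-ex:aa:bb:cc:dd:ee:ff')
    # -> {'eth0': 'br-data', 'aa:bb:cc:dd:ee:ff': 'br-ex'}

    parse_data_port_mappings('eth0')
    # -> {'eth0': 'br-data'}   (backwards-compatible port-only config)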

+ 30
- 2
hooks/charmhelpers/contrib/openstack/templating.py View File

@@ -18,7 +18,7 @@ import os

 import six

-from charmhelpers.fetch import apt_install
+from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import (
     log,
     ERROR,
@@ -29,6 +29,7 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
 try:
     from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
 except ImportError:
+    apt_update(fatal=True)
     apt_install('python-jinja2', fatal=True)
     from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions

@@ -112,7 +113,7 @@ class OSConfigTemplate(object):

     def complete_contexts(self):
         '''
-        Return a list of interfaces that have atisfied contexts.
+        Return a list of interfaces that have satisfied contexts.
         '''
         if self._complete_contexts:
             return self._complete_contexts
@@ -293,3 +294,30 @@ class OSConfigRenderer(object):
         [interfaces.extend(i.complete_contexts())
          for i in six.itervalues(self.templates)]
         return interfaces
+
+    def get_incomplete_context_data(self, interfaces):
+        '''
+        Return dictionary of relation status of interfaces and any missing
+        required context data. Example:
+            {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+             'zeromq-configuration': {'related': False}}
+        '''
+        incomplete_context_data = {}
+
+        for i in six.itervalues(self.templates):
+            for context in i.contexts:
+                for interface in interfaces:
+                    related = False
+                    if interface in context.interfaces:
+                        related = context.get_related()
+                        missing_data = context.missing_data
+                        if missing_data:
+                            incomplete_context_data[interface] = {'missing_data': missing_data}
+                        if related:
+                            if incomplete_context_data.get(interface):
+                                incomplete_context_data[interface].update({'related': True})
+                            else:
+                                incomplete_context_data[interface] = {'related': True}
+                        else:
+                            incomplete_context_data[interface] = {'related': False}
+        return incomplete_context_data
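A minimal usage sketch for the new helper, assuming configs is an existing OSConfigRenderer with its contexts already registered (the interface names are taken from the docstring example above):

    status = configs.get_incomplete_context_data(['amqp', 'zeromq-configuration'])
    # e.g. {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
    #       'zeromq-configuration': {'related': False}}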

+ 393
- 55
hooks/charmhelpers/contrib/openstack/utils.py View File

@@ -1,5 +1,3 @@
-#!/usr/bin/python
-
 # Copyright 2014-2015 Canonical Limited.
 #
 # This file is part of charm-helpers.
@@ -24,8 +22,11 @@ import subprocess
 import json
 import os
 import sys
+import re

 import six
+import traceback
+import uuid
 import yaml

 from charmhelpers.contrib.network import ip
@@ -35,12 +36,17 @@ from charmhelpers.core import (
 )

 from charmhelpers.core.hookenv import (
+    action_fail,
+    action_set,
     config,
     log as juju_log,
     charm_dir,
     INFO,