Merge: Liberty/Mitaka changes

Junaid Ali, 2 years ago
commit 7280316667
50 changed files with 4377 additions and 3413 deletions
  1. Makefile (+1, -1)
  2. bin/charm_helpers_sync.py (+253, -0)
  3. charm-helpers-sync.yaml (+6, -1)
  4. hooks/charmhelpers/contrib/amulet/deployment.py (+4, -2)
  5. hooks/charmhelpers/contrib/amulet/utils.py (+381, -85)
  6. hooks/charmhelpers/contrib/ansible/__init__.py (+0, -254)
  7. hooks/charmhelpers/contrib/benchmark/__init__.py (+0, -126)
  8. hooks/charmhelpers/contrib/charmhelpers/__init__.py (+0, -208)
  9. hooks/charmhelpers/contrib/charmsupport/__init__.py (+0, -15)
  10. hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0, -360)
  11. hooks/charmhelpers/contrib/charmsupport/volumes.py (+0, -175)
  12. hooks/charmhelpers/contrib/database/__init__.py (+0, -0)
  13. hooks/charmhelpers/contrib/database/mysql.py (+0, -412)
  14. hooks/charmhelpers/contrib/network/ip.py (+56, -24)
  15. hooks/charmhelpers/contrib/network/ovs/__init__.py (+6, -2)
  16. hooks/charmhelpers/contrib/network/ufw.py (+5, -6)
  17. hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+135, -14)
  18. hooks/charmhelpers/contrib/openstack/amulet/utils.py (+421, -13)
  19. hooks/charmhelpers/contrib/openstack/context.py (+316, -77)
  20. hooks/charmhelpers/contrib/openstack/ip.py (+35, -7)
  21. hooks/charmhelpers/contrib/openstack/neutron.py (+61, -20)
  22. hooks/charmhelpers/contrib/openstack/templating.py (+30, -2)
  23. hooks/charmhelpers/contrib/openstack/utils.py (+938, -69)
  24. hooks/charmhelpers/contrib/peerstorage/__init__.py (+0, -268)
  25. hooks/charmhelpers/contrib/python/packages.py (+35, -11)
  26. hooks/charmhelpers/contrib/saltstack/__init__.py (+0, -118)
  27. hooks/charmhelpers/contrib/ssl/__init__.py (+0, -94)
  28. hooks/charmhelpers/contrib/ssl/service.py (+0, -279)
  29. hooks/charmhelpers/contrib/storage/linux/ceph.py (+813, -51)
  30. hooks/charmhelpers/contrib/storage/linux/loopback.py (+10, -0)
  31. hooks/charmhelpers/contrib/storage/linux/utils.py (+8, -7)
  32. hooks/charmhelpers/contrib/templating/__init__.py (+0, -15)
  33. hooks/charmhelpers/contrib/templating/contexts.py (+0, -139)
  34. hooks/charmhelpers/contrib/templating/jinja.py (+0, -39)
  35. hooks/charmhelpers/contrib/templating/pyformat.py (+0, -29)
  36. hooks/charmhelpers/contrib/unison/__init__.py (+0, -313)
  37. hooks/charmhelpers/core/hookenv.py (+219, -12)
  38. hooks/charmhelpers/core/host.py (+298, -75)
  39. hooks/charmhelpers/core/hugepage.py (+71, -0)
  40. hooks/charmhelpers/core/kernel.py (+68, -0)
  41. hooks/charmhelpers/core/services/helpers.py (+30, -5)
  42. hooks/charmhelpers/core/strutils.py (+30, -0)
  43. hooks/charmhelpers/core/templating.py (+21, -8)
  44. hooks/charmhelpers/core/unitdata.py (+61, -17)
  45. hooks/charmhelpers/fetch/__init__.py (+18, -2)
  46. hooks/charmhelpers/fetch/archiveurl.py (+1, -1)
  47. hooks/charmhelpers/fetch/bzrurl.py (+22, -32)
  48. hooks/charmhelpers/fetch/giturl.py (+19, -22)
  49. hooks/pg_gw_utils.py (+3, -2)
  50. unit_tests/test_pg_gw_hooks.py (+2, -1)

Makefile (+1, -1)

@@ -4,7 +4,7 @@ PYTHON := /usr/bin/env python
 virtualenv:
 	virtualenv .venv
 	.venv/bin/pip install flake8 nose coverage mock pyyaml netifaces \
-        netaddr jinja2
+        netaddr jinja2 pyflakes pep8 six pbr funcsigs psutil
 
 lint: virtualenv
 	.venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402

bin/charm_helpers_sync.py (+253, -0)

@@ -0,0 +1,253 @@
+#!/usr/bin/python
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+# Authors:
+#   Adam Gandelman <adamg@ubuntu.com>
+
+import logging
+import optparse
+import os
+import subprocess
+import shutil
+import sys
+import tempfile
+import yaml
+from fnmatch import fnmatch
+
+import six
+
+CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
+
+
+def parse_config(conf_file):
+    if not os.path.isfile(conf_file):
+        logging.error('Invalid config file: %s.' % conf_file)
+        return False
+    return yaml.load(open(conf_file).read())
+
+
+def clone_helpers(work_dir, branch):
+    dest = os.path.join(work_dir, 'charm-helpers')
+    logging.info('Checking out %s to %s.' % (branch, dest))
+    cmd = ['bzr', 'checkout', '--lightweight', branch, dest]
+    subprocess.check_call(cmd)
+    return dest
+
+
+def _module_path(module):
+    return os.path.join(*module.split('.'))
+
+
+def _src_path(src, module):
+    return os.path.join(src, 'charmhelpers', _module_path(module))
+
+
+def _dest_path(dest, module):
+    return os.path.join(dest, _module_path(module))
+
+
+def _is_pyfile(path):
+    return os.path.isfile(path + '.py')
+
+
+def ensure_init(path):
+    '''
+    ensure directories leading up to path are importable, omitting
+    parent directory, eg path='/hooks/helpers/foo'/:
+        hooks/
+        hooks/helpers/__init__.py
+        hooks/helpers/foo/__init__.py
+    '''
+    for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])):
+        _i = os.path.join(d, '__init__.py')
+        if not os.path.exists(_i):
+            logging.info('Adding missing __init__.py: %s' % _i)
+            open(_i, 'wb').close()
+
+
+def sync_pyfile(src, dest):
+    src = src + '.py'
+    src_dir = os.path.dirname(src)
+    logging.info('Syncing pyfile: %s -> %s.' % (src, dest))
+    if not os.path.exists(dest):
+        os.makedirs(dest)
+    shutil.copy(src, dest)
+    if os.path.isfile(os.path.join(src_dir, '__init__.py')):
+        shutil.copy(os.path.join(src_dir, '__init__.py'),
+                    dest)
+    ensure_init(dest)
+
+
+def get_filter(opts=None):
+    opts = opts or []
+    if 'inc=*' in opts:
+        # do not filter any files, include everything
+        return None
+
+    def _filter(dir, ls):
+        incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
+        _filter = []
+        for f in ls:
+            _f = os.path.join(dir, f)
+
+            if not os.path.isdir(_f) and not _f.endswith('.py') and incs:
+                if True not in [fnmatch(_f, inc) for inc in incs]:
+                    logging.debug('Not syncing %s, does not match include '
+                                  'filters (%s)' % (_f, incs))
+                    _filter.append(f)
+                else:
+                    logging.debug('Including file, which matches include '
+                                  'filters (%s): %s' % (incs, _f))
+            elif (os.path.isfile(_f) and not _f.endswith('.py')):
+                logging.debug('Not syncing file: %s' % f)
+                _filter.append(f)
+            elif (os.path.isdir(_f) and not
+                  os.path.isfile(os.path.join(_f, '__init__.py'))):
+                logging.debug('Not syncing directory: %s' % f)
+                _filter.append(f)
+        return _filter
+    return _filter
+
+
+def sync_directory(src, dest, opts=None):
+    if os.path.exists(dest):
+        logging.debug('Removing existing directory: %s' % dest)
+        shutil.rmtree(dest)
+    logging.info('Syncing directory: %s -> %s.' % (src, dest))
+
+    shutil.copytree(src, dest, ignore=get_filter(opts))
+    ensure_init(dest)
+
+
+def sync(src, dest, module, opts=None):
+
+    # Sync charmhelpers/__init__.py for bootstrap code.
+    sync_pyfile(_src_path(src, '__init__'), dest)
+
+    # Sync other __init__.py files in the path leading to module.
+    m = []
+    steps = module.split('.')[:-1]
+    while steps:
+        m.append(steps.pop(0))
+        init = '.'.join(m + ['__init__'])
+        sync_pyfile(_src_path(src, init),
+                    os.path.dirname(_dest_path(dest, init)))
+
+    # Sync the module, or maybe a .py file.
+    if os.path.isdir(_src_path(src, module)):
+        sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
+    elif _is_pyfile(_src_path(src, module)):
+        sync_pyfile(_src_path(src, module),
+                    os.path.dirname(_dest_path(dest, module)))
+    else:
+        logging.warn('Could not sync: %s. Neither a pyfile or directory, '
+                     'does it even exist?' % module)
+
+
+def parse_sync_options(options):
+    if not options:
+        return []
+    return options.split(',')
+
+
+def extract_options(inc, global_options=None):
+    global_options = global_options or []
+    if global_options and isinstance(global_options, six.string_types):
+        global_options = [global_options]
+    if '|' not in inc:
+        return (inc, global_options)
+    inc, opts = inc.split('|')
+    return (inc, parse_sync_options(opts) + global_options)
+
+
+def sync_helpers(include, src, dest, options=None):
+    if not os.path.isdir(dest):
+        os.makedirs(dest)
+
+    global_options = parse_sync_options(options)
+
+    for inc in include:
+        if isinstance(inc, str):
+            inc, opts = extract_options(inc, global_options)
+            sync(src, dest, inc, opts)
+        elif isinstance(inc, dict):
+            # could also do nested dicts here.
+            for k, v in six.iteritems(inc):
+                if isinstance(v, list):
+                    for m in v:
+                        inc, opts = extract_options(m, global_options)
+                        sync(src, dest, '%s.%s' % (k, inc), opts)
+
+if __name__ == '__main__':
+    parser = optparse.OptionParser()
+    parser.add_option('-c', '--config', action='store', dest='config',
+                      default=None, help='helper config file')
+    parser.add_option('-D', '--debug', action='store_true', dest='debug',
+                      default=False, help='debug')
+    parser.add_option('-b', '--branch', action='store', dest='branch',
+                      help='charm-helpers bzr branch (overrides config)')
+    parser.add_option('-d', '--destination', action='store', dest='dest_dir',
+                      help='sync destination dir (overrides config)')
+    (opts, args) = parser.parse_args()
+
+    if opts.debug:
+        logging.basicConfig(level=logging.DEBUG)
+    else:
+        logging.basicConfig(level=logging.INFO)
+
+    if opts.config:
+        logging.info('Loading charm helper config from %s.' % opts.config)
+        config = parse_config(opts.config)
+        if not config:
+            logging.error('Could not parse config from %s.' % opts.config)
+            sys.exit(1)
+    else:
+        config = {}
+
+    if 'branch' not in config:
+        config['branch'] = CHARM_HELPERS_BRANCH
+    if opts.branch:
+        config['branch'] = opts.branch
+    if opts.dest_dir:
+        config['destination'] = opts.dest_dir
+
+    if 'destination' not in config:
+        logging.error('No destination dir. specified as option or config.')
+        sys.exit(1)
+
+    if 'include' not in config:
+        if not args:
+            logging.error('No modules to sync specified as option or config.')
+            sys.exit(1)
+        config['include'] = []
+        [config['include'].append(a) for a in args]
+
+    sync_options = None
+    if 'options' in config:
+        sync_options = config['options']
+    tmpd = tempfile.mkdtemp()
+    try:
+        checkout = clone_helpers(tmpd, config['branch'])
+        sync_helpers(config['include'], checkout, config['destination'],
+                     options=sync_options)
+    except Exception as e:
+        logging.error("Could not sync: %s" % e)
+        raise e
+    finally:
+        logging.debug('Cleaning up %s' % tmpd)
+        shutil.rmtree(tmpd)
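
The script above is normally driven by a YAML config (see charm-helpers-sync.yaml below); its __main__ block does essentially what this minimal sketch does by calling the functions defined above directly. The import path is an assumption for illustration (it presumes bin/ is on sys.path), and running it requires bzr plus access to lp:charm-helpers:

    # Sketch only -- roughly what `python bin/charm_helpers_sync.py -c charm-helpers-sync.yaml` does.
    import shutil
    import tempfile

    from charm_helpers_sync import parse_config, clone_helpers, sync_helpers  # assumes bin/ on sys.path

    config = parse_config('charm-helpers-sync.yaml')  # dict, or False if the file is missing
    if config:
        tmpd = tempfile.mkdtemp()
        try:
            # Lightweight bzr checkout of the configured branch into a temp dir,
            # then copy the requested modules into the charm's destination dir.
            checkout = clone_helpers(tmpd, config.get('branch', 'lp:charm-helpers'))
            sync_helpers(config['include'], checkout, config['destination'],
                         options=config.get('options'))
        finally:
            shutil.rmtree(tmpd)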

charm-helpers-sync.yaml (+6, -1)

@@ -3,5 +3,10 @@ destination: hooks/charmhelpers
 include:
     - core
     - fetch
-    - contrib
+    - contrib.amulet
+    - contrib.hahelpers
+    - contrib.network
+    - contrib.openstack
+    - contrib.python
+    - contrib.storage
     - payload

hooks/charmhelpers/contrib/amulet/deployment.py (+4, -2)

@@ -51,7 +51,8 @@ class AmuletDeployment(object):
         if 'units' not in this_service:
             this_service['units'] = 1
 
-        self.d.add(this_service['name'], units=this_service['units'])
+        self.d.add(this_service['name'], units=this_service['units'],
+                   constraints=this_service.get('constraints'))
 
         for svc in other_services:
             if 'location' in svc:
@@ -64,7 +65,8 @@ class AmuletDeployment(object):
             if 'units' not in svc:
                 svc['units'] = 1
 
-            self.d.add(svc['name'], charm=branch_location, units=svc['units'])
+            self.d.add(svc['name'], charm=branch_location, units=svc['units'],
+                       constraints=svc.get('constraints'))
 
     def _add_relations(self, relations):
         """Add all of the relations for the services."""

hooks/charmhelpers/contrib/amulet/utils.py (+381, -85)

@@ -14,17 +14,25 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 
-import amulet
-import ConfigParser
-import distro_info
 import io
+import json
 import logging
 import os
 import re
-import six
+import socket
+import subprocess
 import sys
 import time
-import urlparse
+import uuid
+
+import amulet
+import distro_info
+import six
+from six.moves import configparser
+if six.PY3:
+    from urllib import parse as urlparse
+else:
+    import urlparse
 
 
 class AmuletUtils(object):
@@ -108,7 +116,7 @@ class AmuletUtils(object):
         # /!\ DEPRECATION WARNING (beisner):
         # New and existing tests should be rewritten to use
         # validate_services_by_name() as it is aware of init systems.
-        self.log.warn('/!\\ DEPRECATION WARNING:  use '
+        self.log.warn('DEPRECATION WARNING:  use '
                       'validate_services_by_name instead of validate_services '
                       'due to init system differences.')
 
@@ -142,19 +150,23 @@ class AmuletUtils(object):
 
             for service_name in services_list:
                 if (self.ubuntu_releases.index(release) >= systemd_switch or
-                        service_name == "rabbitmq-server"):
-                    # init is systemd
+                        service_name in ['rabbitmq-server', 'apache2']):
+                    # init is systemd (or regular sysv)
                     cmd = 'sudo service {} status'.format(service_name)
+                    output, code = sentry_unit.run(cmd)
+                    service_running = code == 0
                 elif self.ubuntu_releases.index(release) < systemd_switch:
                     # init is upstart
                     cmd = 'sudo status {}'.format(service_name)
+                    output, code = sentry_unit.run(cmd)
+                    service_running = code == 0 and "start/running" in output
 
-                output, code = sentry_unit.run(cmd)
                 self.log.debug('{} `{}` returned '
                                '{}'.format(sentry_unit.info['unit_name'],
                                            cmd, code))
-                if code != 0:
-                    return "command `{}` returned {}".format(cmd, str(code))
+                if not service_running:
+                    return u"command `{}` returned {} {}".format(
+                        cmd, output, str(code))
         return None
 
     def _get_config(self, unit, filename):
@@ -164,7 +176,7 @@ class AmuletUtils(object):
         # NOTE(beisner):  by default, ConfigParser does not handle options
         # with no value, such as the flags used in the mysql my.cnf file.
         # https://bugs.python.org/issue7005
-        config = ConfigParser.ConfigParser(allow_no_value=True)
+        config = configparser.ConfigParser(allow_no_value=True)
         config.readfp(io.StringIO(file_contents))
         return config
 
@@ -259,33 +271,52 @@ class AmuletUtils(object):
         """Get last modification time of directory."""
         return sentry_unit.directory_stat(directory)['mtime']
 
-    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
-        """Get process' start time.
+    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
+        """Get start time of a process based on the last modification time
+           of the /proc/pid directory.
 
-           Determine start time of the process based on the last modification
-           time of the /proc/pid directory. If pgrep_full is True, the process
-           name is matched against the full command line.
-           """
-        if pgrep_full:
-            cmd = 'pgrep -o -f {}'.format(service)
-        else:
-            cmd = 'pgrep -o {}'.format(service)
-        cmd = cmd + '  | grep  -v pgrep || exit 0'
-        cmd_out = sentry_unit.run(cmd)
-        self.log.debug('CMDout: ' + str(cmd_out))
-        if cmd_out[0]:
-            self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
-            proc_dir = '/proc/{}'.format(cmd_out[0].strip())
-            return self._get_dir_mtime(sentry_unit, proc_dir)
+        :sentry_unit:  The sentry unit to check for the service on
+        :service:  service name to look for in process table
+        :pgrep_full:  [Deprecated] Use full command line search mode with pgrep
+        :returns:  epoch time of service process start
+        :param commands:  list of bash commands
+        :param sentry_units:  list of sentry unit pointers
+        :returns:  None if successful; Failure message otherwise
+        """
+        if pgrep_full is not None:
+            # /!\ DEPRECATION WARNING (beisner):
+            # No longer implemented, as pidof is now used instead of pgrep.
+            # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+            self.log.warn('DEPRECATION WARNING:  pgrep_full bool is no '
+                          'longer implemented re: lp 1474030.')
+
+        pid_list = self.get_process_id_list(sentry_unit, service)
+        pid = pid_list[0]
+        proc_dir = '/proc/{}'.format(pid)
+        self.log.debug('Pid for {} on {}: {}'.format(
+            service, sentry_unit.info['unit_name'], pid))
+
+        return self._get_dir_mtime(sentry_unit, proc_dir)
 
     def service_restarted(self, sentry_unit, service, filename,
-                          pgrep_full=False, sleep_time=20):
+                          pgrep_full=None, sleep_time=20):
         """Check if service was restarted.
 
           Compare a service's start time vs a file's last modification time
           (such as a config file for that service) to determine if the service
           has been restarted.
           """
+        # /!\ DEPRECATION WARNING (beisner):
+        # This method is prone to races in that no before-time is known.
+        # Use validate_service_config_changed instead.
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep.  pgrep_full is still passed through to ensure
+        # deprecation WARNS.  lp1474030
+        self.log.warn('DEPRECATION WARNING:  use '
+                      'validate_service_config_changed instead of '
+                      'service_restarted due to known races.')
+
         time.sleep(sleep_time)
         if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
                 self._get_file_mtime(sentry_unit, filename)):
@@ -294,78 +325,122 @@ class AmuletUtils(object):
             return False
 
     def service_restarted_since(self, sentry_unit, mtime, service,
-                                pgrep_full=False, sleep_time=20,
-                                retry_count=2):
+                                pgrep_full=None, sleep_time=20,
+                                retry_count=30, retry_sleep_time=10):
         """Check if service was been started after a given time.
 
         Args:
           sentry_unit (sentry): The sentry unit to check for the service on
          mtime (float): The epoch time to check against
          service (string): service name to look for in process table
-          pgrep_full (boolean): Use full command line search mode with pgrep
-          sleep_time (int): Seconds to sleep before looking for process
-          retry_count (int): If service is not found, how many times to retry
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
 
         Returns:
          bool: True if service found and its start time it newer than mtime,
                False if service is older than mtime or if service was
                not found.
        """
-        self.log.debug('Checking %s restarted since %s' % (service, mtime))
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep.  pgrep_full is still passed through to ensure
+        # deprecation WARNS.  lp1474030
+
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s service restarted since %s on '
+                       '%s' % (service, mtime, unit_name))
         time.sleep(sleep_time)
-        proc_start_time = self._get_proc_start_time(sentry_unit, service,
-                                                    pgrep_full)
-        while retry_count > 0 and not proc_start_time:
-            self.log.debug('No pid file found for service %s, will retry %i '
-                           'more times' % (service, retry_count))
-            time.sleep(30)
-            proc_start_time = self._get_proc_start_time(sentry_unit, service,
-                                                        pgrep_full)
-            retry_count = retry_count - 1
+        proc_start_time = None
+        tries = 0
+        while tries <= retry_count and not proc_start_time:
+            try:
+                proc_start_time = self._get_proc_start_time(sentry_unit,
+                                                            service,
+                                                            pgrep_full)
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'OK'.format(tries, service, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, proc may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'failed\n{}'.format(tries, service,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
 
         if not proc_start_time:
             self.log.warn('No proc start time found, assuming service did '
                           'not start')
             return False
         if proc_start_time >= mtime:
-            self.log.debug('proc start time is newer than provided mtime'
-                           '(%s >= %s)' % (proc_start_time, mtime))
+            self.log.debug('Proc start time is newer than provided mtime'
+                           '(%s >= %s) on %s (OK)' % (proc_start_time,
+                                                      mtime, unit_name))
             return True
         else:
-            self.log.warn('proc start time (%s) is older than provided mtime '
-                          '(%s), service did not restart' % (proc_start_time,
-                                                             mtime))
+            self.log.warn('Proc start time (%s) is older than provided mtime '
+                          '(%s) on %s, service did not '
+                          'restart' % (proc_start_time, mtime, unit_name))
             return False
 
     def config_updated_since(self, sentry_unit, filename, mtime,
-                             sleep_time=20):
+                             sleep_time=20, retry_count=30,
+                             retry_sleep_time=10):
         """Check if file was modified after a given time.
 
         Args:
          sentry_unit (sentry): The sentry unit to check the file mtime on
          filename (string): The file to check mtime of
          mtime (float): The epoch time to check against
-          sleep_time (int): Seconds to sleep before looking for process
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
 
         Returns:
          bool: True if file was modified more recently than mtime, False if
-                file was modified before mtime,
+                file was modified before mtime, or if file not found.
        """
-        self.log.debug('Checking %s updated since %s' % (filename, mtime))
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s updated since %s on '
+                       '%s' % (filename, mtime, unit_name))
         time.sleep(sleep_time)
-        file_mtime = self._get_file_mtime(sentry_unit, filename)
+        file_mtime = None
+        tries = 0
+        while tries <= retry_count and not file_mtime:
+            try:
+                file_mtime = self._get_file_mtime(sentry_unit, filename)
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'OK'.format(tries, filename, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, file may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'failed\n{}'.format(tries, filename,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not file_mtime:
+            self.log.warn('Could not determine file mtime, assuming '
+                          'file does not exist')
+            return False
+
         if file_mtime >= mtime:
             self.log.debug('File mtime is newer than provided mtime '
-                           '(%s >= %s)' % (file_mtime, mtime))
+                           '(%s >= %s) on %s (OK)' % (file_mtime,
+                                                      mtime, unit_name))
             return True
         else:
-            self.log.warn('File mtime %s is older than provided mtime %s'
-                          % (file_mtime, mtime))
+            self.log.warn('File mtime is older than provided mtime'
+                          '(%s < on %s) on %s' % (file_mtime,
+                                                  mtime, unit_name))
             return False
 
     def validate_service_config_changed(self, sentry_unit, mtime, service,
-                                        filename, pgrep_full=False,
-                                        sleep_time=20, retry_count=2):
+                                        filename, pgrep_full=None,
+                                        sleep_time=20, retry_count=30,
+                                        retry_sleep_time=10):
         """Check service and file were updated after mtime
 
         Args:
@@ -373,9 +448,10 @@ class AmuletUtils(object):
          mtime (float): The epoch time to check against
          service (string): service name to look for in process table
          filename (string): The file to check mtime of
-          pgrep_full (boolean): Use full command line search mode with pgrep
-          sleep_time (int): Seconds to sleep before looking for process
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep in seconds to pass to test helpers
          retry_count (int): If service is not found, how many times to retry
+          retry_sleep_time (int): Time in seconds to wait between retries
 
         Typical Usage:
            u = OpenStackAmuletUtils(ERROR)
@@ -392,15 +468,27 @@ class AmuletUtils(object):
                mtime, False if service is older than mtime or if service was
                not found or if filename was modified before mtime.
        """
-        self.log.debug('Checking %s restarted since %s' % (service, mtime))
-        time.sleep(sleep_time)
-        service_restart = self.service_restarted_since(sentry_unit, mtime,
-                                                       service,
-                                                       pgrep_full=pgrep_full,
-                                                       sleep_time=0,
-                                                       retry_count=retry_count)
-        config_update = self.config_updated_since(sentry_unit, filename, mtime,
-                                                  sleep_time=0)
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep.  pgrep_full is still passed through to ensure
+        # deprecation WARNS.  lp1474030
+
+        service_restart = self.service_restarted_since(
+            sentry_unit, mtime,
+            service,
+            pgrep_full=pgrep_full,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
+        config_update = self.config_updated_since(
+            sentry_unit,
+            filename,
+            mtime,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
         return service_restart and config_update
 
     def get_sentry_time(self, sentry_unit):
@@ -418,7 +506,6 @@ class AmuletUtils(object):
         """Return a list of all Ubuntu releases in order of release."""
         _d = distro_info.UbuntuDistroInfo()
         _release_list = _d.all
-        self.log.debug('Ubuntu release list: {}'.format(_release_list))
         return _release_list
 
     def file_to_url(self, file_rel_path):
@@ -450,15 +537,20 @@ class AmuletUtils(object):
                                         cmd, code, output))
         return None
 
-    def get_process_id_list(self, sentry_unit, process_name):
+    def get_process_id_list(self, sentry_unit, process_name,
+                            expect_success=True):
         """Get a list of process ID(s) from a single sentry juju unit
         for a single process name.
 
-        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :param sentry_unit: Amulet sentry instance (juju unit)
         :param process_name: Process name
+        :param expect_success: If False, expect the PID to be missing,
+            raise if it is present.
         :returns: List of process IDs
         """
-        cmd = 'pidof {}'.format(process_name)
+        cmd = 'pidof -x {}'.format(process_name)
+        if not expect_success:
+            cmd += " || exit 0 && exit 1"
         output, code = sentry_unit.run(cmd)
         if code != 0:
             msg = ('{} `{}` returned {} '
@@ -467,14 +559,23 @@ class AmuletUtils(object):
             amulet.raise_status(amulet.FAIL, msg=msg)
         return str(output).split()
 
-    def get_unit_process_ids(self, unit_processes):
+    def get_unit_process_ids(self, unit_processes, expect_success=True):
         """Construct a dict containing unit sentries, process names, and
-        process IDs."""
+        process IDs.
+
+        :param unit_processes: A dictionary of Amulet sentry instance
+            to list of process names.
+        :param expect_success: if False expect the processes to not be
+            running, raise if they are.
+        :returns: Dictionary of Amulet sentry instance to dictionary
+            of process names to PIDs.
+        """
         pid_dict = {}
-        for sentry_unit, process_list in unit_processes.iteritems():
+        for sentry_unit, process_list in six.iteritems(unit_processes):
             pid_dict[sentry_unit] = {}
             for process in process_list:
-                pids = self.get_process_id_list(sentry_unit, process)
+                pids = self.get_process_id_list(
+                    sentry_unit, process, expect_success=expect_success)
                 pid_dict[sentry_unit].update({process: pids})
         return pid_dict
 
@@ -488,7 +589,7 @@ class AmuletUtils(object):
             return ('Unit count mismatch.  expected, actual: {}, '
                     '{} '.format(len(expected), len(actual)))
 
-        for (e_sentry, e_proc_names) in expected.iteritems():
+        for (e_sentry, e_proc_names) in six.iteritems(expected):
             e_sentry_name = e_sentry.info['unit_name']
             if e_sentry in actual.keys():
                 a_proc_names = actual[e_sentry]
@@ -500,22 +601,40 @@ class AmuletUtils(object):
                 return ('Process name count mismatch.  expected, actual: {}, '
                         '{}'.format(len(expected), len(actual)))
 
-            for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
+            for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
                     zip(e_proc_names.items(), a_proc_names.items()):
                 if e_proc_name != a_proc_name:
                     return ('Process name mismatch.  expected, actual: {}, '
                             '{}'.format(e_proc_name, a_proc_name))
 
                 a_pids_length = len(a_pids)
-                if e_pids_length != a_pids_length:
-                    return ('PID count mismatch. {} ({}) expected, actual: '
+                fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
                             '{}, {} ({})'.format(e_sentry_name, e_proc_name,
-                                                 e_pids_length, a_pids_length,
+                                                 e_pids, a_pids_length,
                                                  a_pids))
+
+                # If expected is a list, ensure at least one PID quantity match
+                if isinstance(e_pids, list) and \
+                        a_pids_length not in e_pids:
+                    return fail_msg
+                # If expected is not bool and not list,
+                # ensure PID quantities match
+                elif not isinstance(e_pids, bool) and \
+                        not isinstance(e_pids, list) and \
+                        a_pids_length != e_pids:
+                    return fail_msg
+                # If expected is bool True, ensure 1 or more PIDs exist
+                elif isinstance(e_pids, bool) and \
+                        e_pids is True and a_pids_length < 1:
+                    return fail_msg
+                # If expected is bool False, ensure 0 PIDs exist
+                elif isinstance(e_pids, bool) and \
+                        e_pids is False and a_pids_length != 0:
+                    return fail_msg
                 else:
                     self.log.debug('PID check OK: {} {} {}: '
                                    '{}'.format(e_sentry_name, e_proc_name,
-                                               e_pids_length, a_pids))
+                                               e_pids, a_pids))
         return None
 
     def validate_list_of_identical_dicts(self, list_of_dicts):
@@ -531,3 +650,180 @@ class AmuletUtils(object):
             return 'Dicts within list are not identical'
 
         return None
+
+    def validate_sectionless_conf(self, file_contents, expected):
+        """A crude conf parser.  Useful to inspect configuration files which
+        do not have section headers (as would be necessary in order to use
+        the configparser).  Such as openstack-dashboard or rabbitmq confs."""
+        for line in file_contents.split('\n'):
+            if '=' in line:
+                args = line.split('=')
+                if len(args) <= 1:
+                    continue
+                key = args[0].strip()
+                value = args[1].strip()
+                if key in expected.keys():
+                    if expected[key] != value:
+                        msg = ('Config mismatch.  Expected, actual:  {}, '
+                               '{}'.format(expected[key], value))
+                        amulet.raise_status(amulet.FAIL, msg=msg)
+
+    def get_unit_hostnames(self, units):
+        """Return a dict of juju unit names to hostnames."""
+        host_names = {}
+        for unit in units:
+            host_names[unit.info['unit_name']] = \
+                str(unit.file_contents('/etc/hostname').strip())
+        self.log.debug('Unit host names: {}'.format(host_names))
+        return host_names
+
+    def run_cmd_unit(self, sentry_unit, cmd):
+        """Run a command on a unit, return the output and exit code."""
+        output, code = sentry_unit.run(cmd)
+        if code == 0:
+            self.log.debug('{} `{}` command returned {} '
+                           '(OK)'.format(sentry_unit.info['unit_name'],
+                                         cmd, code))
+        else:
+            msg = ('{} `{}` command returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+        return str(output), code
+
+    def file_exists_on_unit(self, sentry_unit, file_name):
+        """Check if a file exists on a unit."""
+        try:
+            sentry_unit.file_stat(file_name)
+            return True
+        except IOError:
+            return False
+        except Exception as e:
+            msg = 'Error checking file {}: {}'.format(file_name, e)
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+    def file_contents_safe(self, sentry_unit, file_name,
+                           max_wait=60, fatal=False):
+        """Get file contents from a sentry unit.  Wrap amulet file_contents
+        with retry logic to address races where a file checks as existing,
+        but no longer exists by the time file_contents is called.
+        Return None if file not found. Optionally raise if fatal is True."""
+        unit_name = sentry_unit.info['unit_name']
+        file_contents = False
+        tries = 0
+        while not file_contents and tries < (max_wait / 4):
+            try:
+                file_contents = sentry_unit.file_contents(file_name)
+            except IOError:
+                self.log.debug('Attempt {} to open file {} from {} '
+                               'failed'.format(tries, file_name,
+                                               unit_name))
+                time.sleep(4)
+                tries += 1
+
+        if file_contents:
+            return file_contents
+        elif not fatal:
+            return None
+        elif fatal:
+            msg = 'Failed to get file contents from unit.'
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def port_knock_tcp(self, host="localhost", port=22, timeout=15):
+        """Open a TCP socket to check for a listening sevice on a host.
+
+        :param host: host name or IP address, default to localhost
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :returns: True if successful, False if connect failed
+        """
+
+        # Resolve host name if possible
+        try:
+            connect_host = socket.gethostbyname(host)
+            host_human = "{} ({})".format(connect_host, host)
+        except socket.error as e:
+            self.log.warn('Unable to resolve address: '
+                          '{} ({}) Trying anyway!'.format(host, e))
+            connect_host = host
+            host_human = connect_host
+
+        # Attempt socket connection
+        try:
+            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            knock.settimeout(timeout)
+            knock.connect((connect_host, port))
+            knock.close()
+            self.log.debug('Socket connect OK for host '
+                           '{} on port {}.'.format(host_human, port))
+            return True
+        except socket.error as e:
+            self.log.debug('Socket connect FAIL for'
+                           ' {} port {} ({})'.format(host_human, port, e))
+            return False
+
+    def port_knock_units(self, sentry_units, port=22,
+                         timeout=15, expect_success=True):
+        """Open a TCP socket to check for a listening sevice on each
+        listed juju unit.
+
+        :param sentry_units: list of sentry unit pointers
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :expect_success: True by default, set False to invert logic
+        :returns: None if successful, Failure message otherwise
+        """
+        for unit in sentry_units:
+            host = unit.info['public-address']
+            connected = self.port_knock_tcp(host, port, timeout)
+            if not connected and expect_success:
+                return 'Socket connect failed.'
+            elif connected and not expect_success:
+                return 'Socket connected unexpectedly.'
+
+    def get_uuid_epoch_stamp(self):
+        """Returns a stamp string based on uuid4 and epoch time.  Useful in
+        generating test messages which need to be unique-ish."""
+        return '[{}-{}]'.format(uuid.uuid4(), time.time())
+
+# amulet juju action helpers:
+    def run_action(self, unit_sentry, action,
+                   _check_output=subprocess.check_output,
+                   params=None):
+        """Run the named action on a given unit sentry.
+
+        params a dict of parameters to use
+        _check_output parameter is used for dependency injection.
+
+        @return action_id.
+        """
+        unit_id = unit_sentry.info["unit_name"]
+        command = ["juju", "action", "do", "--format=json", unit_id, action]
+        if params is not None:
+            for key, value in params.iteritems():
+                command.append("{}={}".format(key, value))
+        self.log.info("Running command: %s\n" % " ".join(command))
+        output = _check_output(command, universal_newlines=True)
+        data = json.loads(output)
+        action_id = data[u'Action queued with id']
+        return action_id
+
+    def wait_on_action(self, action_id, _check_output=subprocess.check_output):
+        """Wait for a given action, returning if it completed or not.
+
+        _check_output parameter is used for dependency injection.
+        """
+        command = ["juju", "action", "fetch", "--format=json", "--wait=0",
+                   action_id]
+        output = _check_output(command, universal_newlines=True)
+        data = json.loads(output)
+        return data.get(u"status") == "completed"
+
+    def status_get(self, unit):
+        """Return the current service status of this unit."""
+        raw_status, return_code = unit.run(
+            "status-get --format=json --include-data")
+        if return_code != 0:
+            return ("unknown", "")
+        status = json.loads(raw_status)
+        return (status["status"], status["message"])
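
With the rework above, the PID-count check (validate_unit_process_ids in charm-helpers) accepts, per process name, an exact int, a list of acceptable counts, or a bool (True: at least one PID, False: none). A sketch of the expected/actual maps it compares, assuming 'utils' is an AmuletUtils instance and 'unit_sentry' an amulet sentry from a deployed test (unit and process names are illustrative):

    # Illustrative expected-PID spec matching the validation branches above.
    expected = {
        unit_sentry: {
            'nova-api': 3,            # exactly 3 PIDs
            'nova-compute': [1, 2],   # 1 or 2 PIDs both pass
            'apache2': True,          # one or more PIDs must exist
            'retired-daemon': False,  # no PIDs may exist
        },
    }
    actual = utils.get_unit_process_ids(
        {unit_sentry: list(expected[unit_sentry].keys())})
    # Returns None on success, or a failure message string.
    assert utils.validate_unit_process_ids(expected, actual) is None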

hooks/charmhelpers/contrib/ansible/__init__.py (+0, -254)

@@ -1,254 +0,0 @@
1
-# Copyright 2014-2015 Canonical Limited.
2
-#
3
-# This file is part of charm-helpers.
4
-#
5
-# charm-helpers is free software: you can redistribute it and/or modify
6
-# it under the terms of the GNU Lesser General Public License version 3 as
7
-# published by the Free Software Foundation.
8
-#
9
-# charm-helpers is distributed in the hope that it will be useful,
10
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
11
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
-# GNU Lesser General Public License for more details.
13
-#
14
-# You should have received a copy of the GNU Lesser General Public License
15
-# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
16
-
17
-# Copyright 2013 Canonical Ltd.
18
-#
19
-# Authors:
20
-#  Charm Helpers Developers <juju@lists.ubuntu.com>
21
-"""Charm Helpers ansible - declare the state of your machines.
22
-
23
-This helper enables you to declare your machine state, rather than
24
-program it procedurally (and have to test each change to your procedures).
25
-Your install hook can be as simple as::
26
-
27
-    {{{
28
-    import charmhelpers.contrib.ansible
29
-
30
-
31
-    def install():
32
-        charmhelpers.contrib.ansible.install_ansible_support()
33
-        charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml')
34
-    }}}
35
-
36
-and won't need to change (nor will its tests) when you change the machine
37
-state.
38
-
39
-All of your juju config and relation-data are available as template
40
-variables within your playbooks and templates. An install playbook looks
41
-something like::
42
-
43
-    {{{
44
-    ---
45
-    - hosts: localhost
46
-      user: root
47
-
48
-      tasks:
49
-        - name: Add private repositories.
50
-          template:
51
-            src: ../templates/private-repositories.list.jinja2
52
-            dest: /etc/apt/sources.list.d/private.list
53
-
54
-        - name: Update the cache.
55
-          apt: update_cache=yes
56
-
57
-        - name: Install dependencies.
58
-          apt: pkg={{ item }}
59
-          with_items:
60
-            - python-mimeparse
61
-            - python-webob
62
-            - sunburnt
63
-
64
-        - name: Setup groups.
65
-          group: name={{ item.name }} gid={{ item.gid }}
66
-          with_items:
67
-            - { name: 'deploy_user', gid: 1800 }
68
-            - { name: 'service_user', gid: 1500 }
69
-
70
-      ...
71
-    }}}
72
-
73
-Read more online about `playbooks`_ and standard ansible `modules`_.
74
-
75
-.. _playbooks: http://www.ansibleworks.com/docs/playbooks.html
76
-.. _modules: http://www.ansibleworks.com/docs/modules.html
77
-
78
-A further feature os the ansible hooks is to provide a light weight "action"
79
-scripting tool. This is a decorator that you apply to a function, and that
80
-function can now receive cli args, and can pass extra args to the playbook.
81
-
82
-e.g.
83
-
84
-
85
-@hooks.action()
86
-def some_action(amount, force="False"):
87
-    "Usage: some-action AMOUNT [force=True]"  # <-- shown on error
88
-    # process the arguments
89
-    # do some calls
90
-    # return extra-vars to be passed to ansible-playbook
91
-    return {
92
-        'amount': int(amount),
93
-        'type': force,
94
-    }
95
-
96
-You can now create a symlink to hooks.py that can be invoked like a hook, but
97
-with cli params:
98
-
99
-# link actions/some-action to hooks/hooks.py
100
-
101
-actions/some-action amount=10 force=true
102
-
103
-"""
104
-import os
105
-import stat
106
-import subprocess
107
-import functools
108
-
109
-import charmhelpers.contrib.templating.contexts
110
-import charmhelpers.core.host
111
-import charmhelpers.core.hookenv
112
-import charmhelpers.fetch
113
-
114
-
115
-charm_dir = os.environ.get('CHARM_DIR', '')
116
-ansible_hosts_path = '/etc/ansible/hosts'
117
-# Ansible will automatically include any vars in the following
118
-# file in its inventory when run locally.
119
-ansible_vars_path = '/etc/ansible/host_vars/localhost'
120
-
121
-
122
-def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'):
123
-    """Installs the ansible package.
124
-
125
-    By default it is installed from the `PPA`_ linked from
126
-    the ansible `website`_ or from a ppa specified by a charm config..
127
-
128
-    .. _PPA: https://launchpad.net/~rquillo/+archive/ansible
129
-    .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu
130
-
131
-    If from_ppa is empty, you must ensure that the package is available
132
-    from a configured repository.
133
-    """
134
-    if from_ppa:
135
-        charmhelpers.fetch.add_source(ppa_location)
136
-        charmhelpers.fetch.apt_update(fatal=True)
137
-    charmhelpers.fetch.apt_install('ansible')
138
-    with open(ansible_hosts_path, 'w+') as hosts_file:
139
-        hosts_file.write('localhost ansible_connection=local')
140
-
141
-
142
-def apply_playbook(playbook, tags=None, extra_vars=None):
143
-    tags = tags or []
144
-    tags = ",".join(tags)
145
-    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
146
-        ansible_vars_path, namespace_separator='__',
147
-        allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR))
148
-
149
-    # we want ansible's log output to be unbuffered
150
-    env = os.environ.copy()
151
-    env['PYTHONUNBUFFERED'] = "1"
152
-    call = [
153
-        'ansible-playbook',
154
-        '-c',
155
-        'local',
156
-        playbook,
157
-    ]
158
-    if tags:
159
-        call.extend(['--tags', '{}'.format(tags)])
160
-    if extra_vars:
161
-        extra = ["%s=%s" % (k, v) for k, v in extra_vars.items()]
162
-        call.extend(['--extra-vars', " ".join(extra)])
163
-    subprocess.check_call(call, env=env)
164
-
165
-
166
-class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
167
-    """Run a playbook with the hook-name as the tag.
168
-
169
-    This helper builds on the standard hookenv.Hooks helper,
170
-    but additionally runs the playbook with the hook-name specified
171
-    using --tags (ie. running all the tasks tagged with the hook-name).
172
-
173
-    Example::
174
-
175
-        hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml')
176
-
177
-        # All the tasks within my_machine_state.yaml tagged with 'install'
178
-        # will be run automatically after do_custom_work()
179
-        @hooks.hook()
180
-        def install():
181
-            do_custom_work()
182
-
183
-        # For most of your hooks, you won't need to do anything other
184
-        # than run the tagged tasks for the hook:
185
-        @hooks.hook('config-changed', 'start', 'stop')
186
-        def just_use_playbook():
187
-            pass
188
-
189
-        # As a convenience, you can avoid the above noop function by specifying
190
-        # the hooks which are handled by ansible-only and they'll be registered
191
-        # for you:
192
-        # hooks = AnsibleHooks(
193
-        #     'playbooks/my_machine_state.yaml',
194
-        #     default_hooks=['config-changed', 'start', 'stop'])
195
-
196
-        if __name__ == "__main__":
197
-            # execute a hook based on the name the program is called by
198
-            hooks.execute(sys.argv)
199
-
200
-    """
201
-
202
-    def __init__(self, playbook_path, default_hooks=None):
203
-        """Register any hooks handled by ansible."""
204
-        super(AnsibleHooks, self).__init__()
205
-
206
-        self._actions = {}
207
-        self.playbook_path = playbook_path
208
-
209
-        default_hooks = default_hooks or []
210
-
211
-        def noop(*args, **kwargs):
212
-            pass
213
-
214
-        for hook in default_hooks:
215
-            self.register(hook, noop)
216
-
217
-    def register_action(self, name, function):
218
-        """Register a hook"""
219
-        self._actions[name] = function
220
-
221
-    def execute(self, args):
222
-        """Execute the hook followed by the playbook using the hook as tag."""
223
-        hook_name = os.path.basename(args[0])
224
-        extra_vars = None
225
-        if hook_name in self._actions:
226
-            extra_vars = self._actions[hook_name](args[1:])
227
-        else:
228
-            super(AnsibleHooks, self).execute(args)
229
-
230
-        charmhelpers.contrib.ansible.apply_playbook(
231
-            self.playbook_path, tags=[hook_name], extra_vars=extra_vars)
232
-
233
-    def action(self, *action_names):
234
-        """Decorator, registering them as actions"""
235
-        def action_wrapper(decorated):
236
-
237
-            @functools.wraps(decorated)
238
-            def wrapper(argv):
239
-                kwargs = dict(arg.split('=') for arg in argv)
240
-                try:
241
-                    return decorated(**kwargs)
242
-                except TypeError as e:
243
-                    if decorated.__doc__:
244
-                        e.args += (decorated.__doc__,)
245
-                    raise
246
-
247
-            self.register_action(decorated.__name__, wrapper)
248
-            if '_' in decorated.__name__:
249
-                self.register_action(
250
-                    decorated.__name__.replace('_', '-'), wrapper)
251
-
252
-            return wrapper
253
-
254
-        return action_wrapper
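A short sketch of the action() decorator defined above, reusing the playbook path from the class docstring; the action name and its parameter are hypothetical.

    import sys
    from charmhelpers.contrib.ansible import AnsibleHooks

    hooks = AnsibleHooks('playbooks/my_machine_state.yaml',
                         default_hooks=['config-changed', 'start', 'stop'])

    # Registered as both 'resize_cache' and 'resize-cache'; the returned dict
    # is passed to the playbook via --extra-vars when the action is executed.
    @hooks.action()
    def resize_cache(cache_size_mb):
        """Resize the cache. Parameters: cache_size_mb=<int>"""
        return {'cache_size_mb': cache_size_mb}

    if __name__ == '__main__':
        hooks.execute(sys.argv)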

+ 0
- 126
hooks/charmhelpers/contrib/benchmark/__init__.py View File

@@ -1,126 +0,0 @@
1
-# Copyright 2014-2015 Canonical Limited.
2
-#
3
-# This file is part of charm-helpers.
4
-#
5
-# charm-helpers is free software: you can redistribute it and/or modify
6
-# it under the terms of the GNU Lesser General Public License version 3 as
7
-# published by the Free Software Foundation.
8
-#
9
-# charm-helpers is distributed in the hope that it will be useful,
10
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
11
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
-# GNU Lesser General Public License for more details.
13
-#
14
-# You should have received a copy of the GNU Lesser General Public License
15
-# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
16
-
17
-import subprocess
18
-import time
19
-import os
20
-from distutils.spawn import find_executable
21
-
22
-from charmhelpers.core.hookenv import (
23
-    in_relation_hook,
24
-    relation_ids,
25
-    relation_set,
26
-    relation_get,
27
-)
28
-
29
-
30
-def action_set(key, val):
31
-    if find_executable('action-set'):
32
-        action_cmd = ['action-set']
33
-
34
-        if isinstance(val, dict):
35
-            for k, v in iter(val.items()):
36
-                action_set('%s.%s' % (key, k), v)
37
-            return True
38
-
39
-        action_cmd.append('%s=%s' % (key, val))
40
-        subprocess.check_call(action_cmd)
41
-        return True
42
-    return False
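A brief illustration of how action_set() above flattens a nested dict into dotted keys when the action-set tool is on the PATH; the values mirror the composite-score example in the Benchmark docstring below.

    # Results in three calls to the action-set CLI:
    #   action-set meta.composite.value=16.7
    #   action-set meta.composite.units=trans/sec
    #   action-set meta.composite.direction=desc
    action_set('meta.composite',
               {'value': 16.70, 'units': 'trans/sec', 'direction': 'desc'})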
43
-
44
-
45
-class Benchmark():
46
-    """
47
-    Helper class for the `benchmark` interface.
48
-
49
-    :param list actions: Define the actions that are also benchmarks
50
-
51
-    From inside the benchmark-relation-changed hook, you would call:
52
-    Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])
53
-
54
-    Examples:
55
-
56
-        siege = Benchmark(['siege'])
57
-        siege.start()
58
-        [... run siege ...]
59
-        # The higher the score, the better the benchmark
60
-        siege.set_composite_score(16.70, 'trans/sec', 'desc')
61
-        siege.finish()
62
-
63
-
64
-    """
65
-
66
-    BENCHMARK_CONF = '/etc/benchmark.conf'  # Replaced in testing
67
-
68
-    required_keys = [
69
-        'hostname',
70
-        'port',
71
-        'graphite_port',
72
-        'graphite_endpoint',
73
-        'api_port'
74
-    ]
75
-
76
-    def __init__(self, benchmarks=None):
77
-        if in_relation_hook():
78
-            if benchmarks is not None:
79
-                for rid in sorted(relation_ids('benchmark')):
80
-                    relation_set(relation_id=rid, relation_settings={
81
-                        'benchmarks': ",".join(benchmarks)
82
-                    })
83
-
84
-            # Check the relation data
85
-            config = {}
86
-            for key in self.required_keys:
87
-                val = relation_get(key)
88
-                if val is not None:
89
-                    config[key] = val
90
-                else:
91
-                    # We don't have all of the required keys
92
-                    config = {}
93
-                    break
94
-
95
-            if len(config):
96
-                with open(self.BENCHMARK_CONF, 'w') as f:
97
-                    for key, val in iter(config.items()):
98
-                        f.write("%s=%s\n" % (key, val))
99
-
100
-    @staticmethod
101
-    def start():
102
-        action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
103
-
104
-        """
105
-        If the collectd charm is also installed, tell it to send a snapshot
106
-        of the current profile data.
107
-        """
108
-        COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
109
-        if os.path.exists(COLLECT_PROFILE_DATA):
110
-            subprocess.check_output([COLLECT_PROFILE_DATA])
111
-
112
-    @staticmethod
113
-    def finish():
114
-        action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
115
-
116
-    @staticmethod
117
-    def set_composite_score(value, units, direction='asc'):
118
-        """
119
-        Set the composite score for a benchmark run. This is a single number
120
-        representative of the benchmark results. This could be the most
121
-        important metric, or an amalgamation of metric scores.
122
-        """
123
-        return action_set(
124
-            "meta.composite",
125
-            {'value': value, 'units': units, 'direction': direction}
126
-        )

+ 0
- 208
hooks/charmhelpers/contrib/charmhelpers/__init__.py View File

@@ -1,208 +0,0 @@
1
-# Copyright 2014-2015 Canonical Limited.
2
-#
3
-# This file is part of charm-helpers.
4
-#
5
-# charm-helpers is free software: you can redistribute it and/or modify
6
-# it under the terms of the GNU Lesser General Public License version 3 as
7
-# published by the Free Software Foundation.
8
-#
9
-# charm-helpers is distributed in the hope that it will be useful,
10
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
11
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
-# GNU Lesser General Public License for more details.
13
-#
14
-# You should have received a copy of the GNU Lesser General Public License
15
-# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
16
-
17
-# Copyright 2012 Canonical Ltd.  This software is licensed under the
18
-# GNU Affero General Public License version 3 (see the file LICENSE).
19
-
20
-import warnings
21
-warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning)  # noqa
22
-
23
-import operator
24
-import tempfile
25
-import time
26
-import yaml
27
-import subprocess
28
-
29
-import six
30
-if six.PY3:
31
-    from urllib.request import urlopen
32
-    from urllib.error import (HTTPError, URLError)
33
-else:
34
-    from urllib2 import (urlopen, HTTPError, URLError)
35
-
36
-"""Helper functions for writing Juju charms in Python."""
37
-
38
-__metaclass__ = type
39
-__all__ = [
40
-    # 'get_config',             # core.hookenv.config()
41
-    # 'log',                    # core.hookenv.log()
42
-    # 'log_entry',              # core.hookenv.log()
43
-    # 'log_exit',               # core.hookenv.log()
44
-    # 'relation_get',           # core.hookenv.relation_get()
45
-    # 'relation_set',           # core.hookenv.relation_set()
46
-    # 'relation_ids',           # core.hookenv.relation_ids()
47
-    # 'relation_list',          # core.hookenv.relation_units()
48
-    # 'config_get',             # core.hookenv.config()
49
-    # 'unit_get',               # core.hookenv.unit_get()
50
-    # 'open_port',              # core.hookenv.open_port()
51
-    # 'close_port',             # core.hookenv.close_port()
52
-    # 'service_control',        # core.host.service()
53
-    'unit_info',              # client-side, NOT IMPLEMENTED
54
-    'wait_for_machine',       # client-side, NOT IMPLEMENTED
55
-    'wait_for_page_contents',  # client-side, NOT IMPLEMENTED
56
-    'wait_for_relation',      # client-side, NOT IMPLEMENTED
57
-    'wait_for_unit',          # client-side, NOT IMPLEMENTED
58
-]
59
-
60
-
61
-SLEEP_AMOUNT = 0.1
62
-
63
-
64
-# We create a juju_status Command here because it makes testing much,
65
-# much easier.
66
-def juju_status():
67
-    return subprocess.check_output(['juju', 'status'])
68
-
69
-# re-implemented as charmhelpers.fetch.configure_sources()
70
-# def configure_source(update=False):
71
-#    source = config_get('source')
72
-#    if ((source.startswith('ppa:') or
73
-#         source.startswith('cloud:') or
74
-#         source.startswith('http:'))):
75
-#        run('add-apt-repository', source)
76
-#    if source.startswith("http:"):
77
-#        run('apt-key', 'import', config_get('key'))
78
-#    if update:
79
-#        run('apt-get', 'update')
80
-
81
-
82
-# DEPRECATED: client-side only
83
-def make_charm_config_file(charm_config):
84
-    charm_config_file = tempfile.NamedTemporaryFile(mode='w+')
85
-    charm_config_file.write(yaml.dump(charm_config))
86
-    charm_config_file.flush()
87
-    # The NamedTemporaryFile instance is returned instead of just the name
88
-    # because we want to take advantage of garbage collection-triggered
89
-    # deletion of the temp file when it goes out of scope in the caller.
90
-    return charm_config_file
91
-
92
-
93
-# DEPRECATED: client-side only
94
-def unit_info(service_name, item_name, data=None, unit=None):
95
-    if data is None:
96
-        data = yaml.safe_load(juju_status())
97
-    service = data['services'].get(service_name)
98
-    if service is None:
99
-        # XXX 2012-02-08 gmb:
100
-        #     This allows us to cope with the race condition that we
101
-        #     have between deploying a service and having it come up in
102
-        #     `juju status`. We could probably do with cleaning it up so
103
-        #     that it fails a bit more noisily after a while.
104
-        return ''
105
-    units = service['units']
106
-    if unit is not None:
107
-        item = units[unit][item_name]
108
-    else:
109
-        # It might seem odd to sort the units here, but we do it to
110
-        # ensure that when no unit is specified, the first unit for the
111
-        # service (or at least the one with the lowest number) is the
112
-        # one whose data gets returned.
113
-        sorted_unit_names = sorted(units.keys())
114
-        item = units[sorted_unit_names[0]][item_name]
115
-    return item
116
-
117
-
118
-# DEPRECATED: client-side only
119
-def get_machine_data():
120
-    return yaml.safe_load(juju_status())['machines']
121
-
122
-
123
-# DEPRECATED: client-side only
124
-def wait_for_machine(num_machines=1, timeout=300):
125
-    """Wait `timeout` seconds for `num_machines` machines to come up.
126
-
127
-    This wait_for... function can be called by other wait_for functions
128
-    whose timeouts might be too short in situations where only a bare
129
-    Juju setup has been bootstrapped.
130
-
131
-    :return: A tuple of (num_machines, time_taken). This is used for
132
-             testing.
133
-    """
134
-    # You may think this is a hack, and you'd be right. The easiest way
135
-    # to tell what environment we're working in (LXC vs EC2) is to check
136
-    # the dns-name of the first machine. If it's localhost we're in LXC
137
-    # and we can just return here.
138
-    if get_machine_data()[0]['dns-name'] == 'localhost':
139
-        return 1, 0
140
-    start_time = time.time()
141
-    while True:
142
-        # Drop the first machine, since it's the Zookeeper and that's
143
-        # not a machine that we need to wait for. This will only work
144
-        # for EC2 environments, which is why we return early above if
145
-        # we're in LXC.
146
-        machine_data = get_machine_data()
147
-        non_zookeeper_machines = [
148
-            machine_data[key] for key in list(machine_data.keys())[1:]]
149
-        if len(non_zookeeper_machines) >= num_machines:
150
-            all_machines_running = True
151
-            for machine in non_zookeeper_machines:
152
-                if machine.get('instance-state') != 'running':
153
-                    all_machines_running = False
154
-                    break
155
-            if all_machines_running:
156
-                break
157
-        if time.time() - start_time >= timeout:
158
-            raise RuntimeError('timeout waiting for machines to start')
159
-        time.sleep(SLEEP_AMOUNT)
160
-    return num_machines, time.time() - start_time
161
-
162
-
163
-# DEPRECATED: client-side only
164
-def wait_for_unit(service_name, timeout=480):
165
-    """Wait `timeout` seconds for a given service name to come up."""
166
-    wait_for_machine(num_machines=1)
167
-    start_time = time.time()
168
-    while True:
169
-        state = unit_info(service_name, 'agent-state')
170
-        if 'error' in state or state == 'started':
171
-            break
172
-        if time.time() - start_time >= timeout:
173
-            raise RuntimeError('timeout waiting for service to start')
174
-        time.sleep(SLEEP_AMOUNT)
175
-    if state != 'started':
176
-        raise RuntimeError('unit did not start, agent-state: ' + state)
177
-
178
-
179
-# DEPRECATED: client-side only
180
-def wait_for_relation(service_name, relation_name, timeout=120):
181
-    """Wait `timeout` seconds for a given relation to come up."""
182
-    start_time = time.time()
183
-    while True:
184
-        relation = unit_info(service_name, 'relations').get(relation_name)
185
-        if relation is not None and relation['state'] == 'up':
186
-            break
187
-        if time.time() - start_time >= timeout:
188
-            raise RuntimeError('timeout waiting for relation to be up')
189
-        time.sleep(SLEEP_AMOUNT)
190
-
191
-
192
-# DEPRECATED: client-side only
193
-def wait_for_page_contents(url, contents, timeout=120, validate=None):
194
-    if validate is None:
195
-        validate = operator.contains
196
-    start_time = time.time()
197
-    while True:
198
-        try:
199
-            stream = urlopen(url)
200
-        except (HTTPError, URLError):
201
-            pass
202
-        else:
203
-            page = stream.read()
204
-            if validate(page, contents):
205
-                return page
206
-        if time.time() - start_time >= timeout:
207
-            raise RuntimeError('timeout waiting for contents of ' + url)
208
-        time.sleep(SLEEP_AMOUNT)
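A minimal sketch chaining the deprecated client-side helpers above; the service name, URL and expected page text are hypothetical.

    # Wait for two machines, then for the unit to start, then for its web
    # page to serve the expected content.
    wait_for_machine(num_machines=2, timeout=600)
    wait_for_unit('my-web-service', timeout=480)
    wait_for_page_contents('http://10.0.0.5/', 'It works', timeout=120)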

+ 0
- 15
hooks/charmhelpers/contrib/charmsupport/__init__.py View File

@@ -1,15 +0,0 @@
1
-# Copyright 2014-2015 Canonical Limited.
2
-#
3
-# This file is part of charm-helpers.
4
-#
5
-# charm-helpers is free software: you can redistribute it and/or modify
6
-# it under the terms of the GNU Lesser General Public License version 3 as
7
-# published by the Free Software Foundation.
8
-#
9
-# charm-helpers is distributed in the hope that it will be useful,
10
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
11
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
-# GNU Lesser General Public License for more details.
13
-#
14
-# You should have received a copy of the GNU Lesser General Public License
15
-# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

+ 0
- 360
hooks/charmhelpers/contrib/charmsupport/nrpe.py View File

@@ -1,360 +0,0 @@
1
-# Copyright 2014-2015 Canonical Limited.
2
-#
3
-# This file is part of charm-helpers.
4
-#
5
-# charm-helpers is free software: you can redistribute it and/or modify
6
-# it under the terms of the GNU Lesser General Public License version 3 as
7
-# published by the Free Software Foundation.
8
-#
9
-# charm-helpers is distributed in the hope that it will be useful,
10
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
11
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
-# GNU Lesser General Public License for more details.
13
-#
14
-# You should have received a copy of the GNU Lesser General Public License
15
-# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
16
-
17
-"""Compatibility with the nrpe-external-master charm"""
18
-# Copyright 2012 Canonical Ltd.
19
-#
20
-# Authors:
21
-#  Matthew Wedgwood <matthew.wedgwood@canonical.com>
22
-
23
-import subprocess
24
-import pwd
25
-import grp
26
-import os
27
-import glob
28
-import shutil
29
-import re
30
-import shlex
31
-import yaml
32
-
33
-from charmhelpers.core.hookenv import (
34
-    config,
35
-    local_unit,
36
-    log,
37
-    relation_ids,
38
-    relation_set,
39
-    relations_of_type,
40
-)
41
-
42
-from charmhelpers.core.host import service
43
-
44
-# This module adds compatibility with the nrpe-external-master and plain nrpe
45
-# subordinate charms. To use it in your charm:
46
-#
47
-# 1. Update metadata.yaml
48
-#
49
-#   provides:
50
-#     (...)
51
-#     nrpe-external-master:
52
-#       interface: nrpe-external-master
53
-#       scope: container
54
-#
55
-#   and/or
56
-#
57
-#   provides:
58
-#     (...)
59
-#     local-monitors:
60
-#       interface: local-monitors
61
-#       scope: container
62
-
63
-#
64
-# 2. Add the following to config.yaml
65
-#
66
-#    nagios_context:
67
-#      default: "juju"
68
-#      type: string
69
-#      description: |
70
-#        Used by the nrpe subordinate charms.
71
-#        A string that will be prepended to instance name to set the host name
72
-#        in nagios. So for instance the hostname would be something like:
73
-#            juju-myservice-0
74
-#        If you're running multiple environments with the same services in them
75
-#        this allows you to differentiate between them.
76
-#    nagios_servicegroups:
77
-#      default: ""
78
-#      type: string
79
-#      description: |
80
-#        A comma-separated list of nagios servicegroups.
81
-#        If left empty, the nagios_context will be used as the servicegroup
82
-#
83
-# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
84
-#
85
-# 4. Update your hooks.py with something like this:
86
-#
87
-#    from charmsupport.nrpe import NRPE
88
-#    (...)
89
-#    def update_nrpe_config():
90
-#        nrpe_compat = NRPE()
91
-#        nrpe_compat.add_check(
92
-#            shortname = "myservice",
93
-#            description = "Check MyService",
94
-#            check_cmd = "check_http -w 2 -c 10 http://localhost"
95
-#            )
96
-#        nrpe_compat.add_check(
97
-#            "myservice_other",
98
-#            "Check for widget failures",
99
-#            check_cmd = "/srv/myapp/scripts/widget_check"
100
-#            )
101
-#        nrpe_compat.write()
102
-#
103
-#    def config_changed():
104
-#        (...)
105
-#        update_nrpe_config()
106
-#
107
-#    def nrpe_external_master_relation_changed():
108
-#        update_nrpe_config()
109
-#
110
-#    def local_monitors_relation_changed():
111
-#        update_nrpe_config()
112
-#
113
-# 5. ln -s hooks.py nrpe-external-master-relation-changed
114
-#    ln -s hooks.py local-monitors-relation-changed
115
-
116
-
117
-class CheckException(Exception):
118
-    pass
119
-
120
-
121
-class Check(object):
122
-    shortname_re = '[A-Za-z0-9-_]+$'
123
-    service_template = ("""
124
-#---------------------------------------------------
125
-# This file is Juju managed
126
-#---------------------------------------------------
127
-define service {{
128
-    use                             active-service
129
-    host_name                       {nagios_hostname}
130
-    service_description             {nagios_hostname}[{shortname}] """
131
-                        """{description}
132
-    check_command                   check_nrpe!{command}
133
-    servicegroups                   {nagios_servicegroup}
134
-}}
135
-""")
136
-
137
-    def __init__(self, shortname, description, check_cmd):
138
-        super(Check, self).__init__()
139
-        # XXX: could be better to calculate this from the service name
140
-        if not re.match(self.shortname_re, shortname):
141
-            raise CheckException("shortname must match {}".format(
142
-                Check.shortname_re))
143
-        self.shortname = shortname
144
-        self.command = "check_{}".format(shortname)
145
-        # Note: a set of invalid characters is defined by the
146
-        # Nagios server config
147
-        # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
148
-        self.description = description
149
-        self.check_cmd = self._locate_cmd(check_cmd)
150
-
151
-    def _locate_cmd(self, check_cmd):
152
-        search_path = (
153
-            '/usr/lib/nagios/plugins',
154
-            '/usr/local/lib/nagios/plugins',
155
-        )
156
-        parts = shlex.split(check_cmd)
157
-        for path in search_path:
158
-            if os.path.exists(os.path.join(path, parts[0])):
159
-                command = os.path.join(path, parts[0])
160
-                if len(parts) > 1:
161
-                    command += " " + " ".join(parts[1:])
162
-                return command
163
-        log('Check command not found: {}'.format(parts[0]))
164
-        return ''
165
-
166
-    def write(self, nagios_context, hostname, nagios_servicegroups):
167
-        nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
168
-            self.command)
169
-        with open(nrpe_check_file, 'w') as nrpe_check_config:
170
-            nrpe_check_config.write("# check {}\n".format(self.shortname))
171
-            nrpe_check_config.write("command[{}]={}\n".format(
172
-                self.command, self.check_cmd))
173
-
174
-        if not os.path.exists(NRPE.nagios_exportdir):
175
-            log('Not writing service config as {} is not accessible'.format(
176
-                NRPE.nagios_exportdir))
177
-        else:
178
-            self.write_service_config(nagios_context, hostname,
179
-                                      nagios_servicegroups)
180
-
181
-    def write_service_config(self, nagios_context, hostname,
182
-                             nagios_servicegroups):
183
-        for f in os.listdir(NRPE.nagios_exportdir):
184
-            if re.search('.*{}.cfg'.format(self.command), f):
185
-                os.remove(os.path.join(NRPE.nagios_exportdir, f))
186
-
187
-        templ_vars = {
188
-            'nagios_hostname': hostname,
189
-            'nagios_servicegroup': nagios_servicegroups,
190
-            'description': self.description,
191
-            'shortname': self.shortname,
192
-            'command': self.command,
193
-        }
194
-        nrpe_service_text = Check.service_template.format(**templ_vars)
195
-        nrpe_service_file = '{}/service__{}_{}.cfg'.format(
196
-            NRPE.nagios_exportdir, hostname, self.command)
197
-        with open(nrpe_service_file, 'w') as nrpe_service_config:
198
-            nrpe_service_config.write(str(nrpe_service_text))
199
-
200
-    def run(self):
201
-        subprocess.call(self.check_cmd)
202
-
203
-
204
-class NRPE(object):
205
-    nagios_logdir = '/var/log/nagios'
206
-    nagios_exportdir = '/var/lib/nagios/export'
207
-    nrpe_confdir = '/etc/nagios/nrpe.d'
208
-
209
-    def __init__(self, hostname=None):
210
-        super(NRPE, self).__init__()
211
-        self.config = config()
212
-        self.nagios_context = self.config['nagios_context']
213
-        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
214
-            self.nagios_servicegroups = self.config['nagios_servicegroups']
215
-        else:
216
-            self.nagios_servicegroups = self.nagios_context
217
-        self.unit_name = local_unit().replace('/', '-')
218
-        if hostname:
219
-            self.hostname = hostname
220
-        else:
221
-            self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
222
-        self.checks = []
223
-
224
-    def add_check(self, *args, **kwargs):
225
-        self.checks.append(Check(*args, **kwargs))
226
-
227
-    def write(self):
228
-        try:
229
-            nagios_uid = pwd.getpwnam('nagios').pw_uid
230
-            nagios_gid = grp.getgrnam('nagios').gr_gid
231
-        except:
232
-            log("Nagios user not set up, nrpe checks not updated")
233
-            return
234
-
235
-        if not os.path.exists(NRPE.nagios_logdir):
236
-            os.mkdir(NRPE.nagios_logdir)
237
-            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
238
-
239
-        nrpe_monitors = {}
240
-        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
241
-        for nrpecheck in self.checks:
242
-            nrpecheck.write(self.nagios_context, self.hostname,
243
-                            self.nagios_servicegroups)
244
-            nrpe_monitors[nrpecheck.shortname] = {
245
-                "command": nrpecheck.command,
246
-            }
247
-
248
-        service('restart', 'nagios-nrpe-server')
249
-
250
-        monitor_ids = relation_ids("local-monitors") + \
251
-            relation_ids("nrpe-external-master")
252
-        for rid in monitor_ids:
253
-            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
254
-
255
-
256
-def get_nagios_hostcontext(relation_name='nrpe-external-master'):
257
-    """
258
-    Query relation with nrpe subordinate, return the nagios_host_context
259
-
260
-    :param str relation_name: Name of relation nrpe sub joined to
261
-    """
262
-    for rel in relations_of_type(relation_name):
263
-        if 'nagios_hostname' in rel:
264
-            return rel['nagios_host_context']
265
-
266
-
267
-def get_nagios_hostname(relation_name='nrpe-external-master'):
268
-    """
269
-    Query relation with nrpe subordinate, return the nagios_hostname
270
-
271
-    :param str relation_name: Name of relation nrpe sub joined to
272
-    """
273
-    for rel in relations_of_type(relation_name):
274
-        if 'nagios_hostname' in rel:
275
-            return rel['nagios_hostname']
276
-
277
-
278
-def get_nagios_unit_name(relation_name='nrpe-external-master'):
279
-    """
280
-    Return the nagios unit name prepended with host_context if needed
281
-
282
-    :param str relation_name: Name of relation nrpe sub joined to
283
-    """
284
-    host_context = get_nagios_hostcontext(relation_name)
285
-    if host_context:
286
-        unit = "%s:%s" % (host_context, local_unit())
287
-    else:
288
-        unit = local_unit()
289
-    return unit
290
-
291
-
292
-def add_init_service_checks(nrpe, services, unit_name):
293
-    """
294
-    Add checks for each service in list
295
-
296
-    :param NRPE nrpe: NRPE object to add check to
297
-    :param list services: List of services to check
298
-    :param str unit_name: Unit name to use in check description
299
-    """
300
-    for svc in services:
301
-        upstart_init = '/etc/init/%s.conf' % svc
302
-        sysv_init = '/etc/init.d/%s' % svc
303
-        if os.path.exists(upstart_init):
304
-            nrpe.add_check(
305
-                shortname=svc,
306
-                description='process check {%s}' % unit_name,
307
-                check_cmd='check_upstart_job %s' % svc
308
-            )
309
-        elif os.path.exists(sysv_init):
310
-            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
311
-            cron_file = ('*/5 * * * * root '
312
-                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
313
-                         '-s /etc/init.d/%s status > '
314
-                         '/var/lib/nagios/service-check-%s.txt\n' % (svc,
315
-                                                                     svc)
316
-                         )
317
-            f = open(cronpath, 'w')
318
-            f.write(cron_file)
319
-            f.close()
320
-            nrpe.add_check(
321
-                shortname=svc,
322
-                description='process check {%s}' % unit_name,
323
-                check_cmd='check_status_file.py -f '
324
-                          '/var/lib/nagios/service-check-%s.txt' % svc,
325
-            )
326
-
327
-
328
-def copy_nrpe_checks():
329
-    """
330
-    Copy the nrpe checks into place
331
-
332
-    """
333
-    NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
334
-    nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
335
-                                  'charmhelpers', 'contrib', 'openstack',
336
-                                  'files')
337
-
338
-    if not os.path.exists(NAGIOS_PLUGINS):
339
-        os.makedirs(NAGIOS_PLUGINS)
340
-    for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
341
-        if os.path.isfile(fname):
342
-            shutil.copy2(fname,
343
-                         os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
344
-
345
-
346
-def add_haproxy_checks(nrpe, unit_name):
347
-    """
348
-    Add checks for each service in list
349
-
350
-    :param NRPE nrpe: NRPE object to add check to
351
-    :param str unit_name: Unit name to use in check description
352
-    """
353
-    nrpe.add_check(
354
-        shortname='haproxy_servers',
355
-        description='Check HAProxy {%s}' % unit_name,
356
-        check_cmd='check_haproxy.sh')
357
-    nrpe.add_check(
358
-        shortname='haproxy_queue',
359
-        description='Check HAProxy queue depth {%s}' % unit_name,
360
-        check_cmd='check_haproxy_queue_depth.sh')
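A condensed sketch of how the helpers above are typically combined in a charm's update_nrpe_config() handler; the service names are hypothetical.

    def update_nrpe_config():
        hostname = get_nagios_hostname()
        current_unit = get_nagios_unit_name()
        nrpe_compat = NRPE(hostname=hostname)
        copy_nrpe_checks()
        add_init_service_checks(nrpe_compat, ['my-api', 'my-worker'],
                                current_unit)
        add_haproxy_checks(nrpe_compat, current_unit)
        nrpe_compat.write()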

+ 0
- 175
hooks/charmhelpers/contrib/charmsupport/volumes.py View File

@@ -1,175 +0,0 @@
1
-# Copyright 2014-2015 Canonical Limited.
2
-#
3
-# This file is part of charm-helpers.
4
-#
5
-# charm-helpers is free software: you can redistribute it and/or modify
6
-# it under the terms of the GNU Lesser General Public License version 3 as
7
-# published by the Free Software Foundation.
8
-#
9
-# charm-helpers is distributed in the hope that it will be useful,
10
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
11
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
-# GNU Lesser General Public License for more details.
13
-#
14
-# You should have received a copy of the GNU Lesser General Public License
15
-# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
16
-
17
-'''
18
-Functions for managing volumes in juju units. One volume is supported per unit.
19
-Subordinates may have their own storage, provided it is on its own partition.
20
-
21
-Configuration stanzas::
22
-
23
-  volume-ephemeral:
24
-    type: boolean
25
-    default: true
26
-    description: >
27
-      If false, a volume is mounted as sepecified in "volume-map"
28
-      If true, ephemeral storage will be used, meaning that log data
29
-         will only exist as long as the machine. YOU HAVE BEEN WARNED.
30
-  volume-map:
31
-    type: string
32
-    default: {}
33
-    description: >
34
-      YAML map of units to device names, e.g:
35
-        "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
36
-      Service units will raise a configure-error if volume-ephemeral
37
-      is 'false' and no volume-map value is set. Use 'juju set' to set a
38
-      value and 'juju resolved' to complete configuration.
39
-
40
-Usage::
41
-
42
-    from charmsupport.volumes import configure_volume, VolumeConfigurationError
43
-    from charmsupport.hookenv import log, ERROR
44
-    def pre_mount_hook():
45
-        stop_service('myservice')
46
-    def post_mount_hook():
47
-        start_service('myservice')
48
-
49
-    if __name__ == '__main__':
50
-        try:
51
-            configure_volume(before_change=pre_mount_hook,
52
-                             after_change=post_mount_hook)
53
-        except VolumeConfigurationError:
54
-            log('Storage could not be configured', ERROR)
55
-
56
-'''
57
-
58
-# XXX: Known limitations
59
-# - fstab is neither consulted nor updated
60
-
61
-import os
62
-from charmhelpers.core import hookenv
63
-from charmhelpers.core import host
64
-import yaml
65
-
66
-
67
-MOUNT_BASE = '/srv/juju/volumes'
68
-
69
-
70
-class VolumeConfigurationError(Exception):
71
-    '''Volume configuration data is missing or invalid'''
72
-    pass
73
-
74
-
75
-def get_config():
76
-    '''Gather and sanity-check volume configuration data'''
77
-    volume_config = {}
78
-    config = hookenv.config()
79
-
80
-    errors = False
81
-
82
-    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
83
-        volume_config['ephemeral'] = True
84
-    else:
85
-        volume_config['ephemeral'] = False
86
-
87
-    try:
88
-        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
89
-    except yaml.YAMLError as e:
90
-        hookenv.log("Error parsing YAML volume-map: {}".format(e),
91
-                    hookenv.ERROR)
92
-        errors = True
-        volume_map = {}
93
-    if volume_map is None:
94
-        # probably an empty string
95
-        volume_map = {}
96
-    elif not isinstance(volume_map, dict):
97
-        hookenv.log("Volume-map should be a dictionary, not {}".format(
98
-            type(volume_map)))
99
-        errors = True
100
-
101
-    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
102
-    if volume_config['device'] and volume_config['ephemeral']:
103
-        # asked for ephemeral storage but also defined a volume ID
104
-        hookenv.log('A volume is defined for this unit, but ephemeral '
105
-                    'storage was requested', hookenv.ERROR)
106
-        errors = True
107
-    elif not volume_config['device'] and not volume_config['ephemeral']:
108
-        # asked for permanent storage but did not define volume ID
109
-        hookenv.log('Permanent storage was requested, but there is no volume '
110
-                    'defined for this unit.', hookenv.ERROR)
111
-        errors = True
112
-
113
-    unit_mount_name = hookenv.local_unit().replace('/', '-')
114
-    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
115
-
116
-    if errors:
117
-        return None
118
-    return volume_config
119
-
120
-
121
-def mount_volume(config):
122
-    if os.path.exists(config['mountpoint']):
123
-        if not os.path.isdir(config['mountpoint']):
124
-            hookenv.log('Not a directory: {}'.format(config['mountpoint']))
125
-            raise VolumeConfigurationError()
126
-    else:
127
-        host.mkdir(config['mountpoint'])
128
-    if os.path.ismount(config['mountpoint']):
129
-        unmount_volume(config)
130
-    if not host.mount(config['device'], config['mountpoint'], persist=True):
131
-        raise VolumeConfigurationError()
132
-
133
-
134
-def unmount_volume(config):
135
-    if os.path.ismount(config['mountpoint']):
136
-        if not host.umount(config['mountpoint'], persist=True):
137
-            raise VolumeConfigurationError()
138
-
139
-
140
-def managed_mounts():
141
-    '''List of all mounted managed volumes'''
142
-    return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
143
-
144
-
145
-def configure_volume(before_change=lambda: None, after_change=lambda: None):
146
-    '''Set up storage (or don't) according to the charm's volume configuration.
147
-       Returns the mount point or "ephemeral". before_change and after_change
148
-       are optional functions to be called if the volume configuration changes.
149
-    '''
150
-
151
-    config = get_config()
152
-    if not config:
153
-        hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
154
-        raise VolumeConfigurationError()
155
-
156
-    if config['ephemeral']:
157
-        if os.path.ismount(config['mountpoint']):
158
-            before_change()
159
-            unmount_volume(config)
160
-            after_change()
161
-        return 'ephemeral'
162
-    else:
163
-        # persistent storage
164
-        if os.path.ismount(config['mountpoint']):
165
-            mounts = dict(managed_mounts())
166
-            if mounts.get(config['mountpoint']) != config['device']:
167
-                before_change()
168
-                unmount_volume(config)
169
-                mount_volume(config)
170
-                after_change()
171
-        else:
172
-            before_change()
173
-            mount_volume(config)
174
-            after_change()
175
-        return config['mountpoint']
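A short sketch of configure_volume() above wired into a config-changed hook, assuming service_stop/service_start from charmhelpers.core.host; the service name is hypothetical.

    from charmhelpers.core import hookenv, host

    def config_changed():
        try:
            mountpoint = configure_volume(
                before_change=lambda: host.service_stop('my-service'),
                after_change=lambda: host.service_start('my-service'))
        except VolumeConfigurationError:
            hookenv.log('Storage could not be configured', hookenv.ERROR)
            return
        hookenv.log('Data directory: {}'.format(mountpoint))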

+ 0
- 0
hooks/charmhelpers/contrib/database/__init__.py View File


+ 0
- 412
hooks/charmhelpers/contrib/database/mysql.py View File

@@ -1,412 +0,0 @@
1
-"""Helper for working with a MySQL database"""
2
-import json
3
-import re
4
-import sys
5
-import platform
6
-import os
7
-import glob
8
-
9
-# from string import upper
10
-
11
-from charmhelpers.core.host import (
12
-    mkdir,
13
-    pwgen,
14
-    write_file
15
-)
16
-from charmhelpers.core.hookenv import (
17
-    config as config_get,
18
-    relation_get,
19
-    related_units,
20
-    unit_get,
21
-    log,
22
-    DEBUG,
23
-    INFO,
24
-    WARNING,
25
-)
26
-from charmhelpers.fetch import (
27
-    apt_install,
28
-    apt_update,
29
-    filter_installed_packages,
30
-)
31
-from charmhelpers.contrib.peerstorage import (
32
-    peer_store,
33
-    peer_retrieve,
34
-)
35
-from charmhelpers.contrib.network.ip import get_host_ip
36
-
37
-try:
38
-    import MySQLdb
39
-except ImportError:
40
-    apt_update(fatal=True)
41
-    apt_install(filter_installed_packages(['python-mysqldb']), fatal=True)
42
-    import MySQLdb
43
-
44
-
45
-class MySQLHelper(object):
46
-
47
-    def __init__(self, rpasswdf_template, upasswdf_template, host='localhost',
48
-                 migrate_passwd_to_peer_relation=True,
49
-                 delete_ondisk_passwd_file=True):
50
-        self.host = host
51
-        # Password file path templates
52
-        self.root_passwd_file_template = rpasswdf_template
53
-        self.user_passwd_file_template = upasswdf_template
54
-
55
-        self.migrate_passwd_to_peer_relation = migrate_passwd_to_peer_relation
56
-        # If we migrate we have the option to delete local copy of root passwd
57
-        self.delete_ondisk_passwd_file = delete_ondisk_passwd_file
58
-
59
-    def connect(self, user='root', password=None):
60
-        log("Opening db connection for %s@%s" % (user, self.host), level=DEBUG)
61
-        self.connection = MySQLdb.connect(user=user, host=self.host,
62
-                                          passwd=password)
63
-
64
-    def database_exists(self, db_name):
65
-        cursor = self.connection.cursor()
66
-        try:
67
-            cursor.execute("SHOW DATABASES")
68
-            databases = [i[0] for i in cursor.fetchall()]
69
-        finally:
70
-            cursor.close()
71
-
72
-        return db_name in databases
73
-
74
-    def create_database(self, db_name):
75
-        cursor = self.connection.cursor()
76
-        try:
77
-            cursor.execute("CREATE DATABASE {} CHARACTER SET UTF8"
78
-                           .format(db_name))
79
-        finally:
80
-            cursor.close()
81
-
82
-    def grant_exists(self, db_name, db_user, remote_ip):
83
-        cursor = self.connection.cursor()
84
-        priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
85
-                      "TO '{}'@'{}'".format(db_name, db_user, remote_ip)
86
-        try:
87
-            cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
88
-                                                              remote_ip))
89
-            grants = [i[0] for i in cursor.fetchall()]
90
-        except MySQLdb.OperationalError:
91
-            return False
92
-        finally:
93
-            cursor.close()
94
-
95
-        # TODO: review for different grants
96
-        return priv_string in grants
97
-
98
-    def create_grant(self, db_name, db_user, remote_ip, password):
99
-        cursor = self.connection.cursor()
100
-        try:
101
-            # TODO: review for different grants
102
-            cursor.execute("GRANT ALL PRIVILEGES ON {}.* TO '{}'@'{}' "
103
-                           "IDENTIFIED BY '{}'".format(db_name,
104
-                                                       db_user,
105
-                                                       remote_ip,
106
-                                                       password))
107
-        finally:
108
-            cursor.close()
109
-
110
-    def create_admin_grant(self, db_user, remote_ip, password):
111
-        cursor = self.connection.cursor()
112
-        try:
113
-            cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
114
-                           "IDENTIFIED BY '{}'".format(db_user,
115
-                                                       remote_ip,
116
-                                                       password))
117
-        finally:
118
-            cursor.close()
119
-
120
-    def cleanup_grant(self, db_user, remote_ip):
121
-        cursor = self.connection.cursor()
122
-        try:
123
-            cursor.execute("DROP FROM mysql.user WHERE user='{}' "
124
-                           "AND HOST='{}'".format(db_user,
125
-                                                  remote_ip))
126
-        finally:
127
-            cursor.close()
128
-
129
-    def execute(self, sql):
130
-        """Execute arbitary SQL against the database."""
131
-        cursor = self.connection.cursor()
132
-        try:
133
-            cursor.execute(sql)
134
-        finally:
135
-            cursor.close()
136
-
137
-    def migrate_passwords_to_peer_relation(self, excludes=None):
138
-        """Migrate any passwords storage on disk to cluster peer relation."""
139
-        dirname = os.path.dirname(self.root_passwd_file_template)
140
-        path = os.path.join(dirname, '*.passwd')
141
-        for f in glob.glob(path):
142
-            if excludes and f in excludes:
143
-                log("Excluding %s from peer migration" % (f), level=DEBUG)
144
-                continue
145
-
146
-            key = os.path.basename(f)
147
-            with open(f, 'r') as passwd:
148
-                _value = passwd.read().strip()
149
-
150
-            try:
151
-                peer_store(key, _value)
152
-
153
-                if self.delete_ondisk_passwd_file:
154
-                    os.unlink(f)
155
-            except ValueError:
156
-                # NOTE cluster relation not yet ready - skip for now
157
-                pass
158
-
159
-    def get_mysql_password_on_disk(self, username=None, password=None):
160
-        """Retrieve, generate or store a mysql password for the provided
161
-        username on disk."""
162
-        if username:
163
-            template = self.user_passwd_file_template
164
-            passwd_file = template.format(username)
165
-        else:
166
-            passwd_file = self.root_passwd_file_template
167
-
168
-        _password = None
169
-        if os.path.exists(passwd_file):
170
-            log("Using existing password file '%s'" % passwd_file, level=DEBUG)
171
-            with open(passwd_file, 'r') as passwd:
172
-                _password = passwd.read().strip()
173
-        else:
174
-            log("Generating new password file '%s'" % passwd_file, level=DEBUG)
175
-            if not os.path.isdir(os.path.dirname(passwd_file)):
176
-                # NOTE: need to ensure this is not mysql root dir (which needs
177
-                # to be mysql readable)
178
-                mkdir(os.path.dirname(passwd_file), owner='root', group='root',
179
-                      perms=0o770)
180
-                # Force permissions - for some reason the chmod in makedirs
181
-                # fails
182
-                os.chmod(os.path.dirname(passwd_file), 0o770)
183
-
184
-            _password = password or pwgen(length=32)
185
-            write_file(passwd_file, _password, owner='root', group='root',
186
-                       perms=0o660)
187
-
188
-        return _password
189
-
190
-    def passwd_keys(self, username):
191
-        """Generator to return keys used to store passwords in peer store.
192
-
193
-        NOTE: we support both legacy and new format to support mysql
194
-        charm prior to refactor. This is necessary to avoid LP 1451890.
195
-        """
196
-        keys = []
197
-        if username == 'mysql':
198
-            log("Bad username '%s'" % (username), level=WARNING)
199
-
200
-        if username:
201
-            # IMPORTANT: *newer* format must be returned first
202
-            keys.append('mysql-%s.passwd' % (username))
203
-            keys.append('%s.passwd' % (username))
204
-        else:
205
-            keys.append('mysql.passwd')
206
-
207
-        for key in keys:
208
-            yield key
209
-
210
-    def get_mysql_password(self, username=None, password=None):
211
-        """Retrieve, generate or store a mysql password for the provided
212
-        username using peer relation cluster."""
213
-        excludes = []
214
-
215
-        # First check peer relation.
216
-        try:
217
-            for key in self.passwd_keys(username):
218
-                _password = peer_retrieve(key)
219
-                if _password:
220
-                    break
221
-
222
-            # If root password available don't update peer relation from local
223
-            if _password and not username:
224
-                excludes.append(self.root_passwd_file_template)
225
-
226
-        except ValueError:
227
-            # cluster relation is not yet started; use on-disk
228
-            _password = None
229
-
230
-        # If none available, generate new one
231
-        if not _password:
232
-            _password = self.get_mysql_password_on_disk(username, password)
233
-
234
-        # Put on wire if required
235
-        if self.migrate_passwd_to_peer_relation:
236
-            self.migrate_passwords_to_peer_relation(excludes=excludes)
237
-
238
-        return _password
239
-
240
-    def get_mysql_root_password(self, password=None):
241
-        """Retrieve or generate mysql root password for service units."""
242
-        return self.get_mysql_password(username=None, password=password)
243
-
244
-    def normalize_address(self, hostname):
245
-        """Ensure that address returned is an IP address (i.e. not fqdn)"""
246
-        if config_get('prefer-ipv6'):
247
-            # TODO: add support for ipv6 dns
248
-            return hostname
249
-
250
-        if hostname != unit_get('private-address'):
251
-            return get_host_ip(hostname, fallback=hostname)
252
-
253
-        # Otherwise assume localhost
254
-        return '127.0.0.1'
255
-
256
-    def get_allowed_units(self, database, username, relation_id=None):
257
-        """Get list of units with access grants for database with username.
258
-
259
-        This is typically used to provide shared-db relations with a list of
260
-        which units have been granted access to the given database.
261
-        """
262
-        self.connect(password=self.get_mysql_root_password())
263
-        allowed_units = set()
264
-        for unit in related_units(relation_id):
265
-            settings = relation_get(rid=relation_id, unit=unit)
266
-            # First check for setting with prefix, then without
267
-            for attr in ["%s_hostname" % (database), 'hostname']:
268
-                hosts = settings.get(attr, None)
269
-                if hosts:
270
-                    break
271
-
272
-            if hosts:
273
-                # hostname can be json-encoded list of hostnames
274
-                try:
275
-                    hosts = json.loads(hosts)
276
-                except ValueError:
277
-                    hosts = [hosts]
278
-            else:
279
-                hosts = [settings['private-address']]
280
-
281
-            if hosts:
282
-                for host in hosts:
283
-                    host = self.normalize_address(host)
284
-                    if self.grant_exists(database, username, host):
285
-                        log("Grant exists for host '%s' on db '%s'" %
286
-                            (host, database), level=DEBUG)
287
-                        if unit not in allowed_units:
288
-                            allowed_units.add(unit)
289
-                    else:
290
-                        log("Grant does NOT exist for host '%s' on db '%s'" %
291
-                            (host, database), level=DEBUG)
292
-            else:
293
-                log("No hosts found for grant check", level=INFO)
294
-
295
-        return allowed_units
296
-
297
-    def configure_db(self, hostname, database, username, admin=False):
298
-        """Configure access to database for username from hostname."""
299
-        self.connect(password=self.get_mysql_root_password())
300
-        if not self.database_exists(database):
301
-            self.create_database(database)
302
-
303
-        remote_ip = self.normalize_address(hostname)
304
-        password = self.get_mysql_password(username)
305
-        if not self.grant_exists(database, username, remote_ip):
306
-            if not admin:
307
-                self.create_grant(database, username, remote_ip, password)
308
-            else:
309
-                self.create_admin_grant(username, remote_ip, password)
310
-
311
-        return password
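A minimal sketch of driving MySQLHelper above from a shared-db relation hook; the password file paths, database name and relation keys are hypothetical.

    from charmhelpers.core.hookenv import relation_get, relation_set

    helper = MySQLHelper(
        rpasswdf_template='/var/lib/charm/mydb/mysql.passwd',
        upasswdf_template='/var/lib/charm/mydb/mysql-{}.passwd')

    # Create the database and user if needed, grant access from the remote
    # unit's address, and hand the generated password back on the relation.
    password = helper.configure_db(
        hostname=relation_get('private-address'),
        database='wordpress', username='wordpress')
    relation_set(database='wordpress', user='wordpress', password=password)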
312
-
313
-
314
-class PerconaClusterHelper(object):
315
-
316
-    # Going for the biggest page size to avoid wasted bytes.
317
-    # InnoDB page size is 16MB
318
-
319
-    DEFAULT_PAGE_SIZE = 16 * 1024 * 1024
320
-    DEFAULT_INNODB_BUFFER_FACTOR = 0.50
321
-
322
-    def human_to_bytes(self, human):
323
-        """Convert human readable configuration options to bytes."""
324
-        num_re = re.compile('^[0-9]+$')
325
-        if num_re.match(human):
326
-            return human
327
-
328
-        factors = {
329
-            'K': 1024,
330
-            'M': 1048576,
331
-            'G': 1073741824,
332
-            'T': 1099511627776
333
-        }
334
-        modifier = human[-1]
335
-        if modifier in factors:
336
-            return int(human[:-1]) * factors[modifier]
337
-
338
-        if modifier == '%':
339
-            total_ram = self.human_to_bytes(self.get_mem_total())
340
-            if self.is_32bit_system() and total_ram > self.sys_mem_limit():
341
-                total_ram = self.sys_mem_limit()
342
-            factor = int(human[:-1]) * 0.01
343
-            pctram = total_ram * factor
344
-            return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE))
345
-
346
-        raise ValueError("Can only convert K,M,G, or T")
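Worked examples of the human_to_bytes() conversion above; the '%' form is additionally rounded down to a multiple of the 16MB InnoDB page size.

    helper = PerconaClusterHelper()
    helper.human_to_bytes('512')   # bare integers are returned unchanged
    helper.human_to_bytes('256M')  # 256 * 1048576 = 268435456
    helper.human_to_bytes('2G')    # 2 * 1073741824 = 2147483648
    helper.human_to_bytes('50%')   # half of MemTotal, page-size aligned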
347
-
348
-    def is_32bit_system(self):
349
-        """Determine whether system is 32 or 64 bit."""
350
-        try:
351
-            return sys.maxsize < 2 ** 32
352
-        except OverflowError:
353
-            return False
354
-
355
-    def sys_mem_limit(self):
356
-        """Determine the default memory limit for the current service unit."""
357
-        if platform.machine() in ['armv7l']:
358
-            _mem_limit = self.human_to_bytes('2700M')  # experimentally determined
359
-        else:
360
-            # Limit for x86 based 32bit systems
361
-            _mem_limit = self.human_to_bytes('4G')
362
-
363
-        return _mem_limit
364
-
365
-    def get_mem_total(self):
366
-        """Calculate the total memory in the current service unit."""
367
-        with open('/proc/meminfo') as meminfo_file:
368
-            for line in meminfo_file:
369
-                key, mem = line.split(':', 2)
370
-                if key == 'MemTotal':
371
-                    mtot, modifier = mem.strip().split(' ')
372
-                    return '%s%s' % (mtot, modifier[0].upper())
373
-
374
-    def parse_config(self):
375
-        """Parse charm configuration and calculate values for config files."""
376
-        config = config_get()
377
-        mysql_config = {}
378
-        if 'max-connections' in config: