Browse Source

Update rabbit driver config options

The stein version of python-oslo.messaging (9.0.0+) has removed
the following config options from the [oslo_messaging_rabbit]
section:

rabbit_host, rabbit_port, rabbit_hosts, rabbit_userid,
rabbit_password, rabbit_virtual_host, rabbit_max_retries, and
rabbit_durable_queues.

The above change requires a sync from charm-helpers.

Additionally the transport_url directive has been moved to the
[DEFAULT] section.

These options have been deprecated since Ocata, so this change is
also applied to pre-Stein templates in order to drop the
deprecation warnings.

See release notes at:
https://docs.openstack.org/releasenotes/oslo.messaging/index.html

test_300_cinder_config is also removed in this change as amulet
tests no longer need to confirm config file settings.

Change-Id: Ia93be49430e8d95c38ed521d08bbb62f47e13e59
Closes-Bug: #1817672
changes/18/641118/1
Corey Bryant 3 months ago
parent
commit
efb1a1e2d9

+ 9
- 0
charmhelpers/cli/unitdata.py View File

@@ -19,9 +19,16 @@ from charmhelpers.core import unitdata
19 19
 @cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
20 20
 def unitdata_cmd(subparser):
21 21
     nested = subparser.add_subparsers()
22
+
22 23
     get_cmd = nested.add_parser('get', help='Retrieve data')
23 24
     get_cmd.add_argument('key', help='Key to retrieve the value of')
24 25
     get_cmd.set_defaults(action='get', value=None)
26
+
27
+    getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
28
+    getrange_cmd.add_argument('key', metavar='prefix',
29
+                              help='Prefix of the keys to retrieve')
30
+    getrange_cmd.set_defaults(action='getrange', value=None)
31
+
25 32
     set_cmd = nested.add_parser('set', help='Store data')
26 33
     set_cmd.add_argument('key', help='Key to set')
27 34
     set_cmd.add_argument('value', help='Value to store')
@@ -30,6 +37,8 @@ def unitdata_cmd(subparser):
30 37
     def _unitdata_cmd(action, key, value):
31 38
         if action == 'get':
32 39
             return unitdata.kv().get(key)
40
+        elif action == 'getrange':
41
+            return unitdata.kv().getrange(key)
33 42
         elif action == 'set':
34 43
             unitdata.kv().set(key, value)
35 44
             unitdata.kv().flush()

+ 212
- 0
charmhelpers/contrib/openstack/audits/__init__.py View File

@@ -0,0 +1,212 @@
1
+# Copyright 2019 Canonical Limited.
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License");
4
+# you may not use this file except in compliance with the License.
5
+# You may obtain a copy of the License at
6
+#
7
+#  http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS,
11
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+# See the License for the specific language governing permissions and
13
+# limitations under the License.
14
+
15
+"""OpenStack Security Audit code"""
16
+
17
+import collections
18
+from enum import Enum
19
+import traceback
20
+
21
+from charmhelpers.core.host import cmp_pkgrevno
22
+import charmhelpers.contrib.openstack.utils as openstack_utils
23
+import charmhelpers.core.hookenv as hookenv
24
+
25
+
26
+class AuditType(Enum):
27
+    OpenStackSecurityGuide = 1
28
+
29
+
30
+_audits = {}
31
+
32
+Audit = collections.namedtuple('Audit', 'func filters')
33
+
34
+
35
+def audit(*args):
36
+    """Decorator to register an audit.
37
+
38
+    These are used to generate audits that can be run on a
39
+    deployed system that matches the given configuration
40
+
41
+    :param args: List of functions to filter tests against
42
+    :type args: List[Callable[Dict]]
43
+    """
44
+    def wrapper(f):
45
+        test_name = f.__name__
46
+        if _audits.get(test_name):
47
+            raise RuntimeError(
48
+                "Test name '{}' used more than once"
49
+                .format(test_name))
50
+        non_callables = [fn for fn in args if not callable(fn)]
51
+        if non_callables:
52
+            raise RuntimeError(
53
+                "Configuration includes non-callable filters: {}"
54
+                .format(non_callables))
55
+        _audits[test_name] = Audit(func=f, filters=args)
56
+        return f
57
+    return wrapper
58
+
59
+
60
+def is_audit_type(*args):
61
+    """This audit is included in the specified kinds of audits.
62
+
63
+    :param *args: List of AuditTypes to include this audit in
64
+    :type args: List[AuditType]
65
+    :rtype: Callable[Dict]
66
+    """
67
+    def _is_audit_type(audit_options):
68
+        if audit_options.get('audit_type') in args:
69
+            return True
70
+        else:
71
+            return False
72
+    return _is_audit_type
73
+
74
+
75
+def since_package(pkg, pkg_version):
76
+    """This audit should be run after the specified package version (incl).
77
+
78
+    :param pkg: Package name to compare
79
+    :type pkg: str
80
+    :param release: The package version
81
+    :type release: str
82
+    :rtype: Callable[Dict]
83
+    """
84
+    def _since_package(audit_options=None):
85
+        return cmp_pkgrevno(pkg, pkg_version) >= 0
86
+
87
+    return _since_package
88
+
89
+
90
+def before_package(pkg, pkg_version):
91
+    """This audit should be run before the specified package version (excl).
92
+
93
+    :param pkg: Package name to compare
94
+    :type pkg: str
95
+    :param release: The package version
96
+    :type release: str
97
+    :rtype: Callable[Dict]
98
+    """
99
+    def _before_package(audit_options=None):
100
+        return not since_package(pkg, pkg_version)()
101
+
102
+    return _before_package
103
+
104
+
105
+def since_openstack_release(pkg, release):
106
+    """This audit should run after the specified OpenStack version (incl).
107
+
108
+    :param pkg: Package name to compare
109
+    :type pkg: str
110
+    :param release: The OpenStack release codename
111
+    :type release: str
112
+    :rtype: Callable[Dict]
113
+    """
114
+    def _since_openstack_release(audit_options=None):
115
+        _release = openstack_utils.get_os_codename_package(pkg)
116
+        return openstack_utils.CompareOpenStackReleases(_release) >= release
117
+
118
+    return _since_openstack_release
119
+
120
+
121
+def before_openstack_release(pkg, release):
122
+    """This audit should run before the specified OpenStack version (excl).
123
+
124
+    :param pkg: Package name to compare
125
+    :type pkg: str
126
+    :param release: The OpenStack release codename
127
+    :type release: str
128
+    :rtype: Callable[Dict]
129
+    """
130
+    def _before_openstack_release(audit_options=None):
131
+        return not since_openstack_release(pkg, release)()
132
+
133
+    return _before_openstack_release
134
+
135
+
136
+def it_has_config(config_key):
137
+    """This audit should be run based on specified config keys.
138
+
139
+    :param config_key: Config key to look for
140
+    :type config_key: str
141
+    :rtype: Callable[Dict]
142
+    """
143
+    def _it_has_config(audit_options):
144
+        return audit_options.get(config_key) is not None
145
+
146
+    return _it_has_config
147
+
148
+
149
+def run(audit_options):
150
+    """Run the configured audits with the specified audit_options.
151
+
152
+    :param audit_options: Configuration for the audit
153
+    :type audit_options: Config
154
+
155
+    :rtype: Dict[str, str]
156
+    """
157
+    errors = {}
158
+    results = {}
159
+    for name, audit in sorted(_audits.items()):
160
+        result_name = name.replace('_', '-')
161
+        if result_name in audit_options.get('excludes', []):
162
+            print(
163
+                "Skipping {} because it is"
164
+                "excluded in audit config"
165
+                .format(result_name))
166
+            continue
167
+        if all(p(audit_options) for p in audit.filters):
168
+            try:
169
+                audit.func(audit_options)
170
+                print("{}: PASS".format(name))
171
+                results[result_name] = {
172
+                    'success': True,
173
+                }
174
+            except AssertionError as e:
175
+                print("{}: FAIL ({})".format(name, e))
176
+                results[result_name] = {
177
+                    'success': False,
178
+                    'message': e,
179
+                }
180
+            except Exception as e:
181
+                print("{}: ERROR ({})".format(name, e))
182
+                errors[name] = e
183
+                results[result_name] = {
184
+                    'success': False,
185
+                    'message': e,
186
+                }
187
+    for name, error in errors.items():
188
+        print("=" * 20)
189
+        print("Error in {}: ".format(name))
190
+        traceback.print_tb(error.__traceback__)
191
+        print()
192
+    return results
193
+
194
+
195
+def action_parse_results(result):
196
+    """Parse the result of `run` in the context of an action.
197
+
198
+    :param result: The result of running the security-checklist
199
+        action on a unit
200
+    :type result: Dict[str, Dict[str, str]]
201
+    :rtype: int
202
+    """
203
+    passed = True
204
+    for test, result in result.items():
205
+        if result['success']:
206
+            hookenv.action_set({test: 'PASS'})
207
+        else:
208
+            hookenv.action_set({test: 'FAIL - {}'.format(result['message'])})
209
+            passed = False
210
+    if not passed:
211
+        hookenv.action_fail("One or more tests failed")
212
+    return 0 if passed else 1

+ 303
- 0
charmhelpers/contrib/openstack/audits/openstack_security_guide.py View File

@@ -0,0 +1,303 @@
1
+# Copyright 2019 Canonical Limited.
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License");
4
+# you may not use this file except in compliance with the License.
5
+# You may obtain a copy of the License at
6
+#
7
+#  http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS,
11
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+# See the License for the specific language governing permissions and
13
+# limitations under the License.
14
+
15
+import collections
16
+import configparser
17
+import glob
18
+import os.path
19
+import subprocess
20
+
21
+from charmhelpers.contrib.openstack.audits import (
22
+    audit,
23
+    AuditType,
24
+    # filters
25
+    is_audit_type,
26
+    it_has_config,
27
+)
28
+
29
+from charmhelpers.core.hookenv import (
30
+    cached,
31
+)
32
+
33
+
34
+FILE_ASSERTIONS = {
35
+    'barbican': {
36
+        # From security guide
37
+        '/etc/barbican/barbican.conf': {'group': 'barbican', 'mode': '640'},
38
+        '/etc/barbican/barbican-api-paste.ini':
39
+            {'group': 'barbican', 'mode': '640'},
40
+        '/etc/barbican/policy.json': {'group': 'barbican', 'mode': '640'},
41
+    },
42
+    'ceph-mon': {
43
+        '/var/lib/charm/ceph-mon/ceph.conf':
44
+            {'owner': 'root', 'group': 'root', 'mode': '644'},
45
+        '/etc/ceph/ceph.client.admin.keyring':
46
+            {'owner': 'ceph', 'group': 'ceph'},
47
+        '/etc/ceph/rbdmap': {'mode': '644'},
48
+        '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
49
+        '/var/lib/ceph/bootstrap-*/ceph.keyring':
50
+            {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}
51
+    },
52
+    'ceph-osd': {
53
+        '/var/lib/charm/ceph-osd/ceph.conf':
54
+            {'owner': 'ceph', 'group': 'ceph', 'mode': '644'},
55
+        '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
56
+        '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
57
+        '/var/lib/ceph/bootstrap-*/ceph.keyring':
58
+            {'owner': 'ceph', 'group': 'ceph', 'mode': '600'},
59
+        '/var/lib/ceph/radosgw':
60
+            {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
61
+    },
62
+    'cinder': {
63
+        # From security guide
64
+        '/etc/cinder/cinder.conf': {'group': 'cinder', 'mode': '640'},
65
+        '/etc/cinder/api-paste.conf': {'group': 'cinder', 'mode': '640'},
66
+        '/etc/cinder/rootwrap.conf': {'group': 'cinder', 'mode': '640'},
67
+    },
68
+    'glance': {
69
+        # From security guide
70
+        '/etc/glance/glance-api-paste.ini': {'group': 'glance', 'mode': '640'},
71
+        '/etc/glance/glance-api.conf': {'group': 'glance', 'mode': '640'},
72
+        '/etc/glance/glance-cache.conf': {'group': 'glance', 'mode': '640'},
73
+        '/etc/glance/glance-manage.conf': {'group': 'glance', 'mode': '640'},
74
+        '/etc/glance/glance-registry-paste.ini':
75
+            {'group': 'glance', 'mode': '640'},
76
+        '/etc/glance/glance-registry.conf': {'group': 'glance', 'mode': '640'},
77
+        '/etc/glance/glance-scrubber.conf': {'group': 'glance', 'mode': '640'},
78
+        '/etc/glance/glance-swift-store.conf':
79
+            {'group': 'glance', 'mode': '640'},
80
+        '/etc/glance/policy.json': {'group': 'glance', 'mode': '640'},
81
+        '/etc/glance/schema-image.json': {'group': 'glance', 'mode': '640'},
82
+        '/etc/glance/schema.json': {'group': 'glance', 'mode': '640'},
83
+    },
84
+    'keystone': {
85
+        # From security guide
86
+        '/etc/keystone/keystone.conf': {'group': 'keystone', 'mode': '640'},
87
+        '/etc/keystone/keystone-paste.ini':
88
+            {'group': 'keystone', 'mode': '640'},
89
+        '/etc/keystone/policy.json': {'group': 'keystone', 'mode': '640'},
90
+        '/etc/keystone/logging.conf': {'group': 'keystone', 'mode': '640'},
91
+        '/etc/keystone/ssl/certs/signing_cert.pem':
92
+            {'group': 'keystone', 'mode': '640'},
93
+        '/etc/keystone/ssl/private/signing_key.pem':
94
+            {'group': 'keystone', 'mode': '640'},
95
+        '/etc/keystone/ssl/certs/ca.pem': {'group': 'keystone', 'mode': '640'},
96
+    },
97
+    'manilla': {
98
+        # From security guide
99
+        '/etc/manila/manila.conf': {'group': 'manilla', 'mode': '640'},
100
+        '/etc/manila/api-paste.ini': {'group': 'manilla', 'mode': '640'},
101
+        '/etc/manila/policy.json': {'group': 'manilla', 'mode': '640'},
102
+        '/etc/manila/rootwrap.conf': {'group': 'manilla', 'mode': '640'},
103
+    },
104
+    'neutron-gateway': {
105
+        '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
106
+        '/etc/neutron/rootwrap.conf': {'mode': '640'},
107
+        '/etc/neutron/rootwrap.d': {'mode': '755'},
108
+        '/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
109
+    },
110
+    'neutron-api': {
111
+        # From security guide
112
+        '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
113
+        '/etc/nova/api-paste.ini': {'group': 'neutron', 'mode': '640'},
114
+        '/etc/neutron/rootwrap.conf': {'group': 'neutron', 'mode': '640'},
115
+        # Additional validations
116
+        '/etc/neutron/rootwrap.d': {'mode': '755'},
117
+        '/etc/neutron/neutron_lbaas.conf': {'mode': '644'},
118
+        '/etc/neutron/neutron_vpnaas.conf': {'mode': '644'},
119
+        '/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
120
+    },
121
+    'nova-cloud-controller': {
122
+        # From security guide
123
+        '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
124
+        '/etc/nova/nova.conf': {'group': 'nova', 'mode': '750'},
125
+        '/etc/nova/*': {'group': 'nova', 'mode': '640'},
126
+        # Additional validations
127
+        '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
128
+    },
129
+    'nova-compute': {
130
+        # From security guide
131
+        '/etc/nova/nova.conf': {'group': 'nova', 'mode': '640'},
132
+        '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
133
+        '/etc/nova/rootwrap.conf': {'group': 'nova', 'mode': '640'},
134
+        # Additional Validations
135
+        '/etc/nova/nova-compute.conf': {'group': 'nova', 'mode': '640'},
136
+        '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
137
+        '/etc/nova/nm.conf': {'mode': '644'},
138
+        '/etc/nova/*': {'group': 'nova', 'mode': '640'},
139
+    },
140
+    'openstack-dashboard': {
141
+        # From security guide
142
+        '/etc/openstack-dashboard/local_settings.py':
143
+            {'group': 'horizon', 'mode': '640'},
144
+    },
145
+}
146
+
147
+Ownership = collections.namedtuple('Ownership', 'owner group mode')
148
+
149
+
150
+@cached
151
+def _stat(file):
152
+    """
153
+    Get the Ownership information from a file.
154
+
155
+    :param file: The path to a file to stat
156
+    :type file: str
157
+    :returns: owner, group, and mode of the specified file
158
+    :rtype: Ownership
159
+    :raises subprocess.CalledProcessError: If the underlying stat fails
160
+    """
161
+    out = subprocess.check_output(
162
+        ['stat', '-c', '%U %G %a', file]).decode('utf-8')
163
+    return Ownership(*out.strip().split(' '))
164
+
165
+
166
+@cached
167
+def _config_ini(path):
168
+    """
169
+    Parse an ini file
170
+
171
+    :param path: The path to a file to parse
172
+    :type file: str
173
+    :returns: Configuration contained in path
174
+    :rtype: Dict
175
+    """
176
+    conf = configparser.ConfigParser()
177
+    conf.read(path)
178
+    return dict(conf)
179
+
180
+
181
+def _validate_file_ownership(owner, group, file_name):
182
+    """
183
+    Validate that a specified file is owned by `owner:group`.
184
+
185
+    :param owner: Name of the owner
186
+    :type owner: str
187
+    :param group: Name of the group
188
+    :type group: str
189
+    :param file_name: Path to the file to verify
190
+    :type file_name: str
191
+    """
192
+    try:
193
+        ownership = _stat(file_name)
194
+    except subprocess.CalledProcessError as e:
195
+        print("Error reading file: {}".format(e))
196
+        assert False, "Specified file does not exist: {}".format(file_name)
197
+    assert owner == ownership.owner, \
198
+        "{} has an incorrect owner: {} should be {}".format(
199
+            file_name, ownership.owner, owner)
200
+    assert group == ownership.group, \
201
+        "{} has an incorrect group: {} should be {}".format(
202
+            file_name, ownership.group, group)
203
+    print("Validate ownership of {}: PASS".format(file_name))
204
+
205
+
206
+def _validate_file_mode(mode, file_name):
207
+    """
208
+    Validate that a specified file has the specified permissions.
209
+
210
+    :param mode: file mode that is desires
211
+    :type owner: str
212
+    :param file_name: Path to the file to verify
213
+    :type file_name: str
214
+    """
215
+    try:
216
+        ownership = _stat(file_name)
217
+    except subprocess.CalledProcessError as e:
218
+        print("Error reading file: {}".format(e))
219
+        assert False, "Specified file does not exist: {}".format(file_name)
220
+    assert mode == ownership.mode, \
221
+        "{} has an incorrect mode: {} should be {}".format(
222
+            file_name, ownership.mode, mode)
223
+    print("Validate mode of {}: PASS".format(file_name))
224
+
225
+
226
+@cached
227
+def _config_section(config, section):
228
+    """Read the configuration file and return a section."""
229
+    path = os.path.join(config.get('config_path'), config.get('config_file'))
230
+    conf = _config_ini(path)
231
+    return conf.get(section)
232
+
233
+
234
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
235
+       it_has_config('files'))
236
+def validate_file_ownership(config):
237
+    """Verify that configuration files are owned by the correct user/group."""
238
+    files = config.get('files', {})
239
+    for file_name, options in files.items():
240
+        for key in options.keys():
241
+            if key not in ["owner", "group", "mode"]:
242
+                raise RuntimeError(
243
+                    "Invalid ownership configuration: {}".format(key))
244
+        owner = options.get('owner', config.get('owner', 'root'))
245
+        group = options.get('group', config.get('group', 'root'))
246
+        if '*' in file_name:
247
+            for file in glob.glob(file_name):
248
+                if file not in files.keys():
249
+                    if os.path.isfile(file):
250
+                        _validate_file_ownership(owner, group, file)
251
+        else:
252
+            if os.path.isfile(file_name):
253
+                _validate_file_ownership(owner, group, file_name)
254
+
255
+
256
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
257
+       it_has_config('files'))
258
+def validate_file_permissions(config):
259
+    """Verify that permissions on configuration files are secure enough."""
260
+    files = config.get('files', {})
261
+    for file_name, options in files.items():
262
+        for key in options.keys():
263
+            if key not in ["owner", "group", "mode"]:
264
+                raise RuntimeError(
265
+                    "Invalid ownership configuration: {}".format(key))
266
+        mode = options.get('mode', config.get('permissions', '600'))
267
+        if '*' in file_name:
268
+            for file in glob.glob(file_name):
269
+                if file not in files.keys():
270
+                    if os.path.isfile(file):
271
+                        _validate_file_mode(mode, file)
272
+        else:
273
+            if os.path.isfile(file_name):
274
+                _validate_file_mode(mode, file_name)
275
+
276
+
277
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
278
+def validate_uses_keystone(audit_options):
279
+    """Validate that the service uses Keystone for authentication."""
280
+    section = _config_section(audit_options, 'DEFAULT')
281
+    assert section is not None, "Missing section 'DEFAULT'"
282
+    assert section.get('auth_strategy') == "keystone", \
283
+        "Application is not using Keystone"
284
+
285
+
286
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
287
+def validate_uses_tls_for_keystone(audit_options):
288
+    """Verify that TLS is used to communicate with Keystone."""
289
+    section = _config_section(audit_options, 'keystone_authtoken')
290
+    assert section is not None, "Missing section 'keystone_authtoken'"
291
+    assert not section.get('insecure') and \
292
+        "https://" in section.get("auth_uri"), \
293
+        "TLS is not used for Keystone"
294
+
295
+
296
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
297
+def validate_uses_tls_for_glance(audit_options):
298
+    """Verify that TLS is used to communicate with Glance."""
299
+    section = _config_section(audit_options, 'glance')
300
+    assert section is not None, "Missing section 'glance'"
301
+    assert not section.get('insecure') and \
302
+        "https://" in section.get("api_servers"), \
303
+        "TLS is not used for Glance"

+ 2
- 1
charmhelpers/contrib/openstack/context.py View File

@@ -29,6 +29,7 @@ from charmhelpers.fetch import (
29 29
     filter_installed_packages,
30 30
 )
31 31
 from charmhelpers.core.hookenv import (
32
+    NoNetworkBinding,
32 33
     config,
33 34
     is_relation_made,
34 35
     local_unit,
@@ -868,7 +869,7 @@ class ApacheSSLContext(OSContextGenerator):
868 869
                     addr = network_get_primary_address(
869 870
                         ADDRESS_MAP[net_type]['binding']
870 871
                     )
871
-                except NotImplementedError:
872
+                except (NotImplementedError, NoNetworkBinding):
872 873
                     addr = fallback
873 874
 
874 875
             endpoint = resolve_address(net_type)

+ 2
- 1
charmhelpers/contrib/openstack/ip.py View File

@@ -13,6 +13,7 @@
13 13
 # limitations under the License.
14 14
 
15 15
 from charmhelpers.core.hookenv import (
16
+    NoNetworkBinding,
16 17
     config,
17 18
     unit_get,
18 19
     service_name,
@@ -175,7 +176,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
175 176
             #       configuration is not in use
176 177
             try:
177 178
                 resolved_address = network_get_primary_address(binding)
178
-            except NotImplementedError:
179
+            except (NotImplementedError, NoNetworkBinding):
179 180
                 resolved_address = fallback_addr
180 181
 
181 182
     if resolved_address is None:

+ 10
- 0
charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit View File

@@ -0,0 +1,10 @@
1
+[oslo_messaging_rabbit]
2
+{% if rabbitmq_ha_queues -%}
3
+rabbit_ha_queues = True
4
+{% endif -%}
5
+{% if rabbit_ssl_port -%}
6
+ssl = True
7
+{% endif -%}
8
+{% if rabbit_ssl_ca -%}
9
+ssl_ca_file = {{ rabbit_ssl_ca }}
10
+{% endif -%}

+ 2
- 2
charmhelpers/contrib/openstack/utils.py View File

@@ -194,7 +194,7 @@ SWIFT_CODENAMES = OrderedDict([
194 194
     ('rocky',
195 195
         ['2.18.0', '2.19.0']),
196 196
     ('stein',
197
-        ['2.19.0']),
197
+        ['2.20.0']),
198 198
 ])
199 199
 
200 200
 # >= Liberty version->codename mapping
@@ -656,7 +656,7 @@ def openstack_upgrade_available(package):
656 656
     else:
657 657
         avail_vers = get_os_version_install_source(src)
658 658
     apt.init()
659
-    return apt.version_compare(avail_vers, cur_vers) == 1
659
+    return apt.version_compare(avail_vers, cur_vers) >= 1
660 660
 
661 661
 
662 662
 def ensure_block_device(block_device):

+ 133
- 58
charmhelpers/contrib/storage/linux/ceph.py View File

@@ -59,6 +59,7 @@ from charmhelpers.core.host import (
59 59
     service_stop,
60 60
     service_running,
61 61
     umount,
62
+    cmp_pkgrevno,
62 63
 )
63 64
 from charmhelpers.fetch import (
64 65
     apt_install,
@@ -178,7 +179,6 @@ class Pool(object):
178 179
         """
179 180
         # read-only is easy, writeback is much harder
180 181
         mode = get_cache_mode(self.service, cache_pool)
181
-        version = ceph_version()
182 182
         if mode == 'readonly':
183 183
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
184 184
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@@ -186,7 +186,7 @@ class Pool(object):
186 186
         elif mode == 'writeback':
187 187
             pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
188 188
                                 'cache-mode', cache_pool, 'forward']
189
-            if version >= '10.1':
189
+            if cmp_pkgrevno('ceph-common', '10.1') >= 0:
190 190
                 # Jewel added a mandatory flag
191 191
                 pool_forward_cmd.append('--yes-i-really-mean-it')
192 192
 
@@ -196,7 +196,8 @@ class Pool(object):
196 196
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
197 197
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
198 198
 
199
-    def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
199
+    def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
200
+                device_class=None):
200 201
         """Return the number of placement groups to use when creating the pool.
201 202
 
202 203
         Returns the number of placement groups which should be specified when
@@ -229,6 +230,9 @@ class Pool(object):
229 230
             increased. NOTE: the default is primarily to handle the scenario
230 231
             where related charms requiring pools has not been upgraded to
231 232
             include an update to indicate their relative usage of the pools.
233
+        :param device_class: str. class of storage to use for basis of pgs
234
+            calculation; ceph supports nvme, ssd and hdd by default based
235
+            on presence of devices of each type in the deployment.
232 236
         :return: int.  The number of pgs to use.
233 237
         """
234 238
 
@@ -243,17 +247,20 @@ class Pool(object):
243 247
 
244 248
         # If the expected-osd-count is specified, then use the max between
245 249
         # the expected-osd-count and the actual osd_count
246
-        osd_list = get_osds(self.service)
250
+        osd_list = get_osds(self.service, device_class)
247 251
         expected = config('expected-osd-count') or 0
248 252
 
249 253
         if osd_list:
250
-            osd_count = max(expected, len(osd_list))
254
+            if device_class:
255
+                osd_count = len(osd_list)
256
+            else:
257
+                osd_count = max(expected, len(osd_list))
251 258
 
252 259
             # Log a message to provide some insight if the calculations claim
253 260
             # to be off because someone is setting the expected count and
254 261
             # there are more OSDs in reality. Try to make a proper guess
255 262
             # based upon the cluster itself.
256
-            if expected and osd_count != expected:
263
+            if not device_class and expected and osd_count != expected:
257 264
                 log("Found more OSDs than provided expected count. "
258 265
                     "Using the actual count instead", INFO)
259 266
         elif expected:
@@ -575,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name):
575 582
         raise
576 583
 
577 584
 
578
-# max_bytes should be an int or long
579
-def set_pool_quota(service, pool_name, max_bytes):
585
+def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
580 586
     """
581
-    :param service: six.string_types. The Ceph user name to run the command under
582
-    :param pool_name: six.string_types
583
-    :param max_bytes: int or long
584
-    :return: None.  Can raise CalledProcessError
587
+    :param service: The Ceph user name to run the command under
588
+    :type service: str
589
+    :param pool_name: Name of pool
590
+    :type pool_name: str
591
+    :param max_bytes: Maximum bytes quota to apply
592
+    :type max_bytes: int
593
+    :param max_objects: Maximum objects quota to apply
594
+    :type max_objects: int
595
+    :raises: subprocess.CalledProcessError
585 596
     """
586
-    # Set a byte quota on a RADOS pool in ceph.
587
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
588
-           'max_bytes', str(max_bytes)]
589
-    try:
590
-        check_call(cmd)
591
-    except CalledProcessError:
592
-        raise
597
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
598
+    if max_bytes:
599
+        cmd = cmd + ['max_bytes', str(max_bytes)]
600
+    if max_objects:
601
+        cmd = cmd + ['max_objects', str(max_objects)]
602
+    check_call(cmd)
593 603
 
594 604
 
595 605
 def remove_pool_quota(service, pool_name):
@@ -626,7 +636,8 @@ def remove_erasure_profile(service, profile_name):
626 636
 def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
627 637
                            failure_domain='host',
628 638
                            data_chunks=2, coding_chunks=1,
629
-                           locality=None, durability_estimator=None):
639
+                           locality=None, durability_estimator=None,
640
+                           device_class=None):
630 641
     """
631 642
     Create a new erasure code profile if one does not already exist for it.  Updates
632 643
     the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
@@ -640,10 +651,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
640 651
     :param coding_chunks: int
641 652
     :param locality: int
642 653
     :param durability_estimator: int
654
+    :param device_class: six.string_types
643 655
     :return: None.  Can raise CalledProcessError
644 656
     """
645
-    version = ceph_version()
646
-
647 657
     # Ensure this failure_domain is allowed by Ceph
648 658
     validator(failure_domain, six.string_types,
649 659
               ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
@@ -654,12 +664,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
654 664
     if locality is not None and durability_estimator is not None:
655 665
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
656 666
 
667
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
657 668
     # failure_domain changed in luminous
658
-    if version and version >= '12.0.0':
669
+    if luminous_or_later:
659 670
         cmd.append('crush-failure-domain=' + failure_domain)
660 671
     else:
661 672
         cmd.append('ruleset-failure-domain=' + failure_domain)
662 673
 
674
+    # device class new in luminous
675
+    if luminous_or_later and device_class:
676
+        cmd.append('crush-device-class={}'.format(device_class))
677
+    else:
678
+        log('Skipping device class configuration (ceph < 12.0.0)',
679
+            level=DEBUG)
680
+
663 681
     # Add plugin specific information
664 682
     if locality is not None:
665 683
         # For local erasure codes
@@ -744,20 +762,26 @@ def pool_exists(service, name):
744 762
     return name in out.split()
745 763
 
746 764
 
747
-def get_osds(service):
765
+def get_osds(service, device_class=None):
748 766
     """Return a list of all Ceph Object Storage Daemons currently in the
749
-    cluster.
767
+    cluster (optionally filtered by storage device class).
768
+
769
+    :param device_class: Class of storage device for OSD's
770
+    :type device_class: str
750 771
     """
751
-    version = ceph_version()
752
-    if version and version >= '0.56':
772
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
773
+    if luminous_or_later and device_class:
774
+        out = check_output(['ceph', '--id', service,
775
+                            'osd', 'crush', 'class',
776
+                            'ls-osd', device_class,
777
+                            '--format=json'])
778
+    else:
753 779
         out = check_output(['ceph', '--id', service,
754 780
                             'osd', 'ls',
755 781
                             '--format=json'])
756
-        if six.PY3:
757
-            out = out.decode('UTF-8')
758
-        return json.loads(out)
759
-
760
-    return None
782
+    if six.PY3:
783
+        out = out.decode('UTF-8')
784
+    return json.loads(out)
761 785
 
762 786
 
763 787
 def install():
@@ -811,7 +835,7 @@ def set_app_name_for_pool(client, pool, name):
811 835
 
812 836
     :raises: CalledProcessError if ceph call fails
813 837
     """
814
-    if ceph_version() >= '12.0.0':
838
+    if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
815 839
         cmd = ['ceph', '--id', client, 'osd', 'pool',
816 840
                'application', 'enable', pool, name]
817 841
         check_call(cmd)
@@ -1091,22 +1115,6 @@ def ensure_ceph_keyring(service, user=None, group=None,
1091 1115
     return True
1092 1116
 
1093 1117
 
1094
-def ceph_version():
1095
-    """Retrieve the local version of ceph."""
1096
-    if os.path.exists('/usr/bin/ceph'):
1097
-        cmd = ['ceph', '-v']
1098
-        output = check_output(cmd)
1099
-        if six.PY3:
1100
-            output = output.decode('UTF-8')
1101
-        output = output.split()
1102
-        if len(output) > 3:
1103
-            return output[2]
1104
-        else:
1105
-            return None
1106
-    else:
1107
-        return None
1108
-
1109
-
1110 1118
 class CephBrokerRq(object):
1111 1119
     """Ceph broker request.
1112 1120
 
@@ -1147,14 +1155,47 @@ class CephBrokerRq(object):
1147 1155
             'object-prefix-permissions': object_prefix_permissions})
1148 1156
 
1149 1157
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
1150
-                           weight=None, group=None, namespace=None):
1151
-        """Adds an operation to create a pool.
1152
-
1153
-        @param pg_num setting:  optional setting. If not provided, this value
1154
-        will be calculated by the broker based on how many OSDs are in the
1155
-        cluster at the time of creation. Note that, if provided, this value
1156
-        will be capped at the current available maximum.
1157
-        @param weight: the percentage of data the pool makes up
1158
+                           weight=None, group=None, namespace=None,
1159
+                           app_name=None, max_bytes=None, max_objects=None):
1160
+        """DEPRECATED: Use ``add_op_create_replicated_pool()`` or
1161
+                       ``add_op_create_erasure_pool()`` instead.
1162
+        """
1163
+        return self.add_op_create_replicated_pool(
1164
+            name, replica_count=replica_count, pg_num=pg_num, weight=weight,
1165
+            group=group, namespace=namespace, app_name=app_name,
1166
+            max_bytes=max_bytes, max_objects=max_objects)
1167
+
1168
+    def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
1169
+                                      weight=None, group=None, namespace=None,
1170
+                                      app_name=None, max_bytes=None,
1171
+                                      max_objects=None):
1172
+        """Adds an operation to create a replicated pool.
1173
+
1174
+        :param name: Name of pool to create
1175
+        :type name: str
1176
+        :param replica_count: Number of copies Ceph should keep of your data.
1177
+        :type replica_count: int
1178
+        :param pg_num: Request specific number of Placement Groups to create
1179
+                       for pool.
1180
+        :type pg_num: int
1181
+        :param weight: The percentage of data that is expected to be contained
1182
+                       in the pool from the total available space on the OSDs.
1183
+                       Used to calculate number of Placement Groups to create
1184
+                       for pool.
1185
+        :type weight: float
1186
+        :param group: Group to add pool to
1187
+        :type group: str
1188
+        :param namespace: Group namespace
1189
+        :type namespace: str
1190
+        :param app_name: (Optional) Tag pool with application name.  Note that
1191
+                         there is certain protocols emerging upstream with
1192
+                         regard to meaningful application names to use.
1193
+                         Examples are ``rbd`` and ``rgw``.
1194
+        :type app_name: str
1195
+        :param max_bytes: Maximum bytes quota to apply
1196
+        :type max_bytes: int
1197
+        :param max_objects: Maximum objects quota to apply
1198
+        :type max_objects: int
1158 1199
         """
1159 1200
         if pg_num and weight:
1160 1201
             raise ValueError('pg_num and weight are mutually exclusive')
@@ -1162,7 +1203,41 @@ class CephBrokerRq(object):
1162 1203
         self.ops.append({'op': 'create-pool', 'name': name,
1163 1204
                          'replicas': replica_count, 'pg_num': pg_num,
1164 1205
                          'weight': weight, 'group': group,
1165
-                         'group-namespace': namespace})
1206
+                         'group-namespace': namespace, 'app-name': app_name,
1207
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
1208
+
1209
+    def add_op_create_erasure_pool(self, name, erasure_profile=None,
1210
+                                   weight=None, group=None, app_name=None,
1211
+                                   max_bytes=None, max_objects=None):
1212
+        """Adds an operation to create a erasure coded pool.
1213
+
1214
+        :param name: Name of pool to create
1215
+        :type name: str
1216
+        :param erasure_profile: Name of erasure code profile to use.  If not
1217
+                                set the ceph-mon unit handling the broker
1218
+                                request will set its default value.
1219
+        :type erasure_profile: str
1220
+        :param weight: The percentage of data that is expected to be contained
1221
+                       in the pool from the total available space on the OSDs.
1222
+        :type weight: float
1223
+        :param group: Group to add pool to
1224
+        :type group: str
1225
+        :param app_name: (Optional) Tag pool with application name.  Note that
1226
+                         there is certain protocols emerging upstream with
1227
+                         regard to meaningful application names to use.
1228
+                         Examples are ``rbd`` and ``rgw``.
1229
+        :type app_name: str
1230
+        :param max_bytes: Maximum bytes quota to apply
1231
+        :type max_bytes: int
1232
+        :param max_objects: Maximum objects quota to apply
1233
+        :type max_objects: int
1234
+        """
1235
+        self.ops.append({'op': 'create-pool', 'name': name,
1236
+                         'pool-type': 'erasure',
1237
+                         'erasure-profile': erasure_profile,
1238
+                         'weight': weight,
1239
+                         'group': group, 'app-name': app_name,
1240
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
1166 1241
 
1167 1242
     def set_ops(self, ops):
1168 1243
         """Set request ops to provided value.

+ 74
- 0
charmhelpers/core/hookenv.py View File

@@ -50,6 +50,11 @@ TRACE = "TRACE"
50 50
 MARKER = object()
51 51
 SH_MAX_ARG = 131071
52 52
 
53
+
54
+RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
55
+                 'This may not be compatible with software you are '
56
+                 'running in your shell.')
57
+
53 58
 cache = {}
54 59
 
55 60
 
@@ -1414,3 +1419,72 @@ def unit_doomed(unit=None):
1414 1419
     # I don't think 'dead' units ever show up in the goal-state, but
1415 1420
     # check anyway in addition to 'dying'.
1416 1421
     return units[unit]['status'] in ('dying', 'dead')
1422
+
1423
+
1424
+def env_proxy_settings(selected_settings=None):
1425
+    """Get proxy settings from process environment variables.
1426
+
1427
+    Get charm proxy settings from environment variables that correspond to
1428
+    juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
1429
+    see lp:1782236) in a format suitable for passing to an application that
1430
+    reacts to proxy settings passed as environment variables. Some applications
1431
+    support lowercase or uppercase notation (e.g. curl), some support only
1432
+    lowercase (e.g. wget), there are also subjectively rare cases of only
1433
+    uppercase notation support. no_proxy CIDR and wildcard support also varies
1434
+    between runtimes and applications as there is no enforced standard.
1435
+
1436
+    Some applications may connect to multiple destinations and expose config
1437
+    options that would affect only proxy settings for a specific destination
1438
+    these should be handled in charms in an application-specific manner.
1439
+
1440
+    :param selected_settings: format only a subset of possible settings
1441
+    :type selected_settings: list
1442
+    :rtype: Option(None, dict[str, str])
1443
+    """
1444
+    SUPPORTED_SETTINGS = {
1445
+        'http': 'HTTP_PROXY',
1446
+        'https': 'HTTPS_PROXY',
1447
+        'no_proxy': 'NO_PROXY',
1448
+        'ftp': 'FTP_PROXY'
1449
+    }
1450
+    if selected_settings is None:
1451
+        selected_settings = SUPPORTED_SETTINGS
1452
+
1453
+    selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
1454
+                     if k in selected_settings]
1455
+    proxy_settings = {}
1456
+    for var in selected_vars:
1457
+        var_val = os.getenv(var)
1458
+        if var_val:
1459
+            proxy_settings[var] = var_val
1460
+            proxy_settings[var.lower()] = var_val
1461
+        # Now handle juju-prefixed environment variables. The legacy vs new
1462
+        # environment variable usage is mutually exclusive
1463
+        charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
1464
+        if charm_var_val:
1465
+            proxy_settings[var] = charm_var_val
1466
+            proxy_settings[var.lower()] = charm_var_val
1467
+    if 'no_proxy' in proxy_settings:
1468
+        if _contains_range(proxy_settings['no_proxy']):
1469
+            log(RANGE_WARNING, level=WARNING)
1470
+    return proxy_settings if proxy_settings else None
1471
+
1472
+
1473
+def _contains_range(addresses):
1474
+    """Check for cidr or wildcard domain in a string.
1475
+
1476
+    Given a string comprising a comma seperated list of ip addresses
1477
+    and domain names, determine whether the string contains IP ranges
1478
+    or wildcard domains.
1479
+
1480
+    :param addresses: comma seperated list of domains and ip addresses.
1481
+    :type addresses: str
1482
+    """
1483
+    return (
1484
+        # Test for cidr (e.g. 10.20.20.0/24)
1485
+        "/" in addresses or
1486
+        # Test for wildcard domains (*.foo.com or .foo.com)
1487
+        "*" in addresses or
1488
+        addresses.startswith(".") or
1489
+        ",." in addresses or
1490
+        " ." in addresses)

+ 144
- 39
charmhelpers/fetch/ubuntu.py View File

@@ -19,15 +19,14 @@ import re
19 19
 import six
20 20
 import time
21 21
 import subprocess
22
-from tempfile import NamedTemporaryFile
23 22
 
24
-from charmhelpers.core.host import (
25
-    lsb_release
26
-)
23
+from charmhelpers.core.host import get_distrib_codename
24
+
27 25
 from charmhelpers.core.hookenv import (
28 26
     log,
29 27
     DEBUG,
30 28
     WARNING,
29
+    env_proxy_settings,
31 30
 )
32 31
 from charmhelpers.fetch import SourceConfigError, GPGKeyError
33 32
 
@@ -303,12 +302,17 @@ def import_key(key):
303 302
     """Import an ASCII Armor key.
304 303
 
305 304
     A Radix64 format keyid is also supported for backwards
306
-    compatibility, but should never be used; the key retrieval
307
-    mechanism is insecure and subject to man-in-the-middle attacks
308
-    voiding all signature checks using that key.
309
-
310
-    :param keyid: The key in ASCII armor format,
311
-                  including BEGIN and END markers.
305
+    compatibility. In this case Ubuntu keyserver will be
306
+    queried for a key via HTTPS by its keyid. This method
307
+    is less preferrable because https proxy servers may
308
+    require traffic decryption which is equivalent to a
309
+    man-in-the-middle attack (a proxy server impersonates
310
+    keyserver TLS certificates and has to be explicitly
311
+    trusted by the system).
312
+
313
+    :param key: A GPG key in ASCII armor format,
314
+                  including BEGIN and END markers or a keyid.
315
+    :type key: (bytes, str)
312 316
     :raises: GPGKeyError if the key could not be imported
313 317
     """
314 318
     key = key.strip()
@@ -319,35 +323,131 @@ def import_key(key):
319 323
         log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
320 324
         if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
321 325
                 '-----END PGP PUBLIC KEY BLOCK-----' in key):
322
-            log("Importing ASCII Armor PGP key", level=DEBUG)
323
-            with NamedTemporaryFile() as keyfile:
324
-                with open(keyfile.name, 'w') as fd:
325
-                    fd.write(key)
326
-                    fd.write("\n")
327
-                cmd = ['apt-key', 'add', keyfile.name]
328
-                try:
329
-                    subprocess.check_call(cmd)
330
-                except subprocess.CalledProcessError:
331
-                    error = "Error importing PGP key '{}'".format(key)
332
-                    log(error)
333
-                    raise GPGKeyError(error)
326
+            log("Writing provided PGP key in the binary format", level=DEBUG)
327
+            if six.PY3:
328
+                key_bytes = key.encode('utf-8')
329
+            else:
330
+                key_bytes = key
331
+            key_name = _get_keyid_by_gpg_key(key_bytes)
332
+            key_gpg = _dearmor_gpg_key(key_bytes)
333
+            _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
334 334
         else:
335 335
             raise GPGKeyError("ASCII armor markers missing from GPG key")
336 336
     else:
337
-        # We should only send things obviously not a keyid offsite
338
-        # via this unsecured protocol, as it may be a secret or part
339
-        # of one.
340 337
         log("PGP key found (looks like Radix64 format)", level=WARNING)
341
-        log("INSECURLY importing PGP key from keyserver; "
338
+        log("SECURELY importing PGP key from keyserver; "
342 339
             "full key not provided.", level=WARNING)
343
-        cmd = ['apt-key', 'adv', '--keyserver',
344
-               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
345
-        try:
346
-            _run_with_retries(cmd)
347
-        except subprocess.CalledProcessError:
348
-            error = "Error importing PGP key '{}'".format(key)
349
-            log(error)
350
-            raise GPGKeyError(error)
340
+        # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL
341
+        # to retrieve GPG keys. `apt-key adv` command is deprecated as is
342
+        # apt-key in general as noted in its manpage. See lp:1433761 for more
343
+        # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop
344
+        # gpg
345
+        key_asc = _get_key_by_keyid(key)
346
+        # write the key in GPG format so that apt-key list shows it
347
+        key_gpg = _dearmor_gpg_key(key_asc)
348
+        _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
349
+
350
+
351
+def _get_keyid_by_gpg_key(key_material):
352
+    """Get a GPG key fingerprint by GPG key material.
353
+    Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
354
+    or binary GPG key material. Can be used, for example, to generate file
355
+    names for keys passed via charm options.
356
+
357
+    :param key_material: ASCII armor-encoded or binary GPG key material
358
+    :type key_material: bytes
359
+    :raises: GPGKeyError if invalid key material has been provided
360
+    :returns: A GPG key fingerprint
361
+    :rtype: str
362
+    """
363
+    # Use the same gpg command for both Xenial and Bionic
364
+    cmd = 'gpg --with-colons --with-fingerprint'
365
+    ps = subprocess.Popen(cmd.split(),
366
+                          stdout=subprocess.PIPE,
367
+                          stderr=subprocess.PIPE,
368
+                          stdin=subprocess.PIPE)
369
+    out, err = ps.communicate(input=key_material)
370
+    if six.PY3:
371
+        out = out.decode('utf-8')
372
+        err = err.decode('utf-8')
373
+    if 'gpg: no valid OpenPGP data found.' in err:
374
+        raise GPGKeyError('Invalid GPG key material provided')
375
+    # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
376
+    return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1)
377
+
378
+
379
+def _get_key_by_keyid(keyid):
380
+    """Get a key via HTTPS from the Ubuntu keyserver.
381
+    Different key ID formats are supported by SKS keyservers (the longer ones
382
+    are more secure, see "dead beef attack" and https://evil32.com/). Since
383
+    HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will
384
+    impersonate keyserver.ubuntu.com and generate a certificate with
385
+    keyserver.ubuntu.com in the CN field or in SubjAltName fields of a
386
+    certificate. If such proxy behavior is expected it is necessary to add the
387
+    CA certificate chain containing the intermediate CA of the SSLBump proxy to
388
+    every machine that this code runs on via ca-certs cloud-init directive (via
389
+    cloudinit-userdata model-config) or via other means (such as through a
390
+    custom charm option). Also note that DNS resolution for the hostname in a
391
+    URL is done at a proxy server - not at the client side.
392
+
393
+    8-digit (32 bit) key ID
394
+    https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
395
+    16-digit (64 bit) key ID
396
+    https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
397
+    40-digit key ID:
398
+    https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
399
+
400
+    :param keyid: An 8, 16 or 40 hex digit keyid to find a key for
401
+    :type keyid: (bytes, str)
402
+    :returns: A key material for the specified GPG key id
403
+    :rtype: (str, bytes)
404
+    :raises: subprocess.CalledProcessError
405
+    """
406
+    # options=mr - machine-readable output (disables html wrappers)
407
+    keyserver_url = ('https://keyserver.ubuntu.com'
408
+                     '/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
409
+    curl_cmd = ['curl', keyserver_url.format(keyid)]
410
+    # use proxy server settings in order to retrieve the key
411
+    return subprocess.check_output(curl_cmd,
412
+                                   env=env_proxy_settings(['https']))
413
+
414
+
415
+def _dearmor_gpg_key(key_asc):
416
+    """Converts a GPG key in the ASCII armor format to the binary format.
417
+
418
+    :param key_asc: A GPG key in ASCII armor format.
419
+    :type key_asc: (str, bytes)
420
+    :returns: A GPG key in binary format
421
+    :rtype: (str, bytes)
422
+    :raises: GPGKeyError
423
+    """
424
+    ps = subprocess.Popen(['gpg', '--dearmor'],
425
+                          stdout=subprocess.PIPE,
426
+                          stderr=subprocess.PIPE,
427
+                          stdin=subprocess.PIPE)
428
+    out, err = ps.communicate(input=key_asc)
429
+    # no need to decode output as it is binary (invalid utf-8), only error
430
+    if six.PY3:
431
+        err = err.decode('utf-8')
432
+    if 'gpg: no valid OpenPGP data found.' in err:
433
+        raise GPGKeyError('Invalid GPG key material. Check your network setup'
434
+                          ' (MTU, routing, DNS) and/or proxy server settings'
435
+                          ' as well as destination keyserver status.')
436
+    else:
437
+        return out
438
+
439
+
440
+def _write_apt_gpg_keyfile(key_name, key_material):
441
+    """Writes GPG key material into a file at a provided path.
442
+
443
+    :param key_name: A key name to use for a key file (could be a fingerprint)
444
+    :type key_name: str
445
+    :param key_material: A GPG key material (binary)
446
+    :type key_material: (str, bytes)
447
+    """
448
+    with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
449
+              'wb') as keyf:
450
+        keyf.write(key_material)
351 451
 
352 452
 
353 453
 def add_source(source, key=None, fail_invalid=False):
@@ -442,13 +542,13 @@ def add_source(source, key=None, fail_invalid=False):
442 542
 def _add_proposed():
443 543
     """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
444 544
 
445
-    Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for
545
+    Uses get_distrib_codename to determine the correct stanza for
446 546
     the deb line.
447 547
 
448 548
     For intel architecutres PROPOSED_POCKET is used for the release, but for
449 549
     other architectures PROPOSED_PORTS_POCKET is used for the release.
450 550
     """
451
-    release = lsb_release()['DISTRIB_CODENAME']
551
+    release = get_distrib_codename()
452 552
     arch = platform.machine()
453 553
     if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
454 554
         raise SourceConfigError("Arch {} not supported for (distro-)proposed"
@@ -461,11 +561,16 @@ def _add_apt_repository(spec):
461 561
     """Add the spec using add_apt_repository
462 562
 
463 563
     :param spec: the parameter to pass to add_apt_repository
564
+    :type spec: str
464 565
     """
465 566
     if '{series}' in spec:
466
-        series = lsb_release()['DISTRIB_CODENAME']
567
+        series = get_distrib_codename()
467 568
         spec = spec.replace('{series}', series)
468
-    _run_with_retries(['add-apt-repository', '--yes', spec])
569
+    # software-properties package for bionic properly reacts to proxy settings
570
+    # passed as environment variables (See lp:1433761). This is not the case
571
+    # LTS and non-LTS releases below bionic.
572
+    _run_with_retries(['add-apt-repository', '--yes', spec],
573
+                      cmd_env=env_proxy_settings(['https']))
469 574
 
470 575
 
471 576
 def _add_cloud_pocket(pocket):
@@ -534,7 +639,7 @@ def _verify_is_ubuntu_rel(release, os_release):
534 639
     :raises: SourceConfigError if the release is not the same as the ubuntu
535 640
         release.
536 641
     """
537
-    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
642
+    ubuntu_rel = get_distrib_codename()
538 643
     if release != ubuntu_rel:
539 644
         raise SourceConfigError(
540 645
             'Invalid Cloud Archive release specified: {}-{} on this Ubuntu'

+ 5
- 1
templates/ocata/cinder.conf View File

@@ -14,6 +14,10 @@ auth_strategy = keystone
14 14
 state_path = /var/lib/cinder
15 15
 osapi_volume_workers = {{ workers }}
16 16
 
17
+{% if transport_url %}
18
+transport_url = {{ transport_url }}
19
+{% endif %}
20
+
17 21
 {% if use_internal_endpoints -%}
18 22
 swift_catalog_info = object-store:swift:internalURL
19 23
 keystone_catalog_info = identity:Identity Service:internalURL
@@ -51,7 +55,7 @@ volume_usage_audit_period = {{ volume_usage_audit_period }}
51 55
 
52 56
 {% include "parts/section-database" %}
53 57
 
54
-{% include "section-rabbitmq-oslo" %}
58
+{% include "section-oslo-messaging-rabbit" %}
55 59
 
56 60
 {% include "section-oslo-notifications" %}
57 61
 

+ 0
- 48
tests/basic_deployment.py View File

@@ -705,54 +705,6 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
705 705
             msg = u.relation_error('cinder amqp', ret)
706 706
             amulet.raise_status(amulet.FAIL, msg=msg)
707 707
 
708
-    def test_300_cinder_config(self):
709
-        """Verify the data in the cinder.conf file."""
710
-        u.log.debug('Checking cinder config file data...')
711
-        unit = self.cinder_sentry
712
-        conf = '/etc/cinder/cinder.conf'
713
-        unit_mq = self.rabbitmq_sentry
714
-        rel_mq_ci = unit_mq.relation('amqp', 'cinder:amqp')
715
-
716
-        expected = {
717
-            'DEFAULT': {
718
-                'use_syslog': 'False',
719
-                'debug': 'False',
720
-                'verbose': 'False',
721
-                'iscsi_helper': 'tgtadm',
722
-                'auth_strategy': 'keystone',
723
-            },
724
-        }
725
-        if self._get_openstack_release() < self.xenial_ocata:
726
-            expected['DEFAULT']['volume_group'] = 'cinder-volumes'
727
-            expected['DEFAULT']['volumes_dir'] = '/var/lib/cinder/volumes'
728
-        else:
729
-            expected['DEFAULT']['enabled_backends'] = 'LVM'
730
-            expected['LVM'] = {
731
-                'volume_group': 'cinder-volumes',
732
-                'volumes_dir': '/var/lib/cinder/volumes',
733
-                'volume_name_template': 'volume-%s',
734
-                'volume_driver': 'cinder.volume.drivers.lvm.LVMVolumeDriver',
735
-                'volume_backend_name': 'LVM'}
736
-        expected_rmq = {
737
-            'rabbit_userid': 'cinder',
738
-            'rabbit_virtual_host': 'openstack',
739
-            'rabbit_password': rel_mq_ci['password'],
740
-            'rabbit_host': rel_mq_ci['hostname'],
741
-        }
742
-
743
-        if self._get_openstack_release() >= self.trusty_kilo:
744
-            # Kilo or later
745
-            expected['oslo_messaging_rabbit'] = expected_rmq
746
-        else:
747
-            # Juno or earlier
748
-            expected['DEFAULT'].update(expected_rmq)
749
-
750
-        for section, pairs in expected.iteritems():
751
-            ret = u.validate_config_data(unit, conf, section, pairs)
752
-            if ret:
753
-                message = "cinder config error: {}".format(ret)
754
-                amulet.raise_status(amulet.FAIL, msg=message)
755
-
756 708
     def test_301_cinder_logging_config(self):
757 709
         """Verify the data in the cinder logging conf file."""
758 710
         u.log.debug('Checking cinder logging config file data...')

Loading…
Cancel
Save