Browse Source

Add security-checklist action

Change-Id: I39b53cb049b84a9ccde91adebd5aec6ea7a8168f
changes/86/639986/4
Chris MacNaughton 6 months ago
parent
commit
59adca21e2

+ 2
- 0
actions.yaml View File

@@ -4,3 +4,5 @@ pause:
4 4
     description: Pause the neutron-gateway unit.
5 5
 resume:
6 6
    description: Resume the neutron-gateway unit.
7
+security-checklist:
8
+  description: Validate the running configuration against the OpenStack security guides checklist

+ 1
- 0
actions/security-checklist View File

@@ -0,0 +1 @@
1
+security_checklist.py

+ 51
- 0
actions/security_checklist.py View File

@@ -0,0 +1,51 @@
1
+#!/usr/bin/env python3
2
+#
3
+# Copyright 2019 Canonical Ltd
4
+#
5
+# Licensed under the Apache License, Version 2.0 (the "License");
6
+# you may not use this file except in compliance with the License.
7
+# You may obtain a copy of the License at
8
+#
9
+#  http://www.apache.org/licenses/LICENSE-2.0
10
+#
11
+# Unless required by applicable law or agreed to in writing, software
12
+# distributed under the License is distributed on an "AS IS" BASIS,
13
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+# See the License for the specific language governing permissions and
15
+# limitations under the License.
16
+
17
+import configparser
18
+import sys
19
+
20
+sys.path.append('hooks')
21
+
22
+import charmhelpers.contrib.openstack.audits as audits
23
+from charmhelpers.contrib.openstack.audits import (
24
+    openstack_security_guide,
25
+)
26
+
27
+
28
+# Via the openstack_security_guide above, we are running the following
29
+# security assertions automatically:
30
+#
31
+# - Check-Neutron-01 - validate-file-ownership
32
+# - Check-Neutron-02 - validate-file-permissions
33
+
34
+
35
+def main():
36
+    config = {
37
+        'audit_type': audits.AuditType.OpenStackSecurityGuide,
38
+        'files': openstack_security_guide.FILE_ASSERTIONS['neutron-gateway'],
39
+        'excludes': [
40
+            'validate-uses-keystone',
41
+            'validate-uses-tls-for-glance',
42
+            'validate-uses-tls-for-keystone',
43
+        ],
44
+    }
45
+    conf = configparser.ConfigParser()
46
+    conf.read("/etc/neutron/neutron.conf")
47
+    config['neutron_config'] = dict(conf)
48
+    return audits.action_parse_results(audits.run(config))
49
+
50
+if __name__ == "__main__":
51
+    sys.exit(main())

+ 90
- 12
hooks/charmhelpers/contrib/openstack/audits/__init__.py View File

@@ -19,7 +19,7 @@ from enum import Enum
19 19
 import traceback
20 20
 
21 21
 from charmhelpers.core.host import cmp_pkgrevno
22
-
22
+import charmhelpers.contrib.openstack.utils as openstack_utils
23 23
 import charmhelpers.core.hookenv as hookenv
24 24
 
25 25
 
@@ -39,7 +39,7 @@ def audit(*args):
39 39
     deployed system that matches the given configuration
40 40
 
41 41
     :param args: List of functions to filter tests against
42
-    :type args: List[Callable(Config)]
42
+    :type args: List[Callable[Dict]]
43 43
     """
44 44
     def wrapper(f):
45 45
         test_name = f.__name__
@@ -58,28 +58,92 @@ def audit(*args):
58 58
 
59 59
 
60 60
 def is_audit_type(*args):
61
-    """This audit is included in the specified kinds of audits."""
62
-    def should_run(audit_options):
61
+    """This audit is included in the specified kinds of audits.
62
+
63
+    :param *args: List of AuditTypes to include this audit in
64
+    :type args: List[AuditType]
65
+    :rtype: Callable[Dict]
66
+    """
67
+    def _is_audit_type(audit_options):
63 68
         if audit_options.get('audit_type') in args:
64 69
             return True
65 70
         else:
66 71
             return False
67
-    return should_run
72
+    return _is_audit_type
68 73
 
69 74
 
70 75
 def since_package(pkg, pkg_version):
71
-    """This audit should be run after the specified package version (incl)."""
72
-    return lambda audit_options=None: cmp_pkgrevno(pkg, pkg_version) >= 0
76
+    """This audit should be run after the specified package version (incl).
77
+
78
+    :param pkg: Package name to compare
79
+    :type pkg: str
80
+    :param release: The package version
81
+    :type release: str
82
+    :rtype: Callable[Dict]
83
+    """
84
+    def _since_package(audit_options=None):
85
+        return cmp_pkgrevno(pkg, pkg_version) >= 0
86
+
87
+    return _since_package
73 88
 
74 89
 
75 90
 def before_package(pkg, pkg_version):
76
-    """This audit should be run before the specified package version (excl)."""
77
-    return lambda audit_options=None: not since_package(pkg, pkg_version)()
91
+    """This audit should be run before the specified package version (excl).
92
+
93
+    :param pkg: Package name to compare
94
+    :type pkg: str
95
+    :param release: The package version
96
+    :type release: str
97
+    :rtype: Callable[Dict]
98
+    """
99
+    def _before_package(audit_options=None):
100
+        return not since_package(pkg, pkg_version)()
101
+
102
+    return _before_package
103
+
104
+
105
+def since_openstack_release(pkg, release):
106
+    """This audit should run after the specified OpenStack version (incl).
107
+
108
+    :param pkg: Package name to compare
109
+    :type pkg: str
110
+    :param release: The OpenStack release codename
111
+    :type release: str
112
+    :rtype: Callable[Dict]
113
+    """
114
+    def _since_openstack_release(audit_options=None):
115
+        _release = openstack_utils.get_os_codename_package(pkg)
116
+        return openstack_utils.CompareOpenStackReleases(_release) >= release
117
+
118
+    return _since_openstack_release
119
+
120
+
121
+def before_openstack_release(pkg, release):
122
+    """This audit should run before the specified OpenStack version (excl).
123
+
124
+    :param pkg: Package name to compare
125
+    :type pkg: str
126
+    :param release: The OpenStack release codename
127
+    :type release: str
128
+    :rtype: Callable[Dict]
129
+    """
130
+    def _before_openstack_release(audit_options=None):
131
+        return not since_openstack_release(pkg, release)()
132
+
133
+    return _before_openstack_release
78 134
 
79 135
 
80 136
 def it_has_config(config_key):
81
-    """This audit should be run based on specified config keys."""
82
-    return lambda audit_options: audit_options.get(config_key) is not None
137
+    """This audit should be run based on specified config keys.
138
+
139
+    :param config_key: Config key to look for
140
+    :type config_key: str
141
+    :rtype: Callable[Dict]
142
+    """
143
+    def _it_has_config(audit_options):
144
+        return audit_options.get(config_key) is not None
145
+
146
+    return _it_has_config
83 147
 
84 148
 
85 149
 def run(audit_options):
@@ -87,11 +151,19 @@ def run(audit_options):
87 151
 
88 152
     :param audit_options: Configuration for the audit
89 153
     :type audit_options: Config
154
+
155
+    :rtype: Dict[str, str]
90 156
     """
91 157
     errors = {}
92 158
     results = {}
93 159
     for name, audit in sorted(_audits.items()):
94 160
         result_name = name.replace('_', '-')
161
+        if result_name in audit_options.get('excludes', []):
162
+            print(
163
+                "Skipping {} because it is "
164
+                "excluded in audit config"
165
+                .format(result_name))
166
+            continue
95 167
         if all(p(audit_options) for p in audit.filters):
96 168
             try:
97 169
                 audit.func(audit_options)
@@ -121,7 +193,13 @@ def run(audit_options):
121 193
 
122 194
 
123 195
 def action_parse_results(result):
124
-    """Parse the result of `run` in the context of an action."""
196
+    """Parse the result of `run` in the context of an action.
197
+
198
+    :param result: The result of running the security-checklist
199
+        action on a unit
200
+    :type result: Dict[str, Dict[str, str]]
201
+    :rtype: int
202
+    """
125 203
     passed = True
126 204
     for test, result in result.items():
127 205
         if result['success']:

+ 86
- 22
hooks/charmhelpers/contrib/storage/linux/ceph.py View File

@@ -582,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name):
582 582
         raise
583 583
 
584 584
 
585
-# max_bytes should be an int or long
586
-def set_pool_quota(service, pool_name, max_bytes):
585
+def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
587 586
     """
588
-    :param service: six.string_types. The Ceph user name to run the command under
589
-    :param pool_name: six.string_types
590
-    :param max_bytes: int or long
591
-    :return: None.  Can raise CalledProcessError
587
+    :param service: The Ceph user name to run the command under
588
+    :type service: str
589
+    :param pool_name: Name of pool
590
+    :type pool_name: str
591
+    :param max_bytes: Maximum bytes quota to apply
592
+    :type max_bytes: int
593
+    :param max_objects: Maximum objects quota to apply
594
+    :type max_objects: int
595
+    :raises: subprocess.CalledProcessError
592 596
     """
593
-    # Set a byte quota on a RADOS pool in ceph.
594
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
595
-           'max_bytes', str(max_bytes)]
596
-    try:
597
-        check_call(cmd)
598
-    except CalledProcessError:
599
-        raise
597
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
598
+    if max_bytes:
599
+        cmd = cmd + ['max_bytes', str(max_bytes)]
600
+    if max_objects:
601
+        cmd = cmd + ['max_objects', str(max_objects)]
602
+    check_call(cmd)
600 603
 
601 604
 
602 605
 def remove_pool_quota(service, pool_name):
@@ -1153,19 +1156,46 @@ class CephBrokerRq(object):
1153 1156
 
1154 1157
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
1155 1158
                            weight=None, group=None, namespace=None,
1156
-                           app_name=None):
1157
-        """Adds an operation to create a pool.
1158
-
1159
-        @param pg_num setting:  optional setting. If not provided, this value
1160
-        will be calculated by the broker based on how many OSDs are in the
1161
-        cluster at the time of creation. Note that, if provided, this value
1162
-        will be capped at the current available maximum.
1163
-        @param weight: the percentage of data the pool makes up
1159
+                           app_name=None, max_bytes=None, max_objects=None):
1160
+        """DEPRECATED: Use ``add_op_create_replicated_pool()`` or
1161
+                       ``add_op_create_erasure_pool()`` instead.
1162
+        """
1163
+        return self.add_op_create_replicated_pool(
1164
+            name, replica_count=replica_count, pg_num=pg_num, weight=weight,
1165
+            group=group, namespace=namespace, app_name=app_name,
1166
+            max_bytes=max_bytes, max_objects=max_objects)
1167
+
1168
+    def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
1169
+                                      weight=None, group=None, namespace=None,
1170
+                                      app_name=None, max_bytes=None,
1171
+                                      max_objects=None):
1172
+        """Adds an operation to create a replicated pool.
1173
+
1174
+        :param name: Name of pool to create
1175
+        :type name: str
1176
+        :param replica_count: Number of copies Ceph should keep of your data.
1177
+        :type replica_count: int
1178
+        :param pg_num: Request specific number of Placement Groups to create
1179
+                       for pool.
1180
+        :type pg_num: int
1181
+        :param weight: The percentage of data that is expected to be contained
1182
+                       in the pool from the total available space on the OSDs.
1183
+                       Used to calculate number of Placement Groups to create
1184
+                       for pool.
1185
+        :type weight: float
1186
+        :param group: Group to add pool to
1187
+        :type group: str
1188
+        :param namespace: Group namespace
1189
+        :type namespace: str
1164 1190
         :param app_name: (Optional) Tag pool with application name.  Note that
1165 1191
                          there is certain protocols emerging upstream with
1166 1192
                          regard to meaningful application names to use.
1167 1193
                          Examples are ``rbd`` and ``rgw``.
1168 1194
         :type app_name: str
1195
+        :param max_bytes: Maximum bytes quota to apply
1196
+        :type max_bytes: int
1197
+        :param max_objects: Maximum objects quota to apply
1198
+        :type max_objects: int
1169 1199
         """
1170 1200
         if pg_num and weight:
1171 1201
             raise ValueError('pg_num and weight are mutually exclusive')
@@ -1173,7 +1203,41 @@ class CephBrokerRq(object):
1173 1203
         self.ops.append({'op': 'create-pool', 'name': name,
1174 1204
                          'replicas': replica_count, 'pg_num': pg_num,
1175 1205
                          'weight': weight, 'group': group,
1176
-                         'group-namespace': namespace, 'app-name': app_name})
1206
+                         'group-namespace': namespace, 'app-name': app_name,
1207
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
1208
+
1209
+    def add_op_create_erasure_pool(self, name, erasure_profile=None,
1210
+                                   weight=None, group=None, app_name=None,
1211
+                                   max_bytes=None, max_objects=None):
1212
+        """Adds an operation to create a erasure coded pool.
1213
+
1214
+        :param name: Name of pool to create
1215
+        :type name: str
1216
+        :param erasure_profile: Name of erasure code profile to use.  If not
1217
+                                set the ceph-mon unit handling the broker
1218
+                                request will set its default value.
1219
+        :type erasure_profile: str
1220
+        :param weight: The percentage of data that is expected to be contained
1221
+                       in the pool from the total available space on the OSDs.
1222
+        :type weight: float
1223
+        :param group: Group to add pool to
1224
+        :type group: str
1225
+        :param app_name: (Optional) Tag pool with application name.  Note that
1226
+                         there is certain protocols emerging upstream with
1227
+                         regard to meaningful application names to use.
1228
+                         Examples are ``rbd`` and ``rgw``.
1229
+        :type app_name: str
1230
+        :param max_bytes: Maximum bytes quota to apply
1231
+        :type max_bytes: int
1232
+        :param max_objects: Maximum objects quota to apply
1233
+        :type max_objects: int
1234
+        """
1235
+        self.ops.append({'op': 'create-pool', 'name': name,
1236
+                         'pool-type': 'erasure',
1237
+                         'erasure-profile': erasure_profile,
1238
+                         'weight': weight,
1239
+                         'group': group, 'app-name': app_name,
1240
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
1177 1241
 
1178 1242
     def set_ops(self, ops):
1179 1243
         """Set request ops to provided value.

+ 74
- 0
hooks/charmhelpers/core/hookenv.py View File

@@ -50,6 +50,11 @@ TRACE = "TRACE"
50 50
 MARKER = object()
51 51
 SH_MAX_ARG = 131071
52 52
 
53
+
54
+RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
55
+                 'This may not be compatible with software you are '
56
+                 'running in your shell.')
57
+
53 58
 cache = {}
54 59
 
55 60
 
@@ -1414,3 +1419,72 @@ def unit_doomed(unit=None):
1414 1419
     # I don't think 'dead' units ever show up in the goal-state, but
1415 1420
     # check anyway in addition to 'dying'.
1416 1421
     return units[unit]['status'] in ('dying', 'dead')
1422
+
1423
+
1424
+def env_proxy_settings(selected_settings=None):
1425
+    """Get proxy settings from process environment variables.
1426
+
1427
+    Get charm proxy settings from environment variables that correspond to
1428
+    juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
1429
+    see lp:1782236) in a format suitable for passing to an application that
1430
+    reacts to proxy settings passed as environment variables. Some applications
1431
+    support lowercase or uppercase notation (e.g. curl), some support only
1432
+    lowercase (e.g. wget), there are also subjectively rare cases of only
1433
+    uppercase notation support. no_proxy CIDR and wildcard support also varies
1434
+    between runtimes and applications as there is no enforced standard.
1435
+
1436
+    Some applications may connect to multiple destinations and expose config
1437
+    options that would affect only proxy settings for a specific destination
1438
+    these should be handled in charms in an application-specific manner.
1439
+
1440
+    :param selected_settings: format only a subset of possible settings
1441
+    :type selected_settings: list
1442
+    :rtype: Optional[Dict[str, str]]
1443
+    """
1444
+    SUPPORTED_SETTINGS = {
1445
+        'http': 'HTTP_PROXY',
1446
+        'https': 'HTTPS_PROXY',
1447
+        'no_proxy': 'NO_PROXY',
1448
+        'ftp': 'FTP_PROXY'
1449
+    }
1450
+    if selected_settings is None:
1451
+        selected_settings = SUPPORTED_SETTINGS
1452
+
1453
+    selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
1454
+                     if k in selected_settings]
1455
+    proxy_settings = {}
1456
+    for var in selected_vars:
1457
+        var_val = os.getenv(var)
1458
+        if var_val:
1459
+            proxy_settings[var] = var_val
1460
+            proxy_settings[var.lower()] = var_val
1461
+        # Now handle juju-prefixed environment variables. The legacy vs new
1462
+        # environment variable usage is mutually exclusive
1463
+        charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
1464
+        if charm_var_val:
1465
+            proxy_settings[var] = charm_var_val
1466
+            proxy_settings[var.lower()] = charm_var_val
1467
+    if 'no_proxy' in proxy_settings:
1468
+        if _contains_range(proxy_settings['no_proxy']):
1469
+            log(RANGE_WARNING, level=WARNING)
1470
+    return proxy_settings if proxy_settings else None
1471
+
1472
+
1473
+def _contains_range(addresses):
1474
+    """Check for cidr or wildcard domain in a string.
1475
+
1476
+    Given a string comprising a comma separated list of ip addresses
1477
+    and domain names, determine whether the string contains IP ranges
1478
+    or wildcard domains.
1479
+
1480
+    :param addresses: comma separated list of domains and ip addresses.
1481
+    :type addresses: str
1482
+    """
1483
+    return (
1484
+        # Test for cidr (e.g. 10.20.20.0/24)
1485
+        "/" in addresses or
1486
+        # Test for wildcard domains (*.foo.com or .foo.com)
1487
+        "*" in addresses or
1488
+        addresses.startswith(".") or
1489
+        ",." in addresses or
1490
+        " ." in addresses)

+ 150
- 37
hooks/charmhelpers/fetch/ubuntu.py View File

@@ -19,15 +19,16 @@ import re
19 19
 import six
20 20
 import time
21 21
 import subprocess
22
-from tempfile import NamedTemporaryFile
23 22
 
24 23
 from charmhelpers.core.host import (
25
-    lsb_release
24
+    get_distrib_codename,
25
+    CompareHostReleases,
26 26
 )
27 27
 from charmhelpers.core.hookenv import (
28 28
     log,
29 29
     DEBUG,
30 30
     WARNING,
31
+    env_proxy_settings,
31 32
 )
32 33
 from charmhelpers.fetch import SourceConfigError, GPGKeyError
33 34
 
@@ -303,12 +304,17 @@ def import_key(key):
303 304
     """Import an ASCII Armor key.
304 305
 
305 306
     A Radix64 format keyid is also supported for backwards
306
-    compatibility, but should never be used; the key retrieval
307
-    mechanism is insecure and subject to man-in-the-middle attacks
308
-    voiding all signature checks using that key.
309
-
310
-    :param keyid: The key in ASCII armor format,
311
-                  including BEGIN and END markers.
307
+    compatibility. In this case Ubuntu keyserver will be
308
+    queried for a key via HTTPS by its keyid. This method
309
+    is less preferrable because https proxy servers may
310
+    require traffic decryption which is equivalent to a
311
+    man-in-the-middle attack (a proxy server impersonates
312
+    keyserver TLS certificates and has to be explicitly
313
+    trusted by the system).
314
+
315
+    :param key: A GPG key in ASCII armor format,
316
+                  including BEGIN and END markers or a keyid.
317
+    :type key: (bytes, str)
312 318
     :raises: GPGKeyError if the key could not be imported
313 319
     """
314 320
     key = key.strip()
@@ -319,35 +325,137 @@ def import_key(key):
319 325
         log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
320 326
         if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
321 327
                 '-----END PGP PUBLIC KEY BLOCK-----' in key):
322
-            log("Importing ASCII Armor PGP key", level=DEBUG)
323
-            with NamedTemporaryFile() as keyfile:
324
-                with open(keyfile.name, 'w') as fd:
325
-                    fd.write(key)
326
-                    fd.write("\n")
327
-                cmd = ['apt-key', 'add', keyfile.name]
328
-                try:
329
-                    subprocess.check_call(cmd)
330
-                except subprocess.CalledProcessError:
331
-                    error = "Error importing PGP key '{}'".format(key)
332
-                    log(error)
333
-                    raise GPGKeyError(error)
328
+            log("Writing provided PGP key in the binary format", level=DEBUG)
329
+            if six.PY3:
330
+                key_bytes = key.encode('utf-8')
331
+            else:
332
+                key_bytes = key
333
+            key_name = _get_keyid_by_gpg_key(key_bytes)
334
+            key_gpg = _dearmor_gpg_key(key_bytes)
335
+            _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
334 336
         else:
335 337
             raise GPGKeyError("ASCII armor markers missing from GPG key")
336 338
     else:
337
-        # We should only send things obviously not a keyid offsite
338
-        # via this unsecured protocol, as it may be a secret or part
339
-        # of one.
340 339
         log("PGP key found (looks like Radix64 format)", level=WARNING)
341
-        log("INSECURLY importing PGP key from keyserver; "
340
+        log("SECURELY importing PGP key from keyserver; "
342 341
             "full key not provided.", level=WARNING)
343
-        cmd = ['apt-key', 'adv', '--keyserver',
344
-               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
345
-        try:
346
-            _run_with_retries(cmd)
347
-        except subprocess.CalledProcessError:
348
-            error = "Error importing PGP key '{}'".format(key)
349
-            log(error)
350
-            raise GPGKeyError(error)
342
+        # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL
343
+        # to retrieve GPG keys. `apt-key adv` command is deprecated as is
344
+        # apt-key in general as noted in its manpage. See lp:1433761 for more
345
+        # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop
346
+        # gpg
347
+        key_asc = _get_key_by_keyid(key)
348
+        # write the key in GPG format so that apt-key list shows it
349
+        key_gpg = _dearmor_gpg_key(key_asc)
350
+        _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
351
+
352
+
353
+def _get_keyid_by_gpg_key(key_material):
354
+    """Get a GPG key fingerprint by GPG key material.
355
+    Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
356
+    or binary GPG key material. Can be used, for example, to generate file
357
+    names for keys passed via charm options.
358
+
359
+    :param key_material: ASCII armor-encoded or binary GPG key material
360
+    :type key_material: bytes
361
+    :raises: GPGKeyError if invalid key material has been provided
362
+    :returns: A GPG key fingerprint
363
+    :rtype: str
364
+    """
365
+    # trusty, xenial and bionic handling differs due to gpg 1.x to 2.x change
366
+    release = get_distrib_codename()
367
+    is_gpgv2_distro = CompareHostReleases(release) >= "bionic"
368
+    if is_gpgv2_distro:
369
+        # --import is mandatory, otherwise fingerprint is not printed
370
+        cmd = 'gpg --with-colons --import-options show-only --import --dry-run'
371
+    else:
372
+        cmd = 'gpg --with-colons --with-fingerprint'
373
+    ps = subprocess.Popen(cmd.split(),
374
+                          stdout=subprocess.PIPE,
375
+                          stderr=subprocess.PIPE,
376
+                          stdin=subprocess.PIPE)
377
+    out, err = ps.communicate(input=key_material)
378
+    if six.PY3:
379
+        out = out.decode('utf-8')
380
+        err = err.decode('utf-8')
381
+    if 'gpg: no valid OpenPGP data found.' in err:
382
+        raise GPGKeyError('Invalid GPG key material provided')
383
+    # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
384
+    return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1)
385
+
386
+
387
+def _get_key_by_keyid(keyid):
388
+    """Get a key via HTTPS from the Ubuntu keyserver.
389
+    Different key ID formats are supported by SKS keyservers (the longer ones
390
+    are more secure, see "dead beef attack" and https://evil32.com/). Since
391
+    HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will
392
+    impersonate keyserver.ubuntu.com and generate a certificate with
393
+    keyserver.ubuntu.com in the CN field or in SubjAltName fields of a
394
+    certificate. If such proxy behavior is expected it is necessary to add the
395
+    CA certificate chain containing the intermediate CA of the SSLBump proxy to
396
+    every machine that this code runs on via ca-certs cloud-init directive (via
397
+    cloudinit-userdata model-config) or via other means (such as through a
398
+    custom charm option). Also note that DNS resolution for the hostname in a
399
+    URL is done at a proxy server - not at the client side.
400
+
401
+    8-digit (32 bit) key ID
402
+    https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
403
+    16-digit (64 bit) key ID
404
+    https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
405
+    40-digit key ID:
406
+    https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
407
+
408
+    :param keyid: An 8, 16 or 40 hex digit keyid to find a key for
409
+    :type keyid: (bytes, str)
410
+    :returns: A key material for the specified GPG key id
411
+    :rtype: (str, bytes)
412
+    :raises: subprocess.CalledProcessError
413
+    """
414
+    # options=mr - machine-readable output (disables html wrappers)
415
+    keyserver_url = ('https://keyserver.ubuntu.com'
416
+                     '/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
417
+    curl_cmd = ['curl', keyserver_url.format(keyid)]
418
+    # use proxy server settings in order to retrieve the key
419
+    return subprocess.check_output(curl_cmd,
420
+                                   env=env_proxy_settings(['https']))
421
+
422
+
423
+def _dearmor_gpg_key(key_asc):
424
+    """Converts a GPG key in the ASCII armor format to the binary format.
425
+
426
+    :param key_asc: A GPG key in ASCII armor format.
427
+    :type key_asc: (str, bytes)
428
+    :returns: A GPG key in binary format
429
+    :rtype: (str, bytes)
430
+    :raises: GPGKeyError
431
+    """
432
+    ps = subprocess.Popen(['gpg', '--dearmor'],
433
+                          stdout=subprocess.PIPE,
434
+                          stderr=subprocess.PIPE,
435
+                          stdin=subprocess.PIPE)
436
+    out, err = ps.communicate(input=key_asc)
437
+    # no need to decode output as it is binary (invalid utf-8), only error
438
+    if six.PY3:
439
+        err = err.decode('utf-8')
440
+    if 'gpg: no valid OpenPGP data found.' in err:
441
+        raise GPGKeyError('Invalid GPG key material. Check your network setup'
442
+                          ' (MTU, routing, DNS) and/or proxy server settings'
443
+                          ' as well as destination keyserver status.')
444
+    else:
445
+        return out
446
+
447
+
448
+def _write_apt_gpg_keyfile(key_name, key_material):
449
+    """Writes GPG key material into a file at a provided path.
450
+
451
+    :param key_name: A key name to use for a key file (could be a fingerprint)
452
+    :type key_name: str
453
+    :param key_material: A GPG key material (binary)
454
+    :type key_material: (str, bytes)
455
+    """
456
+    with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
457
+              'wb') as keyf:
458
+        keyf.write(key_material)
351 459
 
352 460
 
353 461
 def add_source(source, key=None, fail_invalid=False):
@@ -442,13 +550,13 @@ def add_source(source, key=None, fail_invalid=False):
442 550
 def _add_proposed():
443 551
     """Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
444 552
 
445
-    Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for
553
+    Uses get_distrib_codename to determine the correct stanza for
446 554
     the deb line.
447 555
 
448 556
     For intel architecutres PROPOSED_POCKET is used for the release, but for
449 557
     other architectures PROPOSED_PORTS_POCKET is used for the release.
450 558
     """
451
-    release = lsb_release()['DISTRIB_CODENAME']
559
+    release = get_distrib_codename()
452 560
     arch = platform.machine()
453 561
     if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
454 562
         raise SourceConfigError("Arch {} not supported for (distro-)proposed"
@@ -461,11 +569,16 @@ def _add_apt_repository(spec):
461 569
     """Add the spec using add_apt_repository
462 570
 
463 571
     :param spec: the parameter to pass to add_apt_repository
572
+    :type spec: str
464 573
     """
465 574
     if '{series}' in spec:
466
-        series = lsb_release()['DISTRIB_CODENAME']
575
+        series = get_distrib_codename()
467 576
         spec = spec.replace('{series}', series)
468
-    _run_with_retries(['add-apt-repository', '--yes', spec])
577
+    # software-properties package for bionic properly reacts to proxy settings
578
+    # passed as environment variables (See lp:1433761). This is not the case
579
+    # LTS and non-LTS releases below bionic.
580
+    _run_with_retries(['add-apt-repository', '--yes', spec],
581
+                      cmd_env=env_proxy_settings(['https']))
469 582
 
470 583
 
471 584
 def _add_cloud_pocket(pocket):
@@ -534,7 +647,7 @@ def _verify_is_ubuntu_rel(release, os_release):
534 647
     :raises: SourceConfigError if the release is not the same as the ubuntu
535 648
         release.
536 649
     """
537
-    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
650
+    ubuntu_rel = get_distrib_codename()
538 651
     if release != ubuntu_rel:
539 652
         raise SourceConfigError(
540 653
             'Invalid Cloud Archive release specified: {}-{} on this Ubuntu'

+ 11
- 0
tests/basic_deployment.py View File

@@ -907,6 +907,17 @@ class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
907 907
             self.d.configure('neutron-api', set_default)
908 908
             u.log.debug('OK')
909 909
 
910
+    def test_500_security_checklist_action(self):
911
+        """Verify expected result on a default install"""
912
+        u.log.debug("Testing security-checklist")
913
+        sentry_unit = self.neutron_gateway_sentry
914
+
915
+        action_id = u.run_action(sentry_unit, "security-checklist")
916
+        u.wait_on_action(action_id)
917
+        data = amulet.actions.get_action_output(action_id, full_output=True)
918
+        assert data.get(u"status") == "failed", \
919
+            "Security check is expected to not pass by default"
920
+
910 921
     def test_900_restart_on_config_change(self):
911 922
         """Verify that the specified services are restarted when the
912 923
         config is changed."""

Loading…
Cancel
Save