Browse Source

Pre-release charm-helpers sync 16.10

Get each charm up to date with lp:charm-helpers for release testing.

Change-Id: I220409cf255378b57016dd6856ef02a87a21f79f
changes/13/380413/2
David Ames 2 years ago
parent
commit
49b75b04bc

+ 1
- 0
charm-helpers-hooks.yaml View File

@@ -3,6 +3,7 @@ destination: hooks/charmhelpers
3 3
 include:
4 4
     - core
5 5
     - fetch
6
+    - osplatform
6 7
     - contrib.openstack|inc=*
7 8
     - contrib.storage
8 9
     - contrib.hahelpers

+ 1
- 1
hooks/charmhelpers/contrib/network/ip.py View File

@@ -406,7 +406,7 @@ def is_ip(address):
406 406
         # Test to see if already an IPv4/IPv6 address
407 407
         address = netaddr.IPAddress(address)
408 408
         return True
409
-    except netaddr.AddrFormatError:
409
+    except (netaddr.AddrFormatError, ValueError):
410 410
         return False
411 411
 
412 412
 

+ 61
- 11
hooks/charmhelpers/contrib/openstack/amulet/deployment.py View File

@@ -98,8 +98,47 @@ class OpenStackAmuletDeployment(AmuletDeployment):
98 98
 
99 99
         return other_services
100 100
 
101
-    def _add_services(self, this_service, other_services):
102
-        """Add services to the deployment and set openstack-origin/source."""
101
+    def _add_services(self, this_service, other_services, use_source=None,
102
+                      no_origin=None):
103
+        """Add services to the deployment and optionally set
104
+        openstack-origin/source.
105
+
106
+        :param this_service dict: Service dictionary describing the service
107
+                                  whose amulet tests are being run
108
+        :param other_services dict: List of service dictionaries describing
109
+                                    the services needed to support the target
110
+                                    service
111
+        :param use_source list: List of services which use the 'source' config
112
+                                option rather than 'openstack-origin'
113
+        :param no_origin list: List of services which do not support setting
114
+                               the Cloud Archive.
115
+        Service Dict:
116
+            {
117
+                'name': str charm-name,
118
+                'units': int number of units,
119
+                'constraints': dict of juju constraints,
120
+                'location': str location of charm,
121
+            }
122
+        eg
123
+        this_service = {
124
+            'name': 'openvswitch-odl',
125
+            'constraints': {'mem': '8G'},
126
+        }
127
+        other_services = [
128
+            {
129
+                'name': 'nova-compute',
130
+                'units': 2,
131
+                'constraints': {'mem': '4G'},
132
+                'location': cs:~bob/xenial/nova-compute
133
+            },
134
+            {
135
+                'name': 'mysql',
136
+                'constraints': {'mem': '2G'},
137
+            },
138
+            {'neutron-api-odl'}]
139
+        use_source = ['mysql']
140
+        no_origin = ['neutron-api-odl']
141
+        """
103 142
         self.log.info('OpenStackAmuletDeployment:  adding services')
104 143
 
105 144
         other_services = self._determine_branch_locations(other_services)
@@ -110,16 +149,22 @@ class OpenStackAmuletDeployment(AmuletDeployment):
110 149
         services = other_services
111 150
         services.append(this_service)
112 151
 
152
+        use_source = use_source or []
153
+        no_origin = no_origin or []
154
+
113 155
         # Charms which should use the source config option
114
-        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
115
-                      'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy']
156
+        use_source = list(set(
157
+            use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
158
+                          'ceph-osd', 'ceph-radosgw', 'ceph-mon',
159
+                          'ceph-proxy']))
116 160
 
117 161
         # Charms which can not use openstack-origin, ie. many subordinates
118
-        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
119
-                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
120
-                     'cinder-backup', 'nexentaedge-data',
121
-                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
122
-                     'cinder-nexentaedge', 'nexentaedge-mgmt']
162
+        no_origin = list(set(
163
+            no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
164
+                         'nrpe', 'openvswitch-odl', 'neutron-api-odl',
165
+                         'odl-controller', 'cinder-backup', 'nexentaedge-data',
166
+                         'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
167
+                         'cinder-nexentaedge', 'nexentaedge-mgmt']))
123 168
 
124 169
         if self.openstack:
125 170
             for svc in services:
@@ -220,7 +265,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
220 265
          self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
221 266
          self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
222 267
          self.wily_liberty, self.trusty_mitaka,
223
-         self.xenial_mitaka) = range(14)
268
+         self.xenial_mitaka, self.xenial_newton,
269
+         self.yakkety_newton) = range(16)
224 270
 
225 271
         releases = {
226 272
             ('precise', None): self.precise_essex,
@@ -236,7 +282,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
236 282
             ('utopic', None): self.utopic_juno,
237 283
             ('vivid', None): self.vivid_kilo,
238 284
             ('wily', None): self.wily_liberty,
239
-            ('xenial', None): self.xenial_mitaka}
285
+            ('xenial', None): self.xenial_mitaka,
286
+            ('xenial', 'cloud:xenial-newton'): self.xenial_newton,
287
+            ('yakkety', None): self.yakkety_newton,
288
+        }
240 289
         return releases[(self.series, self.openstack)]
241 290
 
242 291
     def _get_openstack_release_string(self):
@@ -254,6 +303,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
254 303
             ('vivid', 'kilo'),
255 304
             ('wily', 'liberty'),
256 305
             ('xenial', 'mitaka'),
306
+            ('yakkety', 'newton'),
257 307
         ])
258 308
         if self.openstack:
259 309
             os_origin = self.openstack.split(':')[1]

+ 118
- 1
hooks/charmhelpers/contrib/openstack/amulet/utils.py View File

@@ -83,6 +83,56 @@ class OpenStackAmuletUtils(AmuletUtils):
83 83
         if not found:
84 84
             return 'endpoint not found'
85 85
 
86
+    def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
87
+                                  public_port, expected):
88
+        """Validate keystone v3 endpoint data.
89
+
90
+        Validate the v3 endpoint data which has changed from v2.  The
91
+        ports are used to find the matching endpoint.
92
+
93
+        The new v3 endpoint data looks like:
94
+
95
+        [<Endpoint enabled=True,
96
+                   id=0432655fc2f74d1e9fa17bdaa6f6e60b,
97
+                   interface=admin,
98
+                   links={u'self': u'<RESTful URL of this endpoint>'},
99
+                   region=RegionOne,
100
+                   region_id=RegionOne,
101
+                   service_id=17f842a0dc084b928e476fafe67e4095,
102
+                   url=http://10.5.6.5:9312>,
103
+         <Endpoint enabled=True,
104
+                   id=6536cb6cb92f4f41bf22b079935c7707,
105
+                   interface=admin,
106
+                   links={u'self': u'<RESTful url of this endpoint>'},
107
+                   region=RegionOne,
108
+                   region_id=RegionOne,
109
+                   service_id=72fc8736fb41435e8b3584205bb2cfa3,
110
+                   url=http://10.5.6.6:35357/v3>,
111
+                   ... ]
112
+        """
113
+        self.log.debug('Validating v3 endpoint data...')
114
+        self.log.debug('actual: {}'.format(repr(endpoints)))
115
+        found = []
116
+        for ep in endpoints:
117
+            self.log.debug('endpoint: {}'.format(repr(ep)))
118
+            if ((admin_port in ep.url and ep.interface == 'admin') or
119
+                    (internal_port in ep.url and ep.interface == 'internal') or
120
+                    (public_port in ep.url and ep.interface == 'public')):
121
+                found.append(ep.interface)
122
+                # note we ignore the links member.
123
+                actual = {'id': ep.id,
124
+                          'region': ep.region,
125
+                          'region_id': ep.region_id,
126
+                          'interface': self.not_null,
127
+                          'url': ep.url,
128
+                          'service_id': ep.service_id, }
129
+                ret = self._validate_dict_data(expected, actual)
130
+                if ret:
131
+                    return 'unexpected endpoint data - {}'.format(ret)
132
+
133
+        if len(found) != 3:
134
+            return 'Unexpected number of endpoints found'
135
+
86 136
     def validate_svc_catalog_endpoint_data(self, expected, actual):
87 137
         """Validate service catalog endpoint data.
88 138
 
@@ -100,6 +150,72 @@ class OpenStackAmuletUtils(AmuletUtils):
100 150
                 return "endpoint {} does not exist".format(k)
101 151
         return ret
102 152
 
153
+    def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
154
+        """Validate the keystone v3 catalog endpoint data.
155
+
156
+        Validate a list of dictinaries that make up the keystone v3 service
157
+        catalogue.
158
+
159
+        It is in the form of:
160
+
161
+
162
+        {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
163
+                        u'interface': u'admin',
164
+                        u'region': u'RegionOne',
165
+                        u'region_id': u'RegionOne',
166
+                        u'url': u'http://10.5.5.224:35357/v3'},
167
+                       {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
168
+                        u'interface': u'public',
169
+                        u'region': u'RegionOne',
170
+                        u'region_id': u'RegionOne',
171
+                        u'url': u'http://10.5.5.224:5000/v3'},
172
+                       {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
173
+                        u'interface': u'internal',
174
+                        u'region': u'RegionOne',
175
+                        u'region_id': u'RegionOne',
176
+                        u'url': u'http://10.5.5.224:5000/v3'}],
177
+         u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
178
+                           u'interface': u'public',
179
+                           u'region': u'RegionOne',
180
+                           u'region_id': u'RegionOne',
181
+                           u'url': u'http://10.5.5.223:9311'},
182
+                          {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
183
+                           u'interface': u'internal',
184
+                           u'region': u'RegionOne',
185
+                           u'region_id': u'RegionOne',
186
+                           u'url': u'http://10.5.5.223:9311'},
187
+                          {u'id': u'f629388955bc407f8b11d8b7ca168086',
188
+                           u'interface': u'admin',
189
+                           u'region': u'RegionOne',
190
+                           u'region_id': u'RegionOne',
191
+                           u'url': u'http://10.5.5.223:9312'}]}
192
+
193
+        Note, that an added complication is that the order of admin, public,
194
+        internal against 'interface' in each region.
195
+
196
+        Thus, the function sorts the expected and actual lists using the
197
+        interface key as a sort key, prior to the comparison.
198
+        """
199
+        self.log.debug('Validating v3 service catalog endpoint data...')
200
+        self.log.debug('actual: {}'.format(repr(actual)))
201
+        for k, v in six.iteritems(expected):
202
+            if k in actual:
203
+                l_expected = sorted(v, key=lambda x: x['interface'])
204
+                l_actual = sorted(actual[k], key=lambda x: x['interface'])
205
+                if len(l_actual) != len(l_expected):
206
+                    return ("endpoint {} has differing number of interfaces "
207
+                            " - expected({}), actual({})"
208
+                            .format(k, len(l_expected), len(l_actual)))
209
+                for i_expected, i_actual in zip(l_expected, l_actual):
210
+                    self.log.debug("checking interface {}"
211
+                                   .format(i_expected['interface']))
212
+                    ret = self._validate_dict_data(i_expected, i_actual)
213
+                    if ret:
214
+                        return self.endpoint_error(k, ret)
215
+            else:
216
+                return "endpoint {} does not exist".format(k)
217
+        return ret
218
+
103 219
     def validate_tenant_data(self, expected, actual):
104 220
         """Validate tenant data.
105 221
 
@@ -928,7 +1044,8 @@ class OpenStackAmuletUtils(AmuletUtils):
928 1044
                                                    retry_delay=5,
929 1045
                                                    socket_timeout=1)
930 1046
             connection = pika.BlockingConnection(parameters)
931
-            assert connection.server_properties['product'] == 'RabbitMQ'
1047
+            assert connection.is_open is True
1048
+            assert connection.is_closing is False
932 1049
             self.log.debug('Connect OK')
933 1050
             return connection
934 1051
         except Exception as e:

+ 4
- 2
hooks/charmhelpers/contrib/openstack/context.py View File

@@ -1421,9 +1421,9 @@ class InternalEndpointContext(OSContextGenerator):
1421 1421
 class AppArmorContext(OSContextGenerator):
1422 1422
     """Base class for apparmor contexts."""
1423 1423
 
1424
-    def __init__(self):
1424
+    def __init__(self, profile_name=None):
1425 1425
         self._ctxt = None
1426
-        self.aa_profile = None
1426
+        self.aa_profile = profile_name
1427 1427
         self.aa_utils_packages = ['apparmor-utils']
1428 1428
 
1429 1429
     @property
@@ -1442,6 +1442,8 @@ class AppArmorContext(OSContextGenerator):
1442 1442
         if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
1443 1443
             ctxt = {'aa_profile_mode': config('aa-profile-mode'),
1444 1444
                     'ubuntu_release': lsb_release()['DISTRIB_RELEASE']}
1445
+            if self.aa_profile:
1446
+                ctxt['aa_profile'] = self.aa_profile
1445 1447
         else:
1446 1448
             ctxt = None
1447 1449
         return ctxt

+ 8
- 1
hooks/charmhelpers/contrib/openstack/ip.py View File

@@ -30,6 +30,7 @@ from charmhelpers.contrib.hahelpers.cluster import is_clustered
30 30
 PUBLIC = 'public'
31 31
 INTERNAL = 'int'
32 32
 ADMIN = 'admin'
33
+ACCESS = 'access'
33 34
 
34 35
 ADDRESS_MAP = {
35 36
     PUBLIC: {
@@ -49,7 +50,13 @@ ADDRESS_MAP = {
49 50
         'config': 'os-admin-network',
50 51
         'fallback': 'private-address',
51 52
         'override': 'os-admin-hostname',
52
-    }
53
+    },
54
+    ACCESS: {
55
+        'binding': 'access',
56
+        'config': 'access-network',
57
+        'fallback': 'private-address',
58
+        'override': 'os-access-hostname',
59
+    },
53 60
 }
54 61
 
55 62
 

+ 6
- 0
hooks/charmhelpers/contrib/openstack/neutron.py View File

@@ -245,6 +245,12 @@ def neutron_plugins():
245 245
             'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
246 246
         plugins['plumgrid']['server_packages'].remove(
247 247
             'neutron-plugin-plumgrid')
248
+    if release >= 'mitaka':
249
+        plugins['nsx']['server_packages'].remove('neutron-plugin-vmware')
250
+        plugins['nsx']['server_packages'].append('python-vmware-nsx')
251
+        plugins['nsx']['config'] = '/etc/neutron/nsx.ini'
252
+        plugins['vsp']['driver'] = (
253
+            'nuage_neutron.plugins.nuage.plugin.NuagePlugin')
248 254
     return plugins
249 255
 
250 256
 

+ 20
- 3
hooks/charmhelpers/contrib/openstack/utils.py View File

@@ -51,7 +51,8 @@ from charmhelpers.core.hookenv import (
51 51
     relation_set,
52 52
     service_name,
53 53
     status_set,
54
-    hook_name
54
+    hook_name,
55
+    application_version_set,
55 56
 )
56 57
 
57 58
 from charmhelpers.contrib.storage.linux.lvm import (
@@ -80,7 +81,12 @@ from charmhelpers.core.host import (
80 81
     service_resume,
81 82
     restart_on_change_helper,
82 83
 )
83
-from charmhelpers.fetch import apt_install, apt_cache, install_remote
84
+from charmhelpers.fetch import (
85
+    apt_install,
86
+    apt_cache,
87
+    install_remote,
88
+    get_upstream_version
89
+)
84 90
 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
85 91
 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
86 92
 from charmhelpers.contrib.openstack.exceptions import OSContextError
@@ -145,7 +151,7 @@ SWIFT_CODENAMES = OrderedDict([
145 151
     ('mitaka',
146 152
         ['2.5.0', '2.6.0', '2.7.0']),
147 153
     ('newton',
148
-        ['2.8.0', '2.9.0']),
154
+        ['2.8.0', '2.9.0', '2.10.0']),
149 155
 ])
150 156
 
151 157
 # >= Liberty version->codename mapping
@@ -1889,3 +1895,14 @@ def config_flags_parser(config_flags):
1889 1895
         flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
1890 1896
 
1891 1897
     return flags
1898
+
1899
+
1900
+def os_application_version_set(package):
1901
+    '''Set version of application for Juju 2.0 and later'''
1902
+    application_version = get_upstream_version(package)
1903
+    # NOTE(jamespage) if not able to figure out package version, fallback to
1904
+    #                 openstack codename version detection.
1905
+    if not application_version:
1906
+        application_version_set(os_release(package))
1907
+    else:
1908
+        application_version_set(application_version)

+ 6
- 0
hooks/charmhelpers/contrib/storage/linux/ceph.py View File

@@ -87,6 +87,7 @@ clog to syslog = {use_syslog}
87 87
 DEFAULT_PGS_PER_OSD_TARGET = 100
88 88
 DEFAULT_POOL_WEIGHT = 10.0
89 89
 LEGACY_PG_COUNT = 200
90
+DEFAULT_MINIMUM_PGS = 2
90 91
 
91 92
 
92 93
 def validator(value, valid_type, valid_range=None):
@@ -266,6 +267,11 @@ class Pool(object):
266 267
         target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
267 268
         num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size
268 269
 
270
+        # NOTE: ensure a sane minimum number of PGS otherwise we don't get any
271
+        #       reasonable data distribution in minimal OSD configurations
272
+        if num_pg < DEFAULT_MINIMUM_PGS:
273
+            num_pg = DEFAULT_MINIMUM_PGS
274
+
269 275
         # The CRUSH algorithm has a slight optimization for placement groups
270 276
         # with powers of 2 so find the nearest power of 2. If the nearest
271 277
         # power of 2 is more than 25% below the original value, the next

+ 14
- 0
hooks/charmhelpers/core/hookenv.py View File

@@ -843,6 +843,20 @@ def translate_exc(from_exc, to_exc):
843 843
     return inner_translate_exc1
844 844
 
845 845
 
846
+def application_version_set(version):
847
+    """Charm authors may trigger this command from any hook to output what
848
+    version of the application is running. This could be a package version,
849
+    for instance postgres version 9.5. It could also be a build number or
850
+    version control revision identifier, for instance git sha 6fb7ba68. """
851
+
852
+    cmd = ['application-version-set']
853
+    cmd.append(version)
854
+    try:
855
+        subprocess.check_call(cmd)
856
+    except OSError:
857
+        log("Application Version: {}".format(version))
858
+
859
+
846 860
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
847 861
 def is_leader():
848 862
     """Does the current unit hold the juju leadership

+ 28
- 59
hooks/charmhelpers/core/host.py View File

@@ -30,13 +30,29 @@ import subprocess
30 30
 import hashlib
31 31
 import functools
32 32
 import itertools
33
-from contextlib import contextmanager
34
-from collections import OrderedDict
35
-
36 33
 import six
37 34
 
35
+from contextlib import contextmanager
36
+from collections import OrderedDict
38 37
 from .hookenv import log
39 38
 from .fstab import Fstab
39
+from charmhelpers.osplatform import get_platform
40
+
41
+__platform__ = get_platform()
42
+if __platform__ == "ubuntu":
43
+    from charmhelpers.core.host_factory.ubuntu import (
44
+        service_available,
45
+        add_new_group,
46
+        lsb_release,
47
+        cmp_pkgrevno,
48
+    )  # flake8: noqa -- ignore F401 for this import
49
+elif __platform__ == "centos":
50
+    from charmhelpers.core.host_factory.centos import (
51
+        service_available,
52
+        add_new_group,
53
+        lsb_release,
54
+        cmp_pkgrevno,
55
+    )  # flake8: noqa -- ignore F401 for this import
40 56
 
41 57
 
42 58
 def service_start(service_name):
@@ -144,8 +160,11 @@ def service_running(service_name):
144 160
                 return False
145 161
             else:
146 162
                 # This works for upstart scripts where the 'service' command
147
-                # returns a consistent string to represent running 'start/running'
148
-                if "start/running" in output:
163
+                # returns a consistent string to represent running
164
+                # 'start/running'
165
+                if ("start/running" in output or
166
+                        "is running" in output or
167
+                        "up and running" in output):
149 168
                     return True
150 169
         elif os.path.exists(_INIT_D_CONF.format(service_name)):
151 170
             # Check System V scripts init script return codes
@@ -153,18 +172,6 @@ def service_running(service_name):
153 172
         return False
154 173
 
155 174
 
156
-def service_available(service_name):
157
-    """Determine whether a system service is available"""
158
-    try:
159
-        subprocess.check_output(
160
-            ['service', service_name, 'status'],
161
-            stderr=subprocess.STDOUT).decode('UTF-8')
162
-    except subprocess.CalledProcessError as e:
163
-        return b'unrecognized service' not in e.output
164
-    else:
165
-        return True
166
-
167
-
168 175
 SYSTEMD_SYSTEM = '/run/systemd/system'
169 176
 
170 177
 
@@ -173,8 +180,9 @@ def init_is_systemd():
173 180
     return os.path.isdir(SYSTEMD_SYSTEM)
174 181
 
175 182
 
176
-def adduser(username, password=None, shell='/bin/bash', system_user=False,
177
-            primary_group=None, secondary_groups=None, uid=None, home_dir=None):
183
+def adduser(username, password=None, shell='/bin/bash',
184
+            system_user=False, primary_group=None,
185
+            secondary_groups=None, uid=None, home_dir=None):
178 186
     """Add a user to the system.
179 187
 
180 188
     Will log but otherwise succeed if the user already exists.
@@ -286,17 +294,7 @@ def add_group(group_name, system_group=False, gid=None):
286 294
             log('group with gid {0} already exists!'.format(gid))
287 295
     except KeyError:
288 296
         log('creating group {0}'.format(group_name))
289
-        cmd = ['addgroup']
290
-        if gid:
291
-            cmd.extend(['--gid', str(gid)])
292
-        if system_group:
293
-            cmd.append('--system')
294
-        else:
295
-            cmd.extend([
296
-                '--group',
297
-            ])
298
-        cmd.append(group_name)
299
-        subprocess.check_call(cmd)
297
+        add_new_group(group_name, system_group, gid)
300 298
         group_info = grp.getgrnam(group_name)
301 299
     return group_info
302 300
 
@@ -541,16 +539,6 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
541 539
     return r
542 540
 
543 541
 
544
-def lsb_release():
545
-    """Return /etc/lsb-release in a dict"""
546
-    d = {}
547
-    with open('/etc/lsb-release', 'r') as lsb:
548
-        for l in lsb:
549
-            k, v = l.split('=')
550
-            d[k.strip()] = v.strip()
551
-    return d
552
-
553
-
554 542
 def pwgen(length=None):
555 543
     """Generate a random pasword."""
556 544
     if length is None:
@@ -674,25 +662,6 @@ def get_nic_hwaddr(nic):
674 662
     return hwaddr
675 663
 
676 664
 
677
-def cmp_pkgrevno(package, revno, pkgcache=None):
678
-    """Compare supplied revno with the revno of the installed package
679
-
680
-    *  1 => Installed revno is greater than supplied arg
681
-    *  0 => Installed revno is the same as supplied arg
682
-    * -1 => Installed revno is less than supplied arg
683
-
684
-    This function imports apt_cache function from charmhelpers.fetch if
685
-    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
686
-    you call this function, or pass an apt_pkg.Cache() instance.
687
-    """
688
-    import apt_pkg
689
-    if not pkgcache:
690
-        from charmhelpers.fetch import apt_cache
691
-        pkgcache = apt_cache()
692
-    pkg = pkgcache[package]
693
-    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
694
-
695
-
696 665
 @contextmanager
697 666
 def chdir(directory):
698 667
     """Change the current working directory to a different directory for a code

+ 0
- 0
hooks/charmhelpers/core/host_factory/__init__.py View File


+ 56
- 0
hooks/charmhelpers/core/host_factory/centos.py View File

@@ -0,0 +1,56 @@
1
+import subprocess
2
+import yum
3
+import os
4
+
5
+
6
+def service_available(service_name):
7
+    # """Determine whether a system service is available."""
8
+    if os.path.isdir('/run/systemd/system'):
9
+        cmd = ['systemctl', 'is-enabled', service_name]
10
+    else:
11
+        cmd = ['service', service_name, 'is-enabled']
12
+    return subprocess.call(cmd) == 0
13
+
14
+
15
+def add_new_group(group_name, system_group=False, gid=None):
16
+    cmd = ['groupadd']
17
+    if gid:
18
+        cmd.extend(['--gid', str(gid)])
19
+    if system_group:
20
+        cmd.append('-r')
21
+    cmd.append(group_name)
22
+    subprocess.check_call(cmd)
23
+
24
+
25
+def lsb_release():
26
+    """Return /etc/os-release in a dict."""
27
+    d = {}
28
+    with open('/etc/os-release', 'r') as lsb:
29
+        for l in lsb:
30
+            s = l.split('=')
31
+            if len(s) != 2:
32
+                continue
33
+            d[s[0].strip()] = s[1].strip()
34
+    return d
35
+
36
+
37
+def cmp_pkgrevno(package, revno, pkgcache=None):
38
+    """Compare supplied revno with the revno of the installed package.
39
+
40
+    *  1 => Installed revno is greater than supplied arg
41
+    *  0 => Installed revno is the same as supplied arg
42
+    * -1 => Installed revno is less than supplied arg
43
+
44
+    This function imports YumBase function if the pkgcache argument
45
+    is None.
46
+    """
47
+    if not pkgcache:
48
+        y = yum.YumBase()
49
+        packages = y.doPackageLists()
50
+        pkgcache = {i.Name: i.version for i in packages['installed']}
51
+    pkg = pkgcache[package]
52
+    if pkg > revno:
53
+        return 1
54
+    if pkg < revno:
55
+        return -1
56
+    return 0

+ 56
- 0
hooks/charmhelpers/core/host_factory/ubuntu.py View File

@@ -0,0 +1,56 @@
1
+import subprocess
2
+
3
+
4
+def service_available(service_name):
5
+    """Determine whether a system service is available"""
6
+    try:
7
+        subprocess.check_output(
8
+            ['service', service_name, 'status'],
9
+            stderr=subprocess.STDOUT).decode('UTF-8')
10
+    except subprocess.CalledProcessError as e:
11
+        return b'unrecognized service' not in e.output
12
+    else:
13
+        return True
14
+
15
+
16
+def add_new_group(group_name, system_group=False, gid=None):
17
+    cmd = ['addgroup']
18
+    if gid:
19
+        cmd.extend(['--gid', str(gid)])
20
+    if system_group:
21
+        cmd.append('--system')
22
+    else:
23
+        cmd.extend([
24
+            '--group',
25
+        ])
26
+    cmd.append(group_name)
27
+    subprocess.check_call(cmd)
28
+
29
+
30
+def lsb_release():
31
+    """Return /etc/lsb-release in a dict"""
32
+    d = {}
33
+    with open('/etc/lsb-release', 'r') as lsb:
34
+        for l in lsb:
35
+            k, v = l.split('=')
36
+            d[k.strip()] = v.strip()
37
+    return d
38
+
39
+
40
+def cmp_pkgrevno(package, revno, pkgcache=None):
41
+    """Compare supplied revno with the revno of the installed package.
42
+
43
+    *  1 => Installed revno is greater than supplied arg
44
+    *  0 => Installed revno is the same as supplied arg
45
+    * -1 => Installed revno is less than supplied arg
46
+
47
+    This function imports apt_cache function from charmhelpers.fetch if
48
+    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
49
+    you call this function, or pass an apt_pkg.Cache() instance.
50
+    """
51
+    import apt_pkg
52
+    if not pkgcache:
53
+        from charmhelpers.fetch import apt_cache
54
+        pkgcache = apt_cache()
55
+    pkg = pkgcache[package]
56
+    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)

+ 21
- 15
hooks/charmhelpers/core/kernel.py View File

@@ -15,15 +15,28 @@
15 15
 # See the License for the specific language governing permissions and
16 16
 # limitations under the License.
17 17
 
18
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
18
+import re
19
+import subprocess
19 20
 
21
+from charmhelpers.osplatform import get_platform
20 22
 from charmhelpers.core.hookenv import (
21 23
     log,
22 24
     INFO
23 25
 )
24 26
 
25
-from subprocess import check_call, check_output
26
-import re
27
+__platform__ = get_platform()
28
+if __platform__ == "ubuntu":
29
+    from charmhelpers.core.kernel_factory.ubuntu import (
30
+        persistent_modprobe,
31
+        update_initramfs,
32
+    )  # flake8: noqa -- ignore F401 for this import
33
+elif __platform__ == "centos":
34
+    from charmhelpers.core.kernel_factory.centos import (
35
+        persistent_modprobe,
36
+        update_initramfs,
37
+    )  # flake8: noqa -- ignore F401 for this import
38
+
39
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
27 40
 
28 41
 
29 42
 def modprobe(module, persist=True):
@@ -32,11 +45,9 @@ def modprobe(module, persist=True):
32 45
 
33 46
     log('Loading kernel module %s' % module, level=INFO)
34 47
 
35
-    check_call(cmd)
48
+    subprocess.check_call(cmd)
36 49
     if persist:
37
-        with open('/etc/modules', 'r+') as modules:
38
-            if module not in modules.read():
39
-                modules.write(module)
50
+        persistent_modprobe(module)
40 51
 
41 52
 
42 53
 def rmmod(module, force=False):
@@ -46,21 +57,16 @@ def rmmod(module, force=False):
46 57
         cmd.append('-f')
47 58
     cmd.append(module)
48 59
     log('Removing kernel module %s' % module, level=INFO)
49
-    return check_call(cmd)
60
+    return subprocess.check_call(cmd)
50 61
 
51 62
 
52 63
 def lsmod():
53 64
     """Shows what kernel modules are currently loaded"""
54
-    return check_output(['lsmod'],
55
-                        universal_newlines=True)
65
+    return subprocess.check_output(['lsmod'],
66
+                                   universal_newlines=True)
56 67
 
57 68
 
58 69
 def is_module_loaded(module):
59 70
     """Checks if a kernel module is already loaded"""
60 71
     matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
61 72
     return len(matches) > 0
62
-
63
-
64
-def update_initramfs(version='all'):
65
-    """Updates an initramfs image"""
66
-    return check_call(["update-initramfs", "-k", version, "-u"])

+ 0
- 0
hooks/charmhelpers/core/kernel_factory/__init__.py View File


+ 17
- 0
hooks/charmhelpers/core/kernel_factory/centos.py View File

@@ -0,0 +1,17 @@
1
+import subprocess
2
+import os
3
+
4
+
5
+def persistent_modprobe(module):
6
+    """Load a kernel module and configure for auto-load on reboot."""
7
+    if not os.path.exists('/etc/rc.modules'):
8
+        open('/etc/rc.modules', 'a')
9
+        os.chmod('/etc/rc.modules', 111)
10
+    with open('/etc/rc.modules', 'r+') as modules:
11
+        if module not in modules.read():
12
+            modules.write('modprobe %s\n' % module)
13
+
14
+
15
+def update_initramfs(version='all'):
16
+    """Updates an initramfs image."""
17
+    return subprocess.check_call(["dracut", "-f", version])

+ 13
- 0
hooks/charmhelpers/core/kernel_factory/ubuntu.py View File

@@ -0,0 +1,13 @@
1
+import subprocess
2
+
3
+
4
+def persistent_modprobe(module):
5
+    """Load a kernel module and configure for auto-load on reboot."""
6
+    with open('/etc/modules', 'r+') as modules:
7
+        if module not in modules.read():
8
+            modules.write(module)
9
+
10
+
11
+def update_initramfs(version='all'):
12
+    """Updates an initramfs image."""
13
+    return subprocess.check_call(["update-initramfs", "-k", version, "-u"])

+ 27
- 297
hooks/charmhelpers/fetch/__init__.py View File

@@ -13,18 +13,12 @@
13 13
 # limitations under the License.
14 14
 
15 15
 import importlib
16
-from tempfile import NamedTemporaryFile
17
-import time
16
+from charmhelpers.osplatform import get_platform
18 17
 from yaml import safe_load
19
-from charmhelpers.core.host import (
20
-    lsb_release
21
-)
22
-import subprocess
23 18
 from charmhelpers.core.hookenv import (
24 19
     config,
25 20
     log,
26 21
 )
27
-import os
28 22
 
29 23
 import six
30 24
 if six.PY3:
@@ -33,87 +27,6 @@ else:
33 27
     from urlparse import urlparse, urlunparse
34 28
 
35 29
 
36
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
37
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
38
-"""
39
-PROPOSED_POCKET = """# Proposed
40
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
41
-"""
42
-CLOUD_ARCHIVE_POCKETS = {
43
-    # Folsom
44
-    'folsom': 'precise-updates/folsom',
45
-    'precise-folsom': 'precise-updates/folsom',
46
-    'precise-folsom/updates': 'precise-updates/folsom',
47
-    'precise-updates/folsom': 'precise-updates/folsom',
48
-    'folsom/proposed': 'precise-proposed/folsom',
49
-    'precise-folsom/proposed': 'precise-proposed/folsom',
50
-    'precise-proposed/folsom': 'precise-proposed/folsom',
51
-    # Grizzly
52
-    'grizzly': 'precise-updates/grizzly',
53
-    'precise-grizzly': 'precise-updates/grizzly',
54
-    'precise-grizzly/updates': 'precise-updates/grizzly',
55
-    'precise-updates/grizzly': 'precise-updates/grizzly',
56
-    'grizzly/proposed': 'precise-proposed/grizzly',
57
-    'precise-grizzly/proposed': 'precise-proposed/grizzly',
58
-    'precise-proposed/grizzly': 'precise-proposed/grizzly',
59
-    # Havana
60
-    'havana': 'precise-updates/havana',
61
-    'precise-havana': 'precise-updates/havana',
62
-    'precise-havana/updates': 'precise-updates/havana',
63
-    'precise-updates/havana': 'precise-updates/havana',
64
-    'havana/proposed': 'precise-proposed/havana',
65
-    'precise-havana/proposed': 'precise-proposed/havana',
66
-    'precise-proposed/havana': 'precise-proposed/havana',
67
-    # Icehouse
68
-    'icehouse': 'precise-updates/icehouse',
69
-    'precise-icehouse': 'precise-updates/icehouse',
70
-    'precise-icehouse/updates': 'precise-updates/icehouse',
71
-    'precise-updates/icehouse': 'precise-updates/icehouse',
72
-    'icehouse/proposed': 'precise-proposed/icehouse',
73
-    'precise-icehouse/proposed': 'precise-proposed/icehouse',
74
-    'precise-proposed/icehouse': 'precise-proposed/icehouse',
75
-    # Juno
76
-    'juno': 'trusty-updates/juno',
77
-    'trusty-juno': 'trusty-updates/juno',
78
-    'trusty-juno/updates': 'trusty-updates/juno',
79
-    'trusty-updates/juno': 'trusty-updates/juno',
80
-    'juno/proposed': 'trusty-proposed/juno',
81
-    'trusty-juno/proposed': 'trusty-proposed/juno',
82
-    'trusty-proposed/juno': 'trusty-proposed/juno',
83
-    # Kilo
84
-    'kilo': 'trusty-updates/kilo',
85
-    'trusty-kilo': 'trusty-updates/kilo',
86
-    'trusty-kilo/updates': 'trusty-updates/kilo',
87
-    'trusty-updates/kilo': 'trusty-updates/kilo',
88
-    'kilo/proposed': 'trusty-proposed/kilo',
89
-    'trusty-kilo/proposed': 'trusty-proposed/kilo',
90
-    'trusty-proposed/kilo': 'trusty-proposed/kilo',
91
-    # Liberty
92
-    'liberty': 'trusty-updates/liberty',
93
-    'trusty-liberty': 'trusty-updates/liberty',
94
-    'trusty-liberty/updates': 'trusty-updates/liberty',
95
-    'trusty-updates/liberty': 'trusty-updates/liberty',
96
-    'liberty/proposed': 'trusty-proposed/liberty',
97
-    'trusty-liberty/proposed': 'trusty-proposed/liberty',
98
-    'trusty-proposed/liberty': 'trusty-proposed/liberty',
99
-    # Mitaka
100
-    'mitaka': 'trusty-updates/mitaka',
101
-    'trusty-mitaka': 'trusty-updates/mitaka',
102
-    'trusty-mitaka/updates': 'trusty-updates/mitaka',
103
-    'trusty-updates/mitaka': 'trusty-updates/mitaka',
104
-    'mitaka/proposed': 'trusty-proposed/mitaka',
105
-    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
106
-    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
107
-    # Newton
108
-    'newton': 'xenial-updates/newton',
109
-    'xenial-newton': 'xenial-updates/newton',
110
-    'xenial-newton/updates': 'xenial-updates/newton',
111
-    'xenial-updates/newton': 'xenial-updates/newton',
112
-    'newton/proposed': 'xenial-proposed/newton',
113
-    'xenial-newton/proposed': 'xenial-proposed/newton',
114
-    'xenial-proposed/newton': 'xenial-proposed/newton',
115
-}
116
-
117 30
 # The order of this list is very important. Handlers should be listed in from
118 31
 # least- to most-specific URL matching.
119 32
 FETCH_HANDLERS = (
@@ -122,10 +35,6 @@ FETCH_HANDLERS = (
122 35
     'charmhelpers.fetch.giturl.GitUrlFetchHandler',
123 36
 )
124 37
 
125
-APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
126
-APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
127
-APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
128
-
129 38
 
130 39
 class SourceConfigError(Exception):
131 40
     pass
@@ -163,180 +72,38 @@ class BaseFetchHandler(object):
163 72
         return urlunparse(parts)
164 73
 
165 74
 
166
-def filter_installed_packages(packages):
167
-    """Returns a list of packages that require installation"""
168
-    cache = apt_cache()
169
-    _pkgs = []
170
-    for package in packages:
171
-        try:
172
-            p = cache[package]
173
-            p.current_ver or _pkgs.append(package)
174
-        except KeyError:
175
-            log('Package {} has no installation candidate.'.format(package),
176
-                level='WARNING')
177
-            _pkgs.append(package)
178
-    return _pkgs
179
-
180
-
181
-def apt_cache(in_memory=True, progress=None):
182
-    """Build and return an apt cache"""
183
-    from apt import apt_pkg
184
-    apt_pkg.init()
185
-    if in_memory:
186
-        apt_pkg.config.set("Dir::Cache::pkgcache", "")
187
-        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
188
-    return apt_pkg.Cache(progress)
189
-
190
-
191
-def apt_install(packages, options=None, fatal=False):
192
-    """Install one or more packages"""
193
-    if options is None:
194
-        options = ['--option=Dpkg::Options::=--force-confold']
195
-
196
-    cmd = ['apt-get', '--assume-yes']
197
-    cmd.extend(options)
198
-    cmd.append('install')
199
-    if isinstance(packages, six.string_types):
200
-        cmd.append(packages)
201
-    else:
202
-        cmd.extend(packages)
203
-    log("Installing {} with options: {}".format(packages,
204
-                                                options))
205
-    _run_apt_command(cmd, fatal)
206
-
207
-
208
-def apt_upgrade(options=None, fatal=False, dist=False):
209
-    """Upgrade all packages"""
210
-    if options is None:
211
-        options = ['--option=Dpkg::Options::=--force-confold']
212
-
213
-    cmd = ['apt-get', '--assume-yes']
214
-    cmd.extend(options)
215
-    if dist:
216
-        cmd.append('dist-upgrade')
217
-    else:
218
-        cmd.append('upgrade')
219
-    log("Upgrading with options: {}".format(options))
220
-    _run_apt_command(cmd, fatal)
221
-
222
-
223
-def apt_update(fatal=False):
224
-    """Update local apt cache"""
225
-    cmd = ['apt-get', 'update']
226
-    _run_apt_command(cmd, fatal)
227
-
228
-
229
-def apt_purge(packages, fatal=False):
230
-    """Purge one or more packages"""
231
-    cmd = ['apt-get', '--assume-yes', 'purge']
232
-    if isinstance(packages, six.string_types):
233
-        cmd.append(packages)
234
-    else:
235
-        cmd.extend(packages)
236
-    log("Purging {}".format(packages))
237
-    _run_apt_command(cmd, fatal)
238
-
239
-
240
-def apt_mark(packages, mark, fatal=False):
241
-    """Flag one or more packages using apt-mark"""
242
-    log("Marking {} as {}".format(packages, mark))
243
-    cmd = ['apt-mark', mark]
244
-    if isinstance(packages, six.string_types):
245
-        cmd.append(packages)
246
-    else:
247
-        cmd.extend(packages)
248
-
249
-    if fatal:
250
-        subprocess.check_call(cmd, universal_newlines=True)
251
-    else:
252
-        subprocess.call(cmd, universal_newlines=True)
253
-
254
-
255
-def apt_hold(packages, fatal=False):
256
-    return apt_mark(packages, 'hold', fatal=fatal)
257
-
258
-
259
-def apt_unhold(packages, fatal=False):
260
-    return apt_mark(packages, 'unhold', fatal=fatal)
261
-
75
+__platform__ = get_platform()
76
+module = "charmhelpers.fetch.%s" % __platform__
77
+fetch = importlib.import_module(module)
262 78
 
263
-def add_source(source, key=None):
264
-    """Add a package source to this system.
79
+filter_installed_packages = fetch.filter_installed_packages
80
+install = fetch.install
81
+upgrade = fetch.upgrade
82
+update = fetch.update
83
+purge = fetch.purge
84
+add_source = fetch.add_source
265 85
 
266
-    @param source: a URL or sources.list entry, as supported by
267
-    add-apt-repository(1). Examples::
268
-
269
-        ppa:charmers/example
270
-        deb https://stub:key@private.example.com/ubuntu trusty main
271
-
272
-    In addition:
273
-        'proposed:' may be used to enable the standard 'proposed'
274
-        pocket for the release.
275
-        'cloud:' may be used to activate official cloud archive pockets,
276
-        such as 'cloud:icehouse'
277
-        'distro' may be used as a noop
278
-
279
-    @param key: A key to be added to the system's APT keyring and used
280
-    to verify the signatures on packages. Ideally, this should be an
281
-    ASCII format GPG public key including the block headers. A GPG key
282
-    id may also be used, but be aware that only insecure protocols are
283
-    available to retrieve the actual public key from a public keyserver
284
-    placing your Juju environment at risk. ppa and cloud archive keys
285
-    are securely added automtically, so sould not be provided.
286
-    """
287
-    if source is None:
288
-        log('Source is not present. Skipping')
289
-        return
290
-
291
-    if (source.startswith('ppa:') or
292
-        source.startswith('http') or
293
-        source.startswith('deb ') or
294
-            source.startswith('cloud-archive:')):
295
-        subprocess.check_call(['add-apt-repository', '--yes', source])
296
-    elif source.startswith('cloud:'):
297
-        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
298
-                    fatal=True)
299
-        pocket = source.split(':')[-1]
300
-        if pocket not in CLOUD_ARCHIVE_POCKETS:
301
-            raise SourceConfigError(
302
-                'Unsupported cloud: source option %s' %
303
-                pocket)
304
-        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
305
-        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
306
-            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
307
-    elif source == 'proposed':
308
-        release = lsb_release()['DISTRIB_CODENAME']
309
-        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
310
-            apt.write(PROPOSED_POCKET.format(release))
311
-    elif source == 'distro':
312
-        pass
313
-    else:
314
-        log("Unknown source: {!r}".format(source))
315
-
316
-    if key:
317
-        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
318
-            with NamedTemporaryFile('w+') as key_file:
319
-                key_file.write(key)
320
-                key_file.flush()
321
-                key_file.seek(0)
322
-                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
323
-        else:
324
-            # Note that hkp: is in no way a secure protocol. Using a
325
-            # GPG key id is pointless from a security POV unless you
326
-            # absolutely trust your network and DNS.
327
-            subprocess.check_call(['apt-key', 'adv', '--keyserver',
328
-                                   'hkp://keyserver.ubuntu.com:80', '--recv',
329
-                                   key])
86
+if __platform__ == "ubuntu":
87
+    apt_cache = fetch.apt_cache
88
+    apt_install = fetch.install
89
+    apt_update = fetch.update
90
+    apt_upgrade = fetch.upgrade
91
+    apt_purge = fetch.purge
92
+    apt_mark = fetch.apt_mark
93
+    apt_hold = fetch.apt_hold
94
+    apt_unhold = fetch.apt_unhold
95
+    get_upstream_version = fetch.get_upstream_version
96
+elif __platform__ == "centos":
97
+    yum_search = fetch.yum_search
330 98
 
331 99
 
332 100
 def configure_sources(update=False,
333 101
                       sources_var='install_sources',
334 102
                       keys_var='install_keys'):
335
-    """
336
-    Configure multiple sources from charm configuration.
103
+    """Configure multiple sources from charm configuration.
337 104
 
338 105
     The lists are encoded as yaml fragments in the configuration.
339
-    The frament needs to be included as a string. Sources and their
106
+    The fragment needs to be included as a string. Sources and their
340 107
     corresponding keys are of the types supported by add_source().
341 108
 
342 109
     Example config:
@@ -368,12 +135,11 @@ def configure_sources(update=False,
368 135
         for source, key in zip(sources, keys):
369 136
             add_source(source, key)
370 137
     if update:
371
-        apt_update(fatal=True)
138
+        fetch.update(fatal=True)
372 139
 
373 140
 
374 141
 def install_remote(source, *args, **kwargs):
375
-    """
376
-    Install a file tree from a remote source
142
+    """Install a file tree from a remote source.
377 143
 
378 144
     The specified source should be a url of the form:
379 145
         scheme://[host]/path[#[option=value][&...]]
@@ -406,6 +172,7 @@ def install_remote(source, *args, **kwargs):
406 172
 
407 173
 
408 174
 def install_from_config(config_var_name):
175
+    """Install a file from config."""
409 176
     charm_config = config()
410 177
     source = charm_config[config_var_name]
411 178
     return install_remote(source)
@@ -428,40 +195,3 @@ def plugins(fetch_handlers=None):
428 195
             log("FetchHandler {} not found, skipping plugin".format(
429 196
                 handler_name))
430 197
     return plugin_list
431
-
432
-
433
-def _run_apt_command(cmd, fatal=False):
434
-    """
435
-    Run an APT command, checking output and retrying if the fatal flag is set
436
-    to True.
437
-
438
-    :param: cmd: str: The apt command to run.
439
-    :param: fatal: bool: Whether the command's output should be checked and
440
-        retried.
441
-    """
442
-    env = os.environ.copy()
443
-
444
-    if 'DEBIAN_FRONTEND' not in env:
445
-        env['DEBIAN_FRONTEND'] = 'noninteractive'
446
-
447
-    if fatal:
448
-        retry_count = 0
449
-        result = None
450
-
451
-        # If the command is considered "fatal", we need to retry if the apt
452
-        # lock was not acquired.
453
-
454
-        while result is None or result == APT_NO_LOCK:
455
-            try:
456
-                result = subprocess.check_call(cmd, env=env)
457
-            except subprocess.CalledProcessError as e:
458
-                retry_count = retry_count + 1
459
-                if retry_count > APT_NO_LOCK_RETRY_COUNT:
460
-                    raise
461
-                result = e.returncode
462
-                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
463
-                    "".format(APT_NO_LOCK_RETRY_DELAY))
464
-                time.sleep(APT_NO_LOCK_RETRY_DELAY)
465
-
466
-    else:
467
-        subprocess.call(cmd, env=env)

+ 4
- 3
hooks/charmhelpers/fetch/bzrurl.py View File

@@ -18,19 +18,20 @@ from charmhelpers.fetch import (
18 18
     BaseFetchHandler,
19 19
     UnhandledSource,
20 20
     filter_installed_packages,
21
-    apt_install,
21
+    install,
22 22
 )
23 23
 from charmhelpers.core.host import mkdir
24 24
 
25 25
 
26 26
 if filter_installed_packages(['bzr']) != []:
27
-    apt_install(['bzr'])
27
+    install(['bzr'])
28 28
     if filter_installed_packages(['bzr']) != []:
29 29
         raise NotImplementedError('Unable to install bzr')
30 30
 
31 31
 
32 32
 class BzrUrlFetchHandler(BaseFetchHandler):
33
-    """Handler for bazaar branches via generic and lp URLs"""
33
+    """Handler for bazaar branches via generic and lp URLs."""
34
+
34 35
     def can_handle(self, source):
35 36
         url_parts = self.parse_url(source)
36 37
         if url_parts.scheme not in ('bzr+ssh', 'lp', ''):

+ 171
- 0
hooks/charmhelpers/fetch/centos.py View File

@@ -0,0 +1,171 @@
1
+# Copyright 2014-2015 Canonical Limited.
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License");
4
+# you may not use this file except in compliance with the License.
5
+# You may obtain a copy of the License at
6
+#
7
+#  http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS,
11
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+# See the License for the specific language governing permissions and
13
+# limitations under the License.
14
+
15
+import subprocess
16
+import os
17
+import time
18
+import six
19
+import yum
20
+
21
+from tempfile import NamedTemporaryFile
22
+from charmhelpers.core.hookenv import log
23
+
24
+YUM_NO_LOCK = 1  # The return code for "couldn't acquire lock" in YUM.
25
+YUM_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
26
+YUM_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
27
+
28
+
29
+def filter_installed_packages(packages):
30
+    """Return a list of packages that require installation."""
31
+    yb = yum.YumBase()
32
+    package_list = yb.doPackageLists()
33
+    temp_cache = {p.base_package_name: 1 for p in package_list['installed']}
34
+
35
+    _pkgs = [p for p in packages if not temp_cache.get(p, False)]
36
+    return _pkgs
37
+
38
+
39
+def install(packages, options=None, fatal=False):
40
+    """Install one or more packages."""
41
+    cmd = ['yum', '--assumeyes']
42
+    if options is not None:
43
+        cmd.extend(options)
44
+    cmd.append('install')
45
+    if isinstance(packages, six.string_types):
46
+        cmd.append(packages)
47
+    else:
48
+        cmd.extend(packages)
49
+    log("Installing {} with options: {}".format(packages,
50
+                                                options))
51
+    _run_yum_command(cmd, fatal)
52
+
53
+
54
+def upgrade(options=None, fatal=False, dist=False):
55
+    """Upgrade all packages."""
56
+    cmd = ['yum', '--assumeyes']
57
+    if options is not None:
58
+        cmd.extend(options)
59
+    cmd.append('upgrade')
60
+    log("Upgrading with options: {}".format(options))
61
+    _run_yum_command(cmd, fatal)
62
+
63
+
64
+def update(fatal=False):
65
+    """Update local yum cache."""
66
+    cmd = ['yum', '--assumeyes', 'update']
67
+    log("Update with fatal: {}".format(fatal))
68
+    _run_yum_command(cmd, fatal)
69
+
70
+
71
+def purge(packages, fatal=False):
72
+    """Purge one or more packages."""
73
+    cmd = ['yum', '--assumeyes', 'remove']
74
+    if isinstance(packages, six.string_types):
75
+        cmd.append(packages)
76
+    else:
77
+        cmd.extend(packages)
78
+    log("Purging {}".format(packages))
79
+    _run_yum_command(cmd, fatal)
80
+
81
+
82
+def yum_search(packages):
83
+    """Search for a package."""
84
+    output = {}
85
+    cmd = ['yum', 'search']
86
+    if isinstance(packages, six.string_types):
87
+        cmd.append(packages)
88
+    else:
89
+        cmd.extend(packages)
90
+    log("Searching for {}".format(packages))
91
+    result = subprocess.check_output(cmd)
92
+    for package in list(packages):
93
+        output[package] = package in result
94
+    return output
95
+
96
+
97
+def add_source(source, key=None):
98
+    """Add a package source to this system.
99
+
100
+    @param source: a URL with a rpm package
101
+
102
+    @param key: A key to be added to the system's keyring and used
103
+    to verify the signatures on packages. Ideally, this should be an
104
+    ASCII format GPG public key including the block headers. A GPG key
105
+    id may also be used, but be aware that only insecure protocols are
106
+    available to retrieve the actual public key from a public keyserver
107
+    placing your Juju environment at risk.
108
+    """
109
+    if source is None:
110
+        log('Source is not present. Skipping')
111
+        return
112
+
113
+    if source.startswith('http'):
114
+        directory = '/etc/yum.repos.d/'
115
+        for filename in os.listdir(directory):
116
+            with open(directory + filename, 'r') as rpm_file:
117
+                if source in rpm_file.read():
118
+                    break
119
+        else:
120
+            log("Add source: {!r}".format(source))
121
+            # write in the charms.repo
122
+            with open(directory + 'Charms.repo', 'a') as rpm_file:
123
+                rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
124
+                rpm_file.write('name=%s\n' % source[7:])
125
+                rpm_file.write('baseurl=%s\n\n' % source)
126
+    else:
127
+        log("Unknown source: {!r}".format(source))
128
+
129
+    if key:
130
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
131
+            with NamedTemporaryFile('w+') as key_file:
132
+                key_file.write(key)
133
+                key_file.flush()
134
+                key_file.seek(0)
135
+            subprocess.check_call(['rpm', '--import', key_file])
136
+        else:
137
+            subprocess.check_call(['rpm', '--import', key])
138
+
139
+
140
+def _run_yum_command(cmd, fatal=False):
141
+    """Run an YUM command.
142
+
143
+    Checks the output and retry if the fatal flag is set to True.
144
+
145
+    :param: cmd: str: The yum command to run.
146
+    :param: fatal: bool: Whether the command's output should be checked and
147
+        retried.
148
+    """
149
+    env = os.environ.copy()
150
+
151
+    if fatal:
152
+        retry_count = 0
153
+        result = None
154
+
155
+        # If the command is considered "fatal", we need to retry if the yum
156
+        # lock was not acquired.
157
+
158
+        while result is None or result == YUM_NO_LOCK:
159
+            try:
160
+                result = subprocess.check_call(cmd, env=env)
161
+            except subprocess.CalledProcessError as e:
162
+                retry_count = retry_count + 1
163
+                if retry_count > YUM_NO_LOCK_RETRY_COUNT:
164
+                    raise
165
+                result = e.returncode
166
+                log("Couldn't acquire YUM lock. Will retry in {} seconds."
167
+                    "".format(YUM_NO_LOCK_RETRY_DELAY))
168
+                time.sleep(YUM_NO_LOCK_RETRY_DELAY)
169
+
170
+    else:
171
+        subprocess.call(cmd, env=env)

+ 4
- 3
hooks/charmhelpers/fetch/giturl.py View File

@@ -18,17 +18,18 @@ from charmhelpers.fetch import (
18 18
     BaseFetchHandler,
19 19
     UnhandledSource,
20 20
     filter_installed_packages,
21
-    apt_install,
21
+    install,
22 22
 )
23 23
 
24 24
 if filter_installed_packages(['git']) != []:
25
-    apt_install(['git'])
25
+    install(['git'])
26 26
     if filter_installed_packages(['git']) != []:
27 27
         raise NotImplementedError('Unable to install git')
28 28
 
29 29
 
30 30
 class GitUrlFetchHandler(BaseFetchHandler):
31
-    """Handler for git branches via generic and github URLs"""
31
+    """Handler for git branches via generic and github URLs."""
32
+
32 33
     def can_handle(self, source):
33 34
         url_parts = self.parse_url(source)
34 35
         # TODO (mattyw) no support for ssh git@ yet
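For context, a hedged sketch of how this handler is normally exercised: charms rarely instantiate GitUrlFetchHandler directly, instead going through charmhelpers.fetch.install_remote(), which picks the first registered handler whose can_handle() accepts the source.

from charmhelpers.fetch import install_remote

# The URL is illustrative; install_remote() returns the checkout path on disk.
checkout_dir = install_remote('https://github.com/juju/charm-helpers')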

+ 336
- 0
hooks/charmhelpers/fetch/ubuntu.py View File

@@ -0,0 +1,336 @@
1
+# Copyright 2014-2015 Canonical Limited.
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License");
4
+# you may not use this file except in compliance with the License.
5
+# You may obtain a copy of the License at
6
+#
7
+#  http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS,
11
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+# See the License for the specific language governing permissions and
13
+# limitations under the License.
14
+
15
+import os
16
+import six
17
+import time
18
+import subprocess
19
+
20
+from tempfile import NamedTemporaryFile
21
+from charmhelpers.core.host import (
22
+    lsb_release
23
+)
24
+from charmhelpers.core.hookenv import log
25
+from charmhelpers.fetch import SourceConfigError
26
+
27
+CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
28
+deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
29
+"""
30
+
31
+PROPOSED_POCKET = """# Proposed
32
+deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
33
+"""
34
+
35
+CLOUD_ARCHIVE_POCKETS = {
36
+    # Folsom
37
+    'folsom': 'precise-updates/folsom',
38
+    'precise-folsom': 'precise-updates/folsom',
39
+    'precise-folsom/updates': 'precise-updates/folsom',
40
+    'precise-updates/folsom': 'precise-updates/folsom',
41
+    'folsom/proposed': 'precise-proposed/folsom',
42
+    'precise-folsom/proposed': 'precise-proposed/folsom',
43
+    'precise-proposed/folsom': 'precise-proposed/folsom',
44
+    # Grizzly
45
+    'grizzly': 'precise-updates/grizzly',
46
+    'precise-grizzly': 'precise-updates/grizzly',
47
+    'precise-grizzly/updates': 'precise-updates/grizzly',
48
+    'precise-updates/grizzly': 'precise-updates/grizzly',
49
+    'grizzly/proposed': 'precise-proposed/grizzly',
50
+    'precise-grizzly/proposed': 'precise-proposed/grizzly',
51
+    'precise-proposed/grizzly': 'precise-proposed/grizzly',
52
+    # Havana
53
+    'havana': 'precise-updates/havana',
54
+    'precise-havana': 'precise-updates/havana',
55
+    'precise-havana/updates': 'precise-updates/havana',
56
+    'precise-updates/havana': 'precise-updates/havana',
57
+    'havana/proposed': 'precise-proposed/havana',
58
+    'precise-havana/proposed': 'precise-proposed/havana',
59
+    'precise-proposed/havana': 'precise-proposed/havana',
60
+    # Icehouse
61
+    'icehouse': 'precise-updates/icehouse',
62
+    'precise-icehouse': 'precise-updates/icehouse',
63
+    'precise-icehouse/updates': 'precise-updates/icehouse',
64
+    'precise-updates/icehouse': 'precise-updates/icehouse',
65
+    'icehouse/proposed': 'precise-proposed/icehouse',
66
+    'precise-icehouse/proposed': 'precise-proposed/icehouse',
67
+    'precise-proposed/icehouse': 'precise-proposed/icehouse',
68
+    # Juno
69
+    'juno': 'trusty-updates/juno',
70
+    'trusty-juno': 'trusty-updates/juno',
71
+    'trusty-juno/updates': 'trusty-updates/juno',
72
+    'trusty-updates/juno': 'trusty-updates/juno',
73
+    'juno/proposed': 'trusty-proposed/juno',
74
+    'trusty-juno/proposed': 'trusty-proposed/juno',
75
+    'trusty-proposed/juno': 'trusty-proposed/juno',
76
+    # Kilo
77
+    'kilo': 'trusty-updates/kilo',
78
+    'trusty-kilo': 'trusty-updates/kilo',
79
+    'trusty-kilo/updates': 'trusty-updates/kilo',
80
+    'trusty-updates/kilo': 'trusty-updates/kilo',
81
+    'kilo/proposed': 'trusty-proposed/kilo',
82
+    'trusty-kilo/proposed': 'trusty-proposed/kilo',
83
+    'trusty-proposed/kilo': 'trusty-proposed/kilo',
84
+    # Liberty
85
+    'liberty': 'trusty-updates/liberty',
86
+    'trusty-liberty': 'trusty-updates/liberty',
87
+    'trusty-liberty/updates': 'trusty-updates/liberty',
88
+    'trusty-updates/liberty': 'trusty-updates/liberty',
89
+    'liberty/proposed': 'trusty-proposed/liberty',
90
+    'trusty-liberty/proposed': 'trusty-proposed/liberty',
91
+    'trusty-proposed/liberty': 'trusty-proposed/liberty',
92
+    # Mitaka
93
+    'mitaka': 'trusty-updates/mitaka',
94
+    'trusty-mitaka': 'trusty-updates/mitaka',
95
+    'trusty-mitaka/updates': 'trusty-updates/mitaka',
96
+    'trusty-updates/mitaka': 'trusty-updates/mitaka',
97
+    'mitaka/proposed': 'trusty-proposed/mitaka',
98
+    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
99
+    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
100
+    # Newton
101
+    'newton': 'xenial-updates/newton',
102
+    'xenial-newton': 'xenial-updates/newton',
103
+    'xenial-newton/updates': 'xenial-updates/newton',
104
+    'xenial-updates/newton': 'xenial-updates/newton',
105
+    'newton/proposed': 'xenial-proposed/newton',
106
+    'xenial-newton/proposed': 'xenial-proposed/newton',
107
+    'xenial-proposed/newton': 'xenial-proposed/newton',
108
+}
109
+
110
+APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
111
+APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
112
+APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
113
+
114
+
115
+def filter_installed_packages(packages):
116
+    """Return a list of packages that require installation."""
117
+    cache = apt_cache()
118
+    _pkgs = []
119
+    for package in packages:
120
+        try:
121
+            p = cache[package]
122
+            p.current_ver or _pkgs.append(package)
123
+        except KeyError:
124
+            log('Package {} has no installation candidate.'.format(package),
125
+                level='WARNING')
126
+            _pkgs.append(package)
127
+    return _pkgs
128
+
129
+
130
+def apt_cache(in_memory=True, progress=None):
131
+    """Build and return an apt cache."""
132
+    from apt import apt_pkg
133
+    apt_pkg.init()
134
+    if in_memory:
135
+        apt_pkg.config.set("Dir::Cache::pkgcache", "")
136
+        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
137
+    return apt_pkg.Cache(progress)
138
+
139
+
140
+def install(packages, options=None, fatal=False):
141
+    """Install one or more packages."""
142
+    if options is None:
143
+        options = ['--option=Dpkg::Options::=--force-confold']
144
+
145
+    cmd = ['apt-get', '--assume-yes']
146
+    cmd.extend(options)
147
+    cmd.append('install')
148
+    if isinstance(packages, six.string_types):
149
+        cmd.append(packages)
150
+    else:
151
+        cmd.extend(packages)
152
+    log("Installing {} with options: {}".format(packages,
153
+                                                options))
154
+    _run_apt_command(cmd, fatal)
155
+
156
+
157
+def upgrade(options=None, fatal=False, dist=False):
158
+    """Upgrade all packages."""
159
+    if options is None:
160
+        options = ['--option=Dpkg::Options::=--force-confold']
161
+
162
+    cmd = ['apt-get', '--assume-yes']
163
+    cmd.extend(options)
164
+    if dist:
165
+        cmd.append('dist-upgrade')
166
+    else:
167
+        cmd.append('upgrade')
168
+    log("Upgrading with options: {}".format(options))
169
+    _run_apt_command(cmd, fatal)
170
+
171
+
172
+def update(fatal=False):
173
+    """Update local apt cache."""
174
+    cmd = ['apt-get', 'update']
175
+    _run_apt_command(cmd, fatal)
176
+
177
+
178
+def purge(packages, fatal=False):
179
+    """Purge one or more packages."""
180
+    cmd = ['apt-get', '--assume-yes', 'purge']
181
+    if isinstance(packages, six.string_types):
182
+        cmd.append(packages)
183
+    else:
184
+        cmd.extend(packages)
185
+    log("Purging {}".format(packages))
186
+    _run_apt_command(cmd, fatal)
187
+
188
+
189
+def apt_mark(packages, mark, fatal=False):
190
+    """Flag one or more packages using apt-mark."""
191
+    log("Marking {} as {}".format(packages, mark))
192
+    cmd = ['apt-mark', mark]
193
+    if isinstance(packages, six.string_types):
194
+        cmd.append(packages)
195
+    else:
196
+        cmd.extend(packages)
197
+
198
+    if fatal:
199
+        subprocess.check_call(cmd, universal_newlines=True)
200
+    else:
201
+        subprocess.call(cmd, universal_newlines=True)
202
+
203
+
204
+def apt_hold(packages, fatal=False):
205
+    return apt_mark(packages, 'hold', fatal=fatal)
206
+
207
+
208
+def apt_unhold(packages, fatal=False):
209
+    return apt_mark(packages, 'unhold', fatal=fatal)
210
+
211
+
212
+def add_source(source, key=None):
213
+    """Add a package source to this system.
214
+
215
+    @param source: a URL or sources.list entry, as supported by
216
+    add-apt-repository(1). Examples::
217
+
218
+        ppa:charmers/example
219
+        deb https://stub:key@private.example.com/ubuntu trusty main
220
+
221
+    In addition:
222
+        'proposed:' may be used to enable the standard 'proposed'
223
+        pocket for the release.
224
+        'cloud:' may be used to activate official cloud archive pockets,
225
+        such as 'cloud:icehouse'
226
+        'distro' may be used as a noop
227
+
228
+    @param key: A key to be added to the system's APT keyring and used
229
+    to verify the signatures on packages. Ideally, this should be an
230
+    ASCII format GPG public key including the block headers. A GPG key
231
+    id may also be used, but be aware that only insecure protocols are
232
+    available to retrieve the actual public key from a public keyserver
233
+    placing your Juju environment at risk. ppa and cloud archive keys
234
+    are securely added automatically, so should not be provided.
235
+    """
236
+    if source is None:
237
+        log('Source is not present. Skipping')
238
+        return
239
+
240
+    if (source.startswith('ppa:') or
241
+        source.startswith('http') or
242
+        source.startswith('deb ') or
243
+            source.startswith('cloud-archive:')):
244
+        subprocess.check_call(['add-apt-repository', '--yes', source])
245
+    elif source.startswith('cloud:'):
246
+        install(filter_installed_packages(['ubuntu-cloud-keyring']),
247
+                fatal=True)
248
+        pocket = source.split(':')[-1]
249
+        if pocket not in CLOUD_ARCHIVE_POCKETS:
250
+            raise SourceConfigError(
251
+                'Unsupported cloud: source option %s' %
252
+                pocket)
253
+        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
254
+        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
255
+            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
256
+    elif source == 'proposed':
257
+        release = lsb_release()['DISTRIB_CODENAME']
258
+        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
259
+            apt.write(PROPOSED_POCKET.format(release))
260
+    elif source == 'distro':
261
+        pass
262
+    else:
263
+        log("Unknown source: {!r}".format(source))
264
+
265
+    if key:
266
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
267
+            with NamedTemporaryFile('w+') as key_file:
268
+                key_file.write(key)
269
+                key_file.flush()
270
+                key_file.seek(0)
271
+                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
272
+        else:
273
+            # Note that hkp: is in no way a secure protocol. Using a
274
+            # GPG key id is pointless from a security POV unless you
275
+            # absolutely trust your network and DNS.
276
+            subprocess.check_call(['apt-key', 'adv', '--keyserver',
277
+                                   'hkp://keyserver.ubuntu.com:80', '--recv',
278
+                                   key])
279
+
280
+
281
+def _run_apt_command(cmd, fatal=False):
282
+    """Run an APT command.
283
+
284
+    Checks the output and retries if the fatal flag is set
285
+    to True.
286
+
287
+    :param: cmd: str: The apt command to run.
288
+    :param: fatal: bool: Whether the command's output should be checked and
289
+        retried.
290
+    """
291
+    env = os.environ.copy()
292
+
293
+    if 'DEBIAN_FRONTEND' not in env:
294
+        env['DEBIAN_FRONTEND'] = 'noninteractive'
295
+
296
+    if fatal:
297
+        retry_count = 0
298
+        result = None
299
+
300
+        # If the command is considered "fatal", we need to retry if the apt
301
+        # lock was not acquired.
302
+
303
+        while result is None or result == APT_NO_LOCK:
304
+            try:
305
+                result = subprocess.check_call(cmd, env=env)
306
+            except subprocess.CalledProcessError as e:
307
+                retry_count = retry_count + 1
308
+                if retry_count > APT_NO_LOCK_RETRY_COUNT:
309
+                    raise
310
+                result = e.returncode
311
+                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
312
+                    "".format(APT_NO_LOCK_RETRY_DELAY))
313
+                time.sleep(APT_NO_LOCK_RETRY_DELAY)
314
+
315
+    else:
316
+        subprocess.call(cmd, env=env)
317
+
318
+
319
+def get_upstream_version(package):
320
+    """Determine upstream version based on installed package
321
+
322
+    @returns None (if not installed) or the upstream version
323
+    """
324
+    import apt_pkg
325
+    cache = apt_cache()
326
+    try:
327
+        pkg = cache[package]
328
+    except:
329
+        # the package is unknown to the current apt cache.
330
+        return None
331
+
332
+    if not pkg.current_ver:
333
+        # package is known, but no version is currently installed.
334
+        return None
335
+
336
+    return apt_pkg.upstream_version(pkg.current_ver.ver_str)
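The module keeps the calling conventions of the previous apt helpers in charmhelpers.fetch. A minimal sketch of a typical install-hook flow follows; the config key and package name are illustrative, and in practice charms import these names from charmhelpers.fetch, which dispatches to this module on Ubuntu.

from charmhelpers.core.hookenv import config
from charmhelpers.fetch.ubuntu import add_source, update, install

# 'openstack-origin' might be e.g. 'cloud:xenial-newton' or 'distro'.
add_source(config('openstack-origin'))
update(fatal=True)                  # retries while the apt/dpkg lock is held
install(['nova-common'], fatal=True)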

+ 19
- 0
hooks/charmhelpers/osplatform.py View File

@@ -0,0 +1,19 @@
1
+import platform
2
+
3
+
4
+def get_platform():
5
+    """Return the current OS platform.
6
+
7
+    For example: if current os platform is Ubuntu then a string "ubuntu"
8
+    will be returned (which is the name of the module).
9
+    This string is used to decide which platform module should be imported.
10
+    """
11
+    tuple_platform = platform.linux_distribution()
12
+    current_platform = tuple_platform[0]
13
+    if "Ubuntu" in current_platform:
14
+        return "ubuntu"
15
+    elif "CentOS" in current_platform:
16
+        return "centos"
17
+    else:
18
+        raise RuntimeError("This module is not supported on {}."
19
+                           .format(current_platform))
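A simplified rendering of the dispatch pattern this module enables (the real wiring lives in charmhelpers/fetch/__init__.py, which selects the platform-specific fetch module at import time):

import importlib

from charmhelpers.osplatform import get_platform

platform = get_platform()                               # 'ubuntu' or 'centos'
fetch = importlib.import_module('charmhelpers.fetch.' + platform)
fetch.install(['git'], fatal=True)                      # apt-get or yum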

+ 3
- 2
hooks/charmhelpers/payload/execd.py View File

@@ -47,11 +47,12 @@ def execd_submodule_paths(command, execd_dir=None):
47 47
             yield path
48 48
 
49 49
 
50
-def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
50
+def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT):
51 51
     """Run command for each module within execd_dir which defines it."""
52 52
     for submodule_path in execd_submodule_paths(command, execd_dir):
53 53
         try:
54
-            subprocess.check_call(submodule_path, shell=True, stderr=stderr)
54
+            subprocess.check_output(submodule_path, stderr=stderr,
55
+                                    universal_newlines=True)
55 56
         except subprocess.CalledProcessError as e:
56 57
             hookenv.log("Error ({}) running  {}. Output: {}".format(
57 58
                 e.returncode, e.cmd, e.output))
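With die_on_error now defaulting to True and output captured, a failing exec.d script is no longer silently ignored. A hedged sketch of the usual entry point, assuming execd_preinstall() in this module wraps execd_run('charm-pre-install'):

from charmhelpers.payload.execd import execd_preinstall

def install():
    # Runs any exec.d/*/charm-pre-install scripts; with the new defaults a
    # non-zero exit is fatal to the hook rather than just logged.
    execd_preinstall()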

+ 61
- 11
tests/charmhelpers/contrib/openstack/amulet/deployment.py View File

@@ -98,8 +98,47 @@ class OpenStackAmuletDeployment(AmuletDeployment):
98 98
 
99 99
         return other_services
100 100
 
101
-    def _add_services(self, this_service, other_services):
102
-        """Add services to the deployment and set openstack-origin/source."""
101
+    def _add_services(self, this_service, other_services, use_source=None,
102
+                      no_origin=None):
103
+        """Add services to the deployment and optionally set
104
+        openstack-origin/source.
105
+
106
+        :param this_service dict: Service dictionary describing the service
107
+                                  whose amulet tests are being run
108
+        :param other_services dict: List of service dictionaries describing
109
+                                    the services needed to support the target
110
+                                    service
111
+        :param use_source list: List of services which use the 'source' config
112
+                                option rather than 'openstack-origin'
113
+        :param no_origin list: List of services which do not support setting
114
+                               the Cloud Archive.
115
+        Service Dict:
116
+            {
117
+                'name': str charm-name,
118
+                'units': int number of units,
119
+                'constraints': dict of juju constraints,
120
+                'location': str location of charm,
121
+            }
122
+        eg
123
+        this_service = {
124
+            'name': 'openvswitch-odl',
125
+            'constraints': {'mem': '8G'},
126
+        }
127
+        other_services = [
128
+            {
129
+                'name': 'nova-compute',
130
+                'units': 2,
131
+                'constraints': {'mem': '4G'},
132
+                'location': cs:~bob/xenial/nova-compute
133
+            },
134
+            {
135
+                'name': 'mysql',
136
+                'constraints': {'mem': '2G'},
137
+            },
138
+            {'neutron-api-odl'}]
139
+        use_source = ['mysql']
140
+        no_origin = ['neutron-api-odl']
141
+        """
103 142
         self.log.info('OpenStackAmuletDeployment:  adding services')
104 143
 
105 144
         other_services = self._determine_branch_locations(other_services)
@@ -110,16 +149,22 @@ class OpenStackAmuletDeployment(AmuletDeployment):
110 149
         services = other_services
111 150
         services.append(this_service)
112 151
 
152
+        use_source = use_source or []
153
+        no_origin = no_origin or []
154
+
113 155
         # Charms which should use the source config option
114
-        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
115
-                      'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy']
156
+        use_source = list(set(
157
+            use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
158
+                          'ceph-osd', 'ceph-radosgw', 'ceph-mon',
159
+                          'ceph-proxy']))
116 160
 
117 161
         # Charms which can not use openstack-origin, ie. many subordinates
118
-        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
119
-                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
120
-                     'cinder-backup', 'nexentaedge-data',
121
-                     'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
122
-                     'cinder-nexentaedge', 'nexentaedge-mgmt']
162
+        no_origin = list(set(
163
+            no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
164
+                         'nrpe', 'openvswitch-odl', 'neutron-api-odl',
165
+                         'odl-controller', 'cinder-backup', 'nexentaedge-data',
166
+                         'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
167
+                         'cinder-nexentaedge', 'nexentaedge-mgmt']))
123 168
 
124 169
         if self.openstack:
125 170
             for svc in services:
@@ -220,7 +265,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
220 265
          self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
221 266
          self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
222 267
          self.wily_liberty, self.trusty_mitaka,
223
-         self.xenial_mitaka) = range(14)
268
+         self.xenial_mitaka, self.xenial_newton,
269
+         self.yakkety_newton) = range(16)
224 270
 
225 271
         releases = {
226 272
             ('precise', None): self.precise_essex,
@@ -236,7 +282,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
236 282
             ('utopic', None): self.utopic_juno,
237 283
             ('vivid', None): self.vivid_kilo,
238 284
             ('wily', None): self.wily_liberty,
239
-            ('xenial', None): self.xenial_mitaka}
285
+            ('xenial', None): self.xenial_mitaka,
286
+            ('xenial', 'cloud:xenial-newton'): self.xenial_newton,
287
+            ('yakkety', None): self.yakkety_newton,
288
+        }
240 289
         return releases[(self.series, self.openstack)]
241 290
 
242 291
     def _get_openstack_release_string(self):
@@ -254,6 +303,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
254 303
             ('vivid', 'kilo'),
255 304
             ('wily', 'liberty'),
256 305
             ('xenial', 'mitaka'),
306
+            ('yakkety', 'newton'),
257 307
         ])
258 308
         if self.openstack:
259 309
             os_origin = self.openstack.split(':')[1]
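From within an OpenStackAmuletDeployment subclass, the widened signature lets a test route extra charms to the 'source' option or exclude them from openstack-origin without patching charm-helpers. An illustrative call, reusing the names from the docstring example:

self._add_services(
    {'name': 'openvswitch-odl', 'constraints': {'mem': '8G'}},
    [{'name': 'mysql'}, {'name': 'nova-compute', 'units': 2},
     {'name': 'neutron-api-odl'}],
    use_source=['mysql'],
    no_origin=['neutron-api-odl'],
)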

+ 118
- 1
tests/charmhelpers/contrib/openstack/amulet/utils.py View File

@@ -83,6 +83,56 @@ class OpenStackAmuletUtils(AmuletUtils):
83 83
         if not found:
84 84
             return 'endpoint not found'
85 85
 
86
+    def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
87
+                                  public_port, expected):
88
+        """Validate keystone v3 endpoint data.
89
+
90
+        Validate the v3 endpoint data which has changed from v2.  The
91
+        ports are used to find the matching endpoint.
92
+
93
+        The new v3 endpoint data looks like:
94
+
95
+        [<Endpoint enabled=True,
96
+                   id=0432655fc2f74d1e9fa17bdaa6f6e60b,
97
+                   interface=admin,
98
+                   links={u'self': u'<RESTful URL of this endpoint>'},
99
+                   region=RegionOne,
100
+                   region_id=RegionOne,
101
+                   service_id=17f842a0dc084b928e476fafe67e4095,
102
+                   url=http://10.5.6.5:9312>,
103
+         <Endpoint enabled=True,
104
+                   id=6536cb6cb92f4f41bf22b079935c7707,
105
+                   interface=admin,
106
+                   links={u'self': u'<RESTful url of this endpoint>'},
107
+                   region=RegionOne,
108
+                   region_id=RegionOne,
109
+                   service_id=72fc8736fb41435e8b3584205bb2cfa3,
110
+                   url=http://10.5.6.6:35357/v3>,
111
+                   ... ]
112
+        """
113
+        self.log.debug('Validating v3 endpoint data...')
114
+        self.log.debug('actual: {}'.format(repr(endpoints)))
115
+        found = []
116
+        for ep in endpoints:
117
+            self.log.debug('endpoint: {}'.format(repr(ep)))
118
+            if ((admin_port in ep.url and ep.interface == 'admin') or
119
+                    (internal_port in ep.url and ep.interface == 'internal') or
120
+                    (public_port in ep.url and ep.interface == 'public')):
121
+                found.append(ep.interface)
122
+                # note we ignore the links member.
123
+                actual = {'id': ep.id,
124
+                          'region': ep.region,
125
+                          'region_id': ep.region_id,
126
+                          'interface': self.not_null,
127
+                          'url': ep.url,
128
+                          'service_id': ep.service_id, }
129
+                ret = self._validate_dict_data(expected, actual)
130
+                if ret:
131
+                    return 'unexpected endpoint data - {}'.format(ret)
132
+
133
+        if len(found) != 3:
134
+            return 'Unexpected number of endpoints found'
135
+
86 136
     def validate_svc_catalog_endpoint_data(self, expected, actual):
87 137
         """Validate service catalog endpoint data.
88 138
 
@@ -100,6 +150,72 @@ class OpenStackAmuletUtils(AmuletUtils):
100 150
                 return "endpoint {} does not exist".format(k)
101 151
         return ret
102 152
 
153
+    def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
154
+        """Validate the keystone v3 catalog endpoint data.
155
+
156
+        Validate a list of dictinaries that make up the keystone v3 service
157
+        catalogue.
158
+
159
+        It is in the form of:
160
+
161
+
162
+        {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
163
+                        u'interface': u'admin',
164
+                        u'region': u'RegionOne',
165
+                        u'region_id': u'RegionOne',
166
+                        u'url': u'http://10.5.5.224:35357/v3'},
167
+                       {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
168
+                        u'interface': u'public',
169
+                        u'region': u'RegionOne',
170
+                        u'region_id': u'RegionOne',
171
+                        u'url': u'http://10.5.5.224:5000/v3'},
172
+                       {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
173
+                        u'interface': u'internal',
174
+                        u'region': u'RegionOne',
175
+                        u'region_id': u'RegionOne',
176
+                        u'url': u'http://10.5.5.224:5000/v3'}],
177
+         u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
178
+                           u'interface': u'public',
179
+                           u'region': u'RegionOne',
180
+                           u'region_id': u'RegionOne',
181
+                           u'url': u'http://10.5.5.223:9311'},
182
+                          {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
183
+                           u'interface': u'internal',
184
+                           u'region': u'RegionOne',
185
+                           u'region_id': u'RegionOne',
186
+                           u'url': u'http://10.5.5.223:9311'},
187
+                          {u'id': u'f629388955bc407f8b11d8b7ca168086',
188
+                           u'interface': u'admin',
189
+                           u'region': u'RegionOne',
190
+                           u'region_id': u'RegionOne',
191
+                           u'url': u'http://10.5.5.223:9312'}]}
192
+
193
+        Note, that an added complication is that the order of admin, public,
194
+        internal against 'interface' in each region.
195
+
196
+        Thus, the function sorts the expected and actual lists using the
197
+        interface key as a sort key, prior to the comparison.
198
+        """
199
+        self.log.debug('Validating v3 service catalog endpoint data...')
200
+        self.log.debug('actual: {}'.format(repr(actual)))
201
+        for k, v in six.iteritems(expected):
202
+            if k in actual:
203
+                l_expected = sorted(v, key=lambda x: x['interface'])
204
+                l_actual = sorted(actual[k], key=lambda x: x['interface'])
205
+                if len(l_actual) != len(l_expected):
206
+                    return ("endpoint {} has differing number of interfaces "
207
+                            " - expected({}), actual({})"
208
+                            .format(k, len(l_expected), len(l_actual)))
209
+                for i_expected, i_actual in zip(l_expected, l_actual):
210
+                    self.log.debug("checking interface {}"
211
+                                   .format(i_expected['interface']))
212
+                    ret = self._validate_dict_data(i_expected, i_actual)
213
+                    if ret:
214
+                        return self.endpoint_error(k, ret)
215
+            else:
216
+                return "endpoint {} does not exist".format(k)
217
+        return ret
218
+
103 219
     def validate_tenant_data(self, expected, actual):
104 220
         """Validate tenant data.
105 221
 
@@ -928,7 +1044,8 @@ class OpenStackAmuletUtils(AmuletUtils):
928 1044
                                                    retry_delay=5,
929 1045
                                                    socket_timeout=1)
930 1046
             connection = pika.BlockingConnection(parameters)
931
-            assert connection.server_properties['product'] == 'RabbitMQ'
1047
+            assert connection.is_open is True
1048
+            assert connection.is_closing is False
932 1049
             self.log.debug('Connect OK')
933 1050
             return connection
934 1051
         except Exception as e:
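A hedged sketch of how an amulet test could drive the new v3 validators; the keystone client handle, ports, and expected values are placeholders specific to the deployed topology.

# 'keystone_v3' is a placeholder for a keystone v3 client the test already
# holds; the validators return None on success or an error string otherwise.
endpoints = keystone_v3.endpoints.list()
ret = u.validate_v3_endpoint_data(
    endpoints, admin_port='35357', internal_port='5000', public_port='5000',
    expected={'id': u.not_null, 'region': 'RegionOne',
              'region_id': 'RegionOne', 'interface': u.not_null,
              'url': u.not_null, 'service_id': u.not_null})
assert ret is None, ret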
