diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml
index f3313a55..e9eb9a65 100644
--- a/charm-helpers-hooks.yaml
+++ b/charm-helpers-hooks.yaml
@@ -1,7 +1,8 @@
-branch: lp:charm-helpers
+branch: lp:charm-helpers
 destination: hooks/charmhelpers
 include:
     - core
+    - osplatform
     - cli
     - fetch
     - contrib.openstack|inc=*
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index 17976fb5..1410512a 100644
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -38,6 +38,7 @@ from charmhelpers.core.hookenv import (
 )
 
 from charmhelpers.core.host import service
+from charmhelpers.core import host
 
 # This module adds compatibility with the nrpe-external-master and plain nrpe
 # subordinate charms. To use it in your charm:
@@ -108,6 +109,13 @@ from charmhelpers.core.host import service
 #    def local_monitors_relation_changed():
 #        update_nrpe_config()
 #
+# 4.a If your charm is a subordinate charm, set primary=False
+#
+#    from charmsupport.nrpe import NRPE
+#    (...)
+#    def update_nrpe_config():
+#        nrpe_compat = NRPE(primary=False)
+#
 # 5. ln -s hooks.py nrpe-external-master-relation-changed
 #    ln -s hooks.py local-monitors-relation-changed
 
@@ -220,9 +228,10 @@ class NRPE(object):
     nagios_exportdir = '/var/lib/nagios/export'
     nrpe_confdir = '/etc/nagios/nrpe.d'
 
-    def __init__(self, hostname=None):
+    def __init__(self, hostname=None, primary=True):
         super(NRPE, self).__init__()
         self.config = config()
+        self.primary = primary
         self.nagios_context = self.config['nagios_context']
         if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
             self.nagios_servicegroups = self.config['nagios_servicegroups']
@@ -238,6 +247,12 @@ class NRPE(object):
         else:
             self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
         self.checks = []
+        # If in an nrpe-external-master relation hook, set primary status
+        relation = relation_ids('nrpe-external-master')
+        if relation:
+            log("Setting charm primary status {}".format(primary))
+            for rid in relation_ids('nrpe-external-master'):
+                relation_set(relation_id=rid, relation_settings={'primary': self.primary})
 
     def add_check(self, *args, **kwargs):
         self.checks.append(Check(*args, **kwargs))
@@ -332,16 +347,25 @@ def add_init_service_checks(nrpe, services, unit_name):
     :param str unit_name: Unit name to use in check description
     """
     for svc in services:
+        # Don't add a check for these services from neutron-gateway
+        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
+            continue
+
         upstart_init = '/etc/init/%s.conf' % svc
         sysv_init = '/etc/init.d/%s' % svc
-        if os.path.exists(upstart_init):
-            # Don't add a check for these services from neutron-gateway
-            if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
-                nrpe.add_check(
-                    shortname=svc,
-                    description='process check {%s}' % unit_name,
-                    check_cmd='check_upstart_job %s' % svc
-                )
+
+        if host.init_is_systemd():
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_systemd.py %s' % svc
+            )
+        elif os.path.exists(upstart_init):
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_upstart_job %s' % svc
+            )
         elif os.path.exists(sysv_init):
             cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
             cron_file = ('*/5 * * * * root '
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index 8040b570..24b353ee 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -83,6 +83,56 @@ class OpenStackAmuletUtils(AmuletUtils):
         if not found:
             return 'endpoint not found'
 
+    def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
+                                  public_port, expected):
+        """Validate keystone v3 endpoint data.
+
+        Validate the v3 endpoint data which has changed from v2. The
+        ports are used to find the matching endpoint.
+
+        The new v3 endpoint data looks like:
+
+        [<Endpoint enabled=True,
+                   id=<endpoint-id>,
+                   interface=admin,
+                   links={u'self': u'<RESTful URL of this endpoint>'},
+                   region=RegionOne,
+                   region_id=RegionOne,
+                   service_id=17f842a0dc084b928e476fafe67e4095,
+                   url=http://10.5.6.5:9312>,
+         <Endpoint enabled=True,
+                   id=<endpoint-id>,
+                   interface=admin,
+                   links={u'self': u'<RESTful URL of this endpoint>'},
+                   region=RegionOne,
+                   region_id=RegionOne,
+                   service_id=72fc8736fb41435e8b3584205bb2cfa3,
+                   url=http://10.5.6.6:35357/v3>,
+         ... ]
+        """
+        self.log.debug('Validating v3 endpoint data...')
+        self.log.debug('actual: {}'.format(repr(endpoints)))
+        found = []
+        for ep in endpoints:
+            self.log.debug('endpoint: {}'.format(repr(ep)))
+            if ((admin_port in ep.url and ep.interface == 'admin') or
+                    (internal_port in ep.url and ep.interface == 'internal') or
+                    (public_port in ep.url and ep.interface == 'public')):
+                found.append(ep.interface)
+                # note we ignore the links member.
+                actual = {'id': ep.id,
+                          'region': ep.region,
+                          'region_id': ep.region_id,
+                          'interface': self.not_null,
+                          'url': ep.url,
+                          'service_id': ep.service_id, }
+                ret = self._validate_dict_data(expected, actual)
+                if ret:
+                    return 'unexpected endpoint data - {}'.format(ret)
+
+        if len(found) != 3:
+            return 'Unexpected number of endpoints found'
+
     def validate_svc_catalog_endpoint_data(self, expected, actual):
         """Validate service catalog endpoint data.
 
@@ -100,6 +150,72 @@ class OpenStackAmuletUtils(AmuletUtils):
                 return "endpoint {} does not exist".format(k)
         return ret
 
+    def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
+        """Validate the keystone v3 catalog endpoint data.
+
+        Validate a list of dictionaries that make up the keystone v3 service
+        catalogue.
+
+        It is in the form of:
+
+
+        {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
+                        u'interface': u'admin',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:35357/v3'},
+                       {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
+                        u'interface': u'public',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:5000/v3'},
+                       {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
+                        u'interface': u'internal',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:5000/v3'}],
+         u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
+                           u'interface': u'public',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9311'},
+                          {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
+                           u'interface': u'internal',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9311'},
+                          {u'id': u'f629388955bc407f8b11d8b7ca168086',
+                           u'interface': u'admin',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9312'}]}
+
+        Note that an added complication is that the order of admin, public
+        and internal endpoints (keyed on 'interface') may differ per region.
+
+        Thus, the function sorts the expected and actual lists using the
+        interface key as a sort key, prior to the comparison.
+ """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + def validate_tenant_data(self, expected, actual): """Validate tenant data. @@ -928,7 +1044,8 @@ class OpenStackAmuletUtils(AmuletUtils): retry_delay=5, socket_timeout=1) connection = pika.BlockingConnection(parameters) - assert connection.server_properties['product'] == 'RabbitMQ' + assert connection.is_open is True + assert connection.is_closing is False self.log.debug('Connect OK') return connection except Exception as e: diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index 03427b49..d1510dd3 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -245,6 +245,10 @@ def neutron_plugins(): 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2') plugins['plumgrid']['server_packages'].remove( 'neutron-plugin-plumgrid') + if release >= 'mitaka': + plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') + plugins['nsx']['server_packages'].append('python-vmware-nsx') + plugins['nsx']['config'] = '/etc/neutron/nsx.ini' return plugins diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 9d3e3d89..6cfa30fb 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -51,7 +51,8 @@ from charmhelpers.core.hookenv import ( relation_set, service_name, status_set, - hook_name + hook_name, + application_version_set, ) from charmhelpers.contrib.storage.linux.lvm import ( @@ -1889,3 +1890,29 @@ def config_flags_parser(config_flags): flags[key.strip(post_strippers)] = value.rstrip(post_strippers) return flags + + +def os_application_version_set(package): + '''Set version of application for Juju 2.0 and later''' + import apt_pkg as apt + cache = apt_cache() + application_version = None + application_codename = os_release(package) + + try: + pkg = cache[package] + if not pkg.current_ver: + juju_log('Package {} is not currently installed.'.format(package), + DEBUG) + else: + application_version = apt.upstream_version(pkg.current_ver.ver_str) + except: + juju_log('Package {} has no installation candidate.'.format(package), + DEBUG) + + # NOTE(jamespage) if not able to figure out package version, fallback to + # openstack codename version detection. 
+ if not application_version: + application_version_set(application_codename) + else: + application_version_set(application_version) diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index beff2703..edb536c7 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -87,6 +87,7 @@ clog to syslog = {use_syslog} DEFAULT_PGS_PER_OSD_TARGET = 100 DEFAULT_POOL_WEIGHT = 10.0 LEGACY_PG_COUNT = 200 +DEFAULT_MINIMUM_PGS = 2 def validator(value, valid_type, valid_range=None): @@ -266,6 +267,11 @@ class Pool(object): target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + # NOTE: ensure a sane minimum number of PGS otherwise we don't get any + # reasonable data distribution in minimal OSD configurations + if num_pg < DEFAULT_MINIMUM_PGS: + num_pg = DEFAULT_MINIMUM_PGS + # The CRUSH algorithm has a slight optimization for placement groups # with powers of 2 so find the nearest power of 2. If the nearest # power of 2 is more than 25% below the original value, the next diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 48b2b9dc..996e81cc 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -843,6 +843,20 @@ def translate_exc(from_exc, to_exc): return inner_translate_exc1 +def application_version_set(version): + """Charm authors may trigger this command from any hook to output what + version of the application is running. This could be a package version, + for instance postgres version 9.5. It could also be a build number or + version control revision identifier, for instance git sha 6fb7ba68. 
""" + + cmd = ['application-version-set'] + cmd.append(version) + try: + subprocess.check_call(cmd) + except OSError: + log("Application Version: {}".format(version)) + + @translate_exc(from_exc=OSError, to_exc=NotImplementedError) def is_leader(): """Does the current unit hold the juju leadership diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 53068599..0f1b2f35 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -30,13 +30,29 @@ import subprocess import hashlib import functools import itertools -from contextlib import contextmanager -from collections import OrderedDict - import six +from contextlib import contextmanager +from collections import OrderedDict from .hookenv import log from .fstab import Fstab +from charmhelpers.osplatform import get_platform + +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.host_factory.ubuntu import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.host_factory.centos import ( + service_available, + add_new_group, + lsb_release, + cmp_pkgrevno, + ) # flake8: noqa -- ignore F401 for this import def service_start(service_name): @@ -144,8 +160,11 @@ def service_running(service_name): return False else: # This works for upstart scripts where the 'service' command - # returns a consistent string to represent running 'start/running' - if "start/running" in output: + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): return True elif os.path.exists(_INIT_D_CONF.format(service_name)): # Check System V scripts init script return codes @@ -153,18 +172,6 @@ def service_running(service_name): return False -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - SYSTEMD_SYSTEM = '/run/systemd/system' @@ -173,8 +180,9 @@ def init_is_systemd(): return os.path.isdir(SYSTEMD_SYSTEM) -def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None, uid=None, home_dir=None): +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): """Add a user to the system. Will log but otherwise succeed if the user already exists. 
@@ -286,17 +294,7 @@ def add_group(group_name, system_group=False, gid=None): log('group with gid {0} already exists!'.format(gid)) except KeyError: log('creating group {0}'.format(group_name)) - cmd = ['addgroup'] - if gid: - cmd.extend(['--gid', str(gid)]) - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) + add_new_group(group_name, system_group, gid) group_info = grp.getgrnam(group_name) return group_info @@ -541,16 +539,6 @@ def restart_on_change_helper(lambda_f, restart_map, stopstart=False, return r -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - def pwgen(length=None): """Generate a random pasword.""" if length is None: @@ -674,25 +662,6 @@ def get_nic_hwaddr(nic): return hwaddr -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) - - @contextmanager def chdir(directory): """Change the current working directory to a different directory for a code diff --git a/hooks/charmhelpers/core/host_factory/__init__.py b/hooks/charmhelpers/core/host_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/core/host_factory/centos.py b/hooks/charmhelpers/core/host_factory/centos.py new file mode 100644 index 00000000..902d469f --- /dev/null +++ b/hooks/charmhelpers/core/host_factory/centos.py @@ -0,0 +1,56 @@ +import subprocess +import yum +import os + + +def service_available(service_name): + # """Determine whether a system service is available.""" + if os.path.isdir('/run/systemd/system'): + cmd = ['systemctl', 'is-enabled', service_name] + else: + cmd = ['service', service_name, 'is-enabled'] + return subprocess.call(cmd) == 0 + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['groupadd'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('-r') + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/os-release in a dict.""" + d = {} + with open('/etc/os-release', 'r') as lsb: + for l in lsb: + s = l.split('=') + if len(s) != 2: + continue + d[s[0].strip()] = s[1].strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports YumBase function if the pkgcache argument + is None. 
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 00000000..8c66af55 --- /dev/null +++ b/hooks/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,56 @@ +import subprocess + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) diff --git a/hooks/charmhelpers/core/kernel.py b/hooks/charmhelpers/core/kernel.py index b166efec..2d404528 100644 --- a/hooks/charmhelpers/core/kernel.py +++ b/hooks/charmhelpers/core/kernel.py @@ -15,15 +15,28 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__author__ = "Jorge Niedbalski " +import re +import subprocess +from charmhelpers.osplatform import get_platform from charmhelpers.core.hookenv import ( log, INFO ) -from subprocess import check_call, check_output -import re +__platform__ = get_platform() +if __platform__ == "ubuntu": + from charmhelpers.core.kernel_factory.ubuntu import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import +elif __platform__ == "centos": + from charmhelpers.core.kernel_factory.centos import ( + persistent_modprobe, + update_initramfs, + ) # flake8: noqa -- ignore F401 for this import + +__author__ = "Jorge Niedbalski " def modprobe(module, persist=True): @@ -32,11 +45,9 @@ def modprobe(module, persist=True): log('Loading kernel module %s' % module, level=INFO) - check_call(cmd) + subprocess.check_call(cmd) if persist: - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) + persistent_modprobe(module) def rmmod(module, force=False): @@ -46,21 +57,16 @@ def rmmod(module, force=False): cmd.append('-f') cmd.append(module) log('Removing kernel module %s' % module, level=INFO) - return check_call(cmd) + return subprocess.check_call(cmd) def lsmod(): """Shows what kernel modules are currently loaded""" - return check_output(['lsmod'], - universal_newlines=True) + return subprocess.check_output(['lsmod'], + universal_newlines=True) def is_module_loaded(module): """Checks if a kernel module is already loaded""" matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) return len(matches) > 0 - - -def update_initramfs(version='all'): - """Updates an initramfs image""" - return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/hooks/charmhelpers/core/kernel_factory/__init__.py b/hooks/charmhelpers/core/kernel_factory/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/hooks/charmhelpers/core/kernel_factory/centos.py b/hooks/charmhelpers/core/kernel_factory/centos.py new file mode 100644 index 00000000..1c402c11 --- /dev/null +++ b/hooks/charmhelpers/core/kernel_factory/centos.py @@ -0,0 +1,17 @@ +import subprocess +import os + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + if not os.path.exists('/etc/rc.modules'): + open('/etc/rc.modules', 'a') + os.chmod('/etc/rc.modules', 111) + with open('/etc/rc.modules', 'r+') as modules: + if module not in modules.read(): + modules.write('modprobe %s\n' % module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["dracut", "-f", version]) diff --git a/hooks/charmhelpers/core/kernel_factory/ubuntu.py b/hooks/charmhelpers/core/kernel_factory/ubuntu.py new file mode 100644 index 00000000..21559642 --- /dev/null +++ b/hooks/charmhelpers/core/kernel_factory/ubuntu.py @@ -0,0 +1,13 @@ +import subprocess + + +def persistent_modprobe(module): + """Load a kernel module and configure for auto-load on reboot.""" + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def update_initramfs(version='all'): + """Updates an initramfs image.""" + return subprocess.check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 52eaf824..174c6330 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -13,18 +13,12 @@ # limitations under the License. 
import importlib -from tempfile import NamedTemporaryFile -import time +from charmhelpers.osplatform import get_platform from yaml import safe_load -from charmhelpers.core.host import ( - lsb_release -) -import subprocess from charmhelpers.core.hookenv import ( config, log, ) -import os import six if six.PY3: @@ -33,87 +27,6 @@ else: from urlparse import urlparse, urlunparse -CLOUD_ARCHIVE = """# Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" -CLOUD_ARCHIVE_POCKETS = { - # Folsom - 'folsom': 'precise-updates/folsom', - 'precise-folsom': 'precise-updates/folsom', - 'precise-folsom/updates': 'precise-updates/folsom', - 'precise-updates/folsom': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'precise-folsom/proposed': 'precise-proposed/folsom', - 'precise-proposed/folsom': 'precise-proposed/folsom', - # Grizzly - 'grizzly': 'precise-updates/grizzly', - 'precise-grizzly': 'precise-updates/grizzly', - 'precise-grizzly/updates': 'precise-updates/grizzly', - 'precise-updates/grizzly': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'precise-grizzly/proposed': 'precise-proposed/grizzly', - 'precise-proposed/grizzly': 'precise-proposed/grizzly', - # Havana - 'havana': 'precise-updates/havana', - 'precise-havana': 'precise-updates/havana', - 'precise-havana/updates': 'precise-updates/havana', - 'precise-updates/havana': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'precise-havana/proposed': 'precise-proposed/havana', - 'precise-proposed/havana': 'precise-proposed/havana', - # Icehouse - 'icehouse': 'precise-updates/icehouse', - 'precise-icehouse': 'precise-updates/icehouse', - 'precise-icehouse/updates': 'precise-updates/icehouse', - 'precise-updates/icehouse': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'precise-icehouse/proposed': 'precise-proposed/icehouse', - 'precise-proposed/icehouse': 'precise-proposed/icehouse', - # Juno - 'juno': 'trusty-updates/juno', - 'trusty-juno': 'trusty-updates/juno', - 'trusty-juno/updates': 'trusty-updates/juno', - 'trusty-updates/juno': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'trusty-juno/proposed': 'trusty-proposed/juno', - 'trusty-proposed/juno': 'trusty-proposed/juno', - # Kilo - 'kilo': 'trusty-updates/kilo', - 'trusty-kilo': 'trusty-updates/kilo', - 'trusty-kilo/updates': 'trusty-updates/kilo', - 'trusty-updates/kilo': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'trusty-kilo/proposed': 'trusty-proposed/kilo', - 'trusty-proposed/kilo': 'trusty-proposed/kilo', - # Liberty - 'liberty': 'trusty-updates/liberty', - 'trusty-liberty': 'trusty-updates/liberty', - 'trusty-liberty/updates': 'trusty-updates/liberty', - 'trusty-updates/liberty': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'trusty-liberty/proposed': 'trusty-proposed/liberty', - 'trusty-proposed/liberty': 'trusty-proposed/liberty', - # Mitaka - 'mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka/updates': 'trusty-updates/mitaka', - 'trusty-updates/mitaka': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', - 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', - # Newton - 'newton': 'xenial-updates/newton', - 
'xenial-newton': 'xenial-updates/newton', - 'xenial-newton/updates': 'xenial-updates/newton', - 'xenial-updates/newton': 'xenial-updates/newton', - 'newton/proposed': 'xenial-proposed/newton', - 'xenial-newton/proposed': 'xenial-proposed/newton', - 'xenial-proposed/newton': 'xenial-proposed/newton', -} - # The order of this list is very important. Handlers should be listed in from # least- to most-specific URL matching. FETCH_HANDLERS = ( @@ -122,10 +35,6 @@ FETCH_HANDLERS = ( 'charmhelpers.fetch.giturl.GitUrlFetchHandler', ) -APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. - class SourceConfigError(Exception): pass @@ -163,180 +72,37 @@ class BaseFetchHandler(object): return urlunparse(parts) -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - cache = apt_cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs +__platform__ = get_platform() +module = "charmhelpers.fetch.%s" % __platform__ +fetch = importlib.import_module(module) +filter_installed_packages = fetch.filter_installed_packages +install = fetch.install +upgrade = fetch.upgrade +update = fetch.update +purge = fetch.purge +add_source = fetch.add_source -def apt_cache(in_memory=True, progress=None): - """Build and return an apt cache""" - from apt import apt_pkg - apt_pkg.init() - if in_memory: - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache(progress) - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_apt_command(cmd, fatal) - - -def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - if dist: - cmd.append('dist-upgrade') - else: - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_apt_command(cmd, fatal) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - _run_apt_command(cmd, fatal) - - -def apt_purge(packages, fatal=False): - """Purge one or more packages""" - cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_apt_command(cmd, fatal) - - -def apt_mark(packages, mark, fatal=False): - """Flag one or more packages using apt-mark""" - log("Marking {} as {}".format(packages, mark)) - cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - - if fatal: - subprocess.check_call(cmd, universal_newlines=True) - else: - subprocess.call(cmd, universal_newlines=True) - - -def apt_hold(packages, fatal=False): - return apt_mark(packages, 
'hold', fatal=fatal) - - -def apt_unhold(packages, fatal=False): - return apt_mark(packages, 'unhold', fatal=fatal) - - -def add_source(source, key=None): - """Add a package source to this system. - - @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples:: - - ppa:charmers/example - deb https://stub:key@private.example.com/ubuntu trusty main - - In addition: - 'proposed:' may be used to enable the standard 'proposed' - pocket for the release. - 'cloud:' may be used to activate official cloud archive pockets, - such as 'cloud:icehouse' - 'distro' may be used as a noop - - @param key: A key to be added to the system's APT keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. ppa and cloud archive keys - are securely added automtically, so sould not be provided. - """ - if source is None: - log('Source is not present. Skipping') - return - - if (source.startswith('ppa:') or - source.startswith('http') or - source.startswith('deb ') or - source.startswith('cloud-archive:')): - subprocess.check_call(['add-apt-repository', '--yes', source]) - elif source.startswith('cloud:'): - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - if pocket not in CLOUD_ARCHIVE_POCKETS: - raise SourceConfigError( - 'Unsupported cloud: source option %s' % - pocket) - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) - elif source == 'proposed': - release = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: - apt.write(PROPOSED_POCKET.format(release)) - elif source == 'distro': - pass - else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) - else: - # Note that hkp: is in no way a secure protocol. Using a - # GPG key id is pointless from a security POV unless you - # absolutely trust your network and DNS. - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) +if __platform__ == "ubuntu": + apt_cache = fetch.apt_cache + apt_install = fetch.install + apt_update = fetch.update + apt_upgrade = fetch.upgrade + apt_purge = fetch.purge + apt_mark = fetch.apt_mark + apt_hold = fetch.apt_hold + apt_unhold = fetch.apt_unhold +elif __platform__ == "centos": + yum_search = fetch.yum_search def configure_sources(update=False, sources_var='install_sources', keys_var='install_keys'): - """ - Configure multiple sources from charm configuration. + """Configure multiple sources from charm configuration. The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. Sources and their + The fragment needs to be included as a string. Sources and their corresponding keys are of the types supported by add_source(). 
Example config: @@ -368,12 +134,11 @@ def configure_sources(update=False, for source, key in zip(sources, keys): add_source(source, key) if update: - apt_update(fatal=True) + fetch.update(fatal=True) def install_remote(source, *args, **kwargs): - """ - Install a file tree from a remote source + """Install a file tree from a remote source. The specified source should be a url of the form: scheme://[host]/path[#[option=value][&...]] @@ -406,6 +171,7 @@ def install_remote(source, *args, **kwargs): def install_from_config(config_var_name): + """Install a file from config.""" charm_config = config() source = charm_config[config_var_name] return install_remote(source) @@ -428,40 +194,3 @@ def plugins(fetch_handlers=None): log("FetchHandler {} not found, skipping plugin".format( handler_name)) return plugin_list - - -def _run_apt_command(cmd, fatal=False): - """ - Run an APT command, checking output and retrying if the fatal flag is set - to True. - - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. - """ - env = os.environ.copy() - - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." - "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) - - else: - subprocess.call(cmd, env=env) diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py index b3404d85..07cd0293 100644 --- a/hooks/charmhelpers/fetch/bzrurl.py +++ b/hooks/charmhelpers/fetch/bzrurl.py @@ -18,19 +18,20 @@ from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, filter_installed_packages, - apt_install, + install, ) from charmhelpers.core.host import mkdir if filter_installed_packages(['bzr']) != []: - apt_install(['bzr']) + install(['bzr']) if filter_installed_packages(['bzr']) != []: raise NotImplementedError('Unable to install bzr') class BzrUrlFetchHandler(BaseFetchHandler): - """Handler for bazaar branches via generic and lp URLs""" + """Handler for bazaar branches via generic and lp URLs.""" + def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('bzr+ssh', 'lp', ''): diff --git a/hooks/charmhelpers/fetch/centos.py b/hooks/charmhelpers/fetch/centos.py new file mode 100644 index 00000000..604bbfb5 --- /dev/null +++ b/hooks/charmhelpers/fetch/centos.py @@ -0,0 +1,171 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import subprocess +import os +import time +import six +import yum + +from tempfile import NamedTemporaryFile +from charmhelpers.core.hookenv import log + +YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM. +YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +def filter_installed_packages(packages): + """Return a list of packages that require installation.""" + yb = yum.YumBase() + package_list = yb.doPackageLists() + temp_cache = {p.base_package_name: 1 for p in package_list['installed']} + + _pkgs = [p for p in packages if not temp_cache.get(p, False)] + return _pkgs + + +def install(packages, options=None, fatal=False): + """Install one or more packages.""" + cmd = ['yum', '--assumeyes'] + if options is not None: + cmd.extend(options) + cmd.append('install') + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + _run_yum_command(cmd, fatal) + + +def upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages.""" + cmd = ['yum', '--assumeyes'] + if options is not None: + cmd.extend(options) + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + _run_yum_command(cmd, fatal) + + +def update(fatal=False): + """Update local yum cache.""" + cmd = ['yum', '--assumeyes', 'update'] + log("Update with fatal: {}".format(fatal)) + _run_yum_command(cmd, fatal) + + +def purge(packages, fatal=False): + """Purge one or more packages.""" + cmd = ['yum', '--assumeyes', 'remove'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_yum_command(cmd, fatal) + + +def yum_search(packages): + """Search for a package.""" + output = {} + cmd = ['yum', 'search'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Searching for {}".format(packages)) + result = subprocess.check_output(cmd) + for package in list(packages): + output[package] = package in result + return output + + +def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL with a rpm package + + @param key: A key to be added to the system's keyring and used + to verify the signatures on packages. Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. + """ + if source is None: + log('Source is not present. 
Skipping') + return + + if source.startswith('http'): + directory = '/etc/yum.repos.d/' + for filename in os.listdir(directory): + with open(directory + filename, 'r') as rpm_file: + if source in rpm_file.read(): + break + else: + log("Add source: {!r}".format(source)) + # write in the charms.repo + with open(directory + 'Charms.repo', 'a') as rpm_file: + rpm_file.write('[%s]\n' % source[7:].replace('/', '_')) + rpm_file.write('name=%s\n' % source[7:]) + rpm_file.write('baseurl=%s\n\n' % source) + else: + log("Unknown source: {!r}".format(source)) + + if key: + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile('w+') as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['rpm', '--import', key_file]) + else: + subprocess.check_call(['rpm', '--import', key]) + + +def _run_yum_command(cmd, fatal=False): + """Run an YUM command. + + Checks the output and retry if the fatal flag is set to True. + + :param: cmd: str: The yum command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the yum + # lock was not acquired. + + while result is None or result == YUM_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > YUM_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire YUM lock. Will retry in {} seconds." + "".format(YUM_NO_LOCK_RETRY_DELAY)) + time.sleep(YUM_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py index f708d1ee..4cf21bc2 100644 --- a/hooks/charmhelpers/fetch/giturl.py +++ b/hooks/charmhelpers/fetch/giturl.py @@ -18,17 +18,18 @@ from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, filter_installed_packages, - apt_install, + install, ) if filter_installed_packages(['git']) != []: - apt_install(['git']) + install(['git']) if filter_installed_packages(['git']) != []: raise NotImplementedError('Unable to install git') class GitUrlFetchHandler(BaseFetchHandler): - """Handler for git branches via generic and github URLs""" + """Handler for git branches via generic and github URLs.""" + def can_handle(self, source): url_parts = self.parse_url(source) # TODO (mattyw) no support for ssh git@ yet diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py new file mode 100644 index 00000000..077869e1 --- /dev/null +++ b/hooks/charmhelpers/fetch/ubuntu.py @@ -0,0 +1,316 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import six +import time +import subprocess + +from tempfile import NamedTemporaryFile +from charmhelpers.core.host import ( + lsb_release +) +from charmhelpers.core.hookenv import log +from charmhelpers.fetch import SourceConfigError + +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" + +PROPOSED_POCKET = """# Proposed +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted +""" + +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 
'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', +} + +APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. +APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. + + +def filter_installed_packages(packages): + """Return a list of packages that require installation.""" + cache = apt_cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + level='WARNING') + _pkgs.append(package) + return _pkgs + + +def apt_cache(in_memory=True, progress=None): + """Build and return an apt cache.""" + from apt import apt_pkg + apt_pkg.init() + if in_memory: + apt_pkg.config.set("Dir::Cache::pkgcache", "") + apt_pkg.config.set("Dir::Cache::srcpkgcache", "") + return apt_pkg.Cache(progress) + + +def install(packages, options=None, fatal=False): + """Install one or more packages.""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + cmd.append('install') + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Installing {} with options: {}".format(packages, + options)) + _run_apt_command(cmd, fatal) + + +def upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages.""" + if options is None: + options = ['--option=Dpkg::Options::=--force-confold'] + + cmd = ['apt-get', '--assume-yes'] + cmd.extend(options) + if dist: + cmd.append('dist-upgrade') + else: + cmd.append('upgrade') + log("Upgrading with options: {}".format(options)) + _run_apt_command(cmd, fatal) + + +def update(fatal=False): + """Update local apt cache.""" + cmd = ['apt-get', 'update'] + _run_apt_command(cmd, fatal) + + +def purge(packages, fatal=False): + """Purge one or more packages.""" + cmd = ['apt-get', '--assume-yes', 'purge'] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + log("Purging {}".format(packages)) + _run_apt_command(cmd, fatal) + + +def apt_mark(packages, mark, fatal=False): + """Flag one or more packages using apt-mark.""" + log("Marking {} as {}".format(packages, mark)) + cmd = ['apt-mark', mark] + if isinstance(packages, six.string_types): + cmd.append(packages) + else: + cmd.extend(packages) + + if fatal: + subprocess.check_call(cmd, universal_newlines=True) + else: + subprocess.call(cmd, universal_newlines=True) + + +def apt_hold(packages, fatal=False): + return apt_mark(packages, 'hold', fatal=fatal) + + +def apt_unhold(packages, fatal=False): + return apt_mark(packages, 'unhold', fatal=fatal) + + +def add_source(source, key=None): + """Add a package source to this system. + + @param source: a URL or sources.list entry, as supported by + add-apt-repository(1). Examples:: + + ppa:charmers/example + deb https://stub:key@private.example.com/ubuntu trusty main + + In addition: + 'proposed:' may be used to enable the standard 'proposed' + pocket for the release. + 'cloud:' may be used to activate official cloud archive pockets, + such as 'cloud:icehouse' + 'distro' may be used as a noop + + @param key: A key to be added to the system's APT keyring and used + to verify the signatures on packages. 
Ideally, this should be an + ASCII format GPG public key including the block headers. A GPG key + id may also be used, but be aware that only insecure protocols are + available to retrieve the actual public key from a public keyserver + placing your Juju environment at risk. ppa and cloud archive keys + are securely added automtically, so sould not be provided. + """ + if source is None: + log('Source is not present. Skipping') + return + + if (source.startswith('ppa:') or + source.startswith('http') or + source.startswith('deb ') or + source.startswith('cloud-archive:')): + subprocess.check_call(['add-apt-repository', '--yes', source]) + elif source.startswith('cloud:'): + install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + pocket = source.split(':')[-1] + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + elif source == 'proposed': + release = lsb_release()['DISTRIB_CODENAME'] + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(PROPOSED_POCKET.format(release)) + elif source == 'distro': + pass + else: + log("Unknown source: {!r}".format(source)) + + if key: + if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: + with NamedTemporaryFile('w+') as key_file: + key_file.write(key) + key_file.flush() + key_file.seek(0) + subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) + else: + # Note that hkp: is in no way a secure protocol. Using a + # GPG key id is pointless from a security POV unless you + # absolutely trust your network and DNS. + subprocess.check_call(['apt-key', 'adv', '--keyserver', + 'hkp://keyserver.ubuntu.com:80', '--recv', + key]) + + +def _run_apt_command(cmd, fatal=False): + """Run an APT command. + + Checks the output and retries if the fatal flag is set + to True. + + :param: cmd: str: The apt command to run. + :param: fatal: bool: Whether the command's output should be checked and + retried. + """ + env = os.environ.copy() + + if 'DEBIAN_FRONTEND' not in env: + env['DEBIAN_FRONTEND'] = 'noninteractive' + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the apt + # lock was not acquired. + + while result is None or result == APT_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > APT_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire DPKG lock. Will retry in {} seconds." + "".format(APT_NO_LOCK_RETRY_DELAY)) + time.sleep(APT_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) diff --git a/hooks/charmhelpers/osplatform.py b/hooks/charmhelpers/osplatform.py new file mode 100644 index 00000000..ea490bbd --- /dev/null +++ b/hooks/charmhelpers/osplatform.py @@ -0,0 +1,19 @@ +import platform + + +def get_platform(): + """Return the current OS platform. + + For example: if current os platform is Ubuntu then a string "ubuntu" + will be returned (which is the name of the module). + This string is used to decide which platform module should be imported. 
+ """ + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + else: + raise RuntimeError("This module is not supported on {}." + .format(current_platform)) diff --git a/hooks/cinder_utils.py b/hooks/cinder_utils.py index c4f8fbcf..9265cd83 100644 --- a/hooks/cinder_utils.py +++ b/hooks/cinder_utils.py @@ -103,6 +103,7 @@ from charmhelpers.contrib.openstack.utils import ( pause_unit, resume_unit, is_unit_paused_set, + os_application_version_set, ) from charmhelpers.core.decorators import ( @@ -176,6 +177,8 @@ APACHE_SITE_CONF = '/etc/apache2/sites-available/openstack_https_frontend' APACHE_SITE_24_CONF = '/etc/apache2/sites-available/' \ 'openstack_https_frontend.conf' +VERSION_PACKAGE = 'cinder-common' + TEMPLATES = 'templates/' # The interface is said to be satisfied if anyone of the interfaces in @@ -925,6 +928,7 @@ def assess_status(configs): @returns None - this function is executed for its side-effect """ assess_status_func(configs)() + os_application_version_set(VERSION_PACKAGE) def assess_status_func(configs): diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index 8040b570..24b353ee 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -83,6 +83,56 @@ class OpenStackAmuletUtils(AmuletUtils): if not found: return 'endpoint not found' + def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate keystone v3 endpoint data. + + Validate the v3 endpoint data which has changed from v2. The + ports are used to find the matching endpoint. + + The new v3 endpoint data looks like: + + ['}, + region=RegionOne, + region_id=RegionOne, + service_id=17f842a0dc084b928e476fafe67e4095, + url=http://10.5.6.5:9312>, + '}, + region=RegionOne, + region_id=RegionOne, + service_id=72fc8736fb41435e8b3584205bb2cfa3, + url=http://10.5.6.6:35357/v3>, + ... ] + """ + self.log.debug('Validating v3 endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) + found = [] + for ep in endpoints: + self.log.debug('endpoint: {}'.format(repr(ep))) + if ((admin_port in ep.url and ep.interface == 'admin') or + (internal_port in ep.url and ep.interface == 'internal') or + (public_port in ep.url and ep.interface == 'public')): + found.append(ep.interface) + # note we ignore the links member. + actual = {'id': ep.id, + 'region': ep.region, + 'region_id': ep.region_id, + 'interface': self.not_null, + 'url': ep.url, + 'service_id': ep.service_id, } + ret = self._validate_dict_data(expected, actual) + if ret: + return 'unexpected endpoint data - {}'.format(ret) + + if len(found) != 3: + return 'Unexpected number of endpoints found' + def validate_svc_catalog_endpoint_data(self, expected, actual): """Validate service catalog endpoint data. @@ -100,6 +150,72 @@ class OpenStackAmuletUtils(AmuletUtils): return "endpoint {} does not exist".format(k) return ret + def validate_v3_svc_catalog_endpoint_data(self, expected, actual): + """Validate the keystone v3 catalog endpoint data. + + Validate a list of dictinaries that make up the keystone v3 service + catalogue. 
+ + It is in the form of: + + + {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:35357/v3'}, + {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}, + {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.224:5000/v3'}], + u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62', + u'interface': u'public', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d', + u'interface': u'internal', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9311'}, + {u'id': u'f629388955bc407f8b11d8b7ca168086', + u'interface': u'admin', + u'region': u'RegionOne', + u'region_id': u'RegionOne', + u'url': u'http://10.5.5.223:9312'}]} + + Note, that an added complication is that the order of admin, public, + internal against 'interface' in each region. + + Thus, the function sorts the expected and actual lists using the + interface key as a sort key, prior to the comparison. + """ + self.log.debug('Validating v3 service catalog endpoint data...') + self.log.debug('actual: {}'.format(repr(actual))) + for k, v in six.iteritems(expected): + if k in actual: + l_expected = sorted(v, key=lambda x: x['interface']) + l_actual = sorted(actual[k], key=lambda x: x['interface']) + if len(l_actual) != len(l_expected): + return ("endpoint {} has differing number of interfaces " + " - expected({}), actual({})" + .format(k, len(l_expected), len(l_actual))) + for i_expected, i_actual in zip(l_expected, l_actual): + self.log.debug("checking interface {}" + .format(i_expected['interface'])) + ret = self._validate_dict_data(i_expected, i_actual) + if ret: + return self.endpoint_error(k, ret) + else: + return "endpoint {} does not exist".format(k) + return ret + def validate_tenant_data(self, expected, actual): """Validate tenant data. @@ -928,7 +1044,8 @@ class OpenStackAmuletUtils(AmuletUtils): retry_delay=5, socket_timeout=1) connection = pika.BlockingConnection(parameters) - assert connection.server_properties['product'] == 'RabbitMQ' + assert connection.is_open is True + assert connection.is_closing is False self.log.debug('Connect OK') return connection except Exception as e: diff --git a/unit_tests/test_cinder_utils.py b/unit_tests/test_cinder_utils.py index 76daf22e..03da1e60 100644 --- a/unit_tests/test_cinder_utils.py +++ b/unit_tests/test_cinder_utils.py @@ -63,6 +63,7 @@ TO_PATCH = [ 'is_elected_leader', 'templating', 'install_alternative', + 'os_application_version_set', # fetch 'apt_update', 'apt_upgrade', @@ -977,6 +978,9 @@ class TestCinderUtils(CharmTestCase): cinder_utils.assess_status('test-config') asf.assert_called_once_with('test-config') callee.assert_called_once_with() + self.os_application_version_set.assert_called_with( + 'cinder-common' + ) @patch.object(cinder_utils, 'get_optional_interfaces') @patch.object(cinder_utils, 'check_optional_relations')
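
Reviewer note (not part of the sync): the new charmhelpers.core.hookenv.application_version_set() helper shells out to the Juju 2.0 `application-version-set` hook tool, and cinder_utils.assess_status() now feeds it via os_application_version_set(VERSION_PACKAGE). A minimal sketch of how a charm hook surfaces its workload version with this helper; the '9.0.0' version string is illustrative only:

    # Sketch only; assumes it runs inside a Juju hook environment where
    # charmhelpers (as synced above) is importable.
    from charmhelpers.core.hookenv import application_version_set

    def set_workload_version():
        # Shows up in `juju status` on Juju 2.0+; on older Juju the helper
        # falls back to logging because the hook tool is missing (OSError).
        application_version_set('9.0.0')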
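
Reviewer note (not part of the sync): the contrib/storage/linux/ceph.py hunk adds a DEFAULT_MINIMUM_PGS floor to the calculated placement-group count. A rough, self-contained sketch of just that clamp (the real Pool.get_pgs() also validates its inputs and rounds the result to a nearby power of two afterwards); the numbers are illustrative:

    DEFAULT_PGS_PER_OSD_TARGET = 100
    DEFAULT_MINIMUM_PGS = 2

    def estimate_pgs(osd_count, data_fraction, pool_size,
                     target_pgs_per_osd=DEFAULT_PGS_PER_OSD_TARGET):
        num_pg = (target_pgs_per_osd * osd_count * data_fraction) // pool_size
        # New in this sync: never drop below a sane minimum so tiny pools on
        # minimal OSD configurations still get reasonable data distribution.
        if num_pg < DEFAULT_MINIMUM_PGS:
            num_pg = DEFAULT_MINIMUM_PGS
        return num_pg

    # e.g. 3 OSDs, a pool expected to hold 0.1% of the data, 3 replicas:
    # (100 * 3 * 0.001) // 3 == 0.0, which is now clamped to 2.
    print(estimate_pgs(osd_count=3, data_fraction=0.001, pool_size=3))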