[hopem,r=james-page] Resync helpers to get fix for bug 1337266

commit d5388a96a3
James Page, 2014-07-04 09:44:18 +01:00
15 changed files with 533 additions and 72 deletions

View File

@@ -170,6 +170,7 @@ def canonical_url(configs, vip_setting='vip'):
    :configs    : OSTemplateRenderer: A config templating object to inspect for
                  a complete https context.
    :vip_setting: str: Setting in charm config that specifies
                  VIP address.
    '''

View File

@@ -0,0 +1,55 @@
from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)


class OpenStackAmuletDeployment(AmuletDeployment):
    """This class inherits from AmuletDeployment and has additional support
       that is specifically for use by OpenStack charms."""

    def __init__(self, series=None, openstack=None, source=None):
        """Initialize the deployment environment."""
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin."""
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0
        services = other_services
        services.append(this_service)
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        if self.openstack:
            for svc in services:
                if svc[name] not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc[name], config)

        if self.source:
            for svc in services:
                if svc[name] in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc[name], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Return an integer representing the enum value of the openstack
           release."""
        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
            self.precise_havana, self.precise_icehouse, \
            self.trusty_icehouse = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
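
For orientation, a charm's amulet tests would typically drive this helper roughly as follows. This is a minimal sketch: the keystone/mysql service names, the (name, unit-count) tuple form implied by the svc[name] indexing above, and the config values are illustrative assumptions, not part of this commit.

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment
)


class KeystoneBasicDeployment(OpenStackAmuletDeployment):
    """Hypothetical deployment object used by a charm's amulet tests."""

    def __init__(self, series='precise', openstack=None, source=None):
        super(KeystoneBasicDeployment, self).__init__(series, openstack,
                                                      source)
        # Services listed in use_source get 'source'; everything else gets
        # 'openstack-origin' when those options are set.
        self._add_services(('keystone', 1), [('mysql', 1)])
        self._configure_services({'keystone': {'admin-password': 'openstack'}})
        self.d.setup()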

View File

@@ -0,0 +1,209 @@
import logging
import os
import time
import urllib

import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client

from charmhelpers.contrib.amulet.utils import (
    AmuletUtils
)

DEBUG = logging.DEBUG
ERROR = logging.ERROR


class OpenStackAmuletUtils(AmuletUtils):
    """This class inherits from AmuletUtils and has additional support
       that is specifically for use by OpenStack charms."""

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate actual endpoint data vs expected endpoint data. The ports
           are used to find the matching endpoint."""
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if admin_port in ep.adminurl and internal_port in ep.internalurl \
               and public_port in ep.publicurl:
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate a list of actual service catalog endpoints vs a list of
           expected service catalog endpoints."""
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in expected.iteritems():
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate a list of actual tenant data vs list of expected tenant
           data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return ret

    def validate_role_data(self, expected, actual):
        """Validate a list of actual role data vs a list of expected role
           data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate a list of actual user data vs a list of expected user
           data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate a list of actual flavors vs a list of expected flavors."""
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists"""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance."""
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)

        if not os.path.exists(cirros_img):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, cirros_img)
        f.close()

        with open(cirros_img) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        return image

    def delete_image(self, glance, image):
        """Delete the specified image."""
        glance.images.delete(image)

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance."""
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status == 'BUILD':
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance."""
        nova.servers.delete(instance)
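
In a charm's tests these utilities are usually chained together along these lines; a rough sketch in which the sentry lookup, credentials and image name are illustrative placeholders, and the deployment object is assumed to come from a helper like the one above:

from charmhelpers.contrib.openstack.amulet.utils import (
    OpenStackAmuletUtils,
    DEBUG,
)

u = OpenStackAmuletUtils(DEBUG)

# 'deployment' is assumed to be an OpenStackAmuletDeployment that has
# already been set up; 'keystone/0' and the credentials are placeholders.
keystone_sentry = deployment.d.sentry.unit['keystone/0']
keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                         password='openstack',
                                         tenant='admin')
assert u.tenant_exists(keystone, 'admin')

glance = u.authenticate_glance_admin(keystone)
image = u.create_cirros_image(glance, 'cirros-test-image')
u.delete_image(glance, image)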

View File

@@ -24,6 +24,7 @@ from charmhelpers.core.hookenv import (
     unit_get,
     unit_private_ip,
     ERROR,
+    INFO
 )

 from charmhelpers.contrib.hahelpers.cluster import (
@@ -243,23 +244,31 @@ class IdentityServiceContext(OSContextGenerator):
 class AMQPContext(OSContextGenerator):
     interfaces = ['amqp']

-    def __init__(self, ssl_dir=None):
+    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
         self.ssl_dir = ssl_dir
+        self.rel_name = rel_name
+        self.relation_prefix = relation_prefix
+        self.interfaces = [rel_name]

     def __call__(self):
         log('Generating template context for amqp')
         conf = config()
+        user_setting = 'rabbit-user'
+        vhost_setting = 'rabbit-vhost'
+        if self.relation_prefix:
+            user_setting = self.relation_prefix + '-rabbit-user'
+            vhost_setting = self.relation_prefix + '-rabbit-vhost'
+
         try:
-            username = conf['rabbit-user']
-            vhost = conf['rabbit-vhost']
+            username = conf[user_setting]
+            vhost = conf[vhost_setting]
         except KeyError as e:
             log('Could not generate shared_db context. '
                 'Missing required charm config options: %s.' % e)
             raise OSContextError
+
         ctxt = {}
-        for rid in relation_ids('amqp'):
+        for rid in relation_ids(self.rel_name):
             ha_vip_only = False
             for unit in related_units(rid):
                 if relation_get('clustered', rid=rid, unit=unit):
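
The practical effect of the new arguments is that a charm with more than one AMQP relation can keep separate rabbit settings per relation. A small sketch; the 'amqp-nova' relation name and 'nova' prefix are illustrative, not mandated by this change:

from charmhelpers.contrib.openstack import context

# Default behaviour: reads 'rabbit-user' / 'rabbit-vhost' from charm config
# and walks the 'amqp' relation.
amqp = context.AMQPContext(ssl_dir='/etc/apache2/ssl')

# Prefixed behaviour: reads 'nova-rabbit-user' / 'nova-rabbit-vhost' and
# walks the 'amqp-nova' relation instead.
amqp_nova = context.AMQPContext(rel_name='amqp-nova', relation_prefix='nova')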
@@ -418,12 +427,13 @@ class ApacheSSLContext(OSContextGenerator):
     """
     Generates a context for an apache vhost configuration that configures
     HTTPS reverse proxying for one or many endpoints. Generated context
-    looks something like:
-    {
-        'namespace': 'cinder',
-        'private_address': 'iscsi.mycinderhost.com',
-        'endpoints': [(8776, 8766), (8777, 8767)]
-    }
+    looks something like::
+
+        {
+            'namespace': 'cinder',
+            'private_address': 'iscsi.mycinderhost.com',
+            'endpoints': [(8776, 8766), (8777, 8767)]
+        }

     The endpoints list consists of a tuples mapping external ports
     to internal ports.
@@ -541,6 +551,26 @@ class NeutronContext(OSContextGenerator):

         return nvp_ctxt

+    def n1kv_ctxt(self):
+        driver = neutron_plugin_attribute(self.plugin, 'driver',
+                                          self.network_manager)
+        n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
+                                               self.network_manager)
+        n1kv_ctxt = {
+            'core_plugin': driver,
+            'neutron_plugin': 'n1kv',
+            'neutron_security_groups': self.neutron_security_groups,
+            'local_ip': unit_private_ip(),
+            'config': n1kv_config,
+            'vsm_ip': config('n1kv-vsm-ip'),
+            'vsm_username': config('n1kv-vsm-username'),
+            'vsm_password': config('n1kv-vsm-password'),
+            'restrict_policy_profiles': config(
+                'n1kv_restrict_policy_profiles'),
+        }
+
+        return n1kv_ctxt
+
     def neutron_ctxt(self):
         if https():
             proto = 'https'
@@ -572,6 +602,8 @@ class NeutronContext(OSContextGenerator):
             ctxt.update(self.ovs_ctxt())
         elif self.plugin in ['nvp', 'nsx']:
             ctxt.update(self.nvp_ctxt())
+        elif self.plugin == 'n1kv':
+            ctxt.update(self.n1kv_ctxt())

         alchemy_flags = config('neutron-alchemy-flags')
         if alchemy_flags:
@@ -611,7 +643,7 @@ class SubordinateConfigContext(OSContextGenerator):
     The subordinate interface allows subordinates to export their
     configuration requirements to the principle for multiple config
     files and multiple services. Ie, a subordinate that has interfaces
-    to both glance and nova may export to following yaml blob as json:
+    to both glance and nova may export to following yaml blob as json::

         glance:
             /etc/glance/glance-api.conf:
@@ -630,7 +662,8 @@ class SubordinateConfigContext(OSContextGenerator):

     It is then up to the principle charms to subscribe this context to
     the service+config file it is interested in.  Configuration data will
-    be available in the template context, in glance's case, as:
+    be available in the template context, in glance's case, as::
+
         ctxt = {
             ... other context ...
             'subordinate_config': {
@@ -657,7 +690,7 @@ class SubordinateConfigContext(OSContextGenerator):
         self.interface = interface

     def __call__(self):
-        ctxt = {}
+        ctxt = {'sections': {}}
         for rid in relation_ids(self.interface):
             for unit in related_units(rid):
                 sub_config = relation_get('subordinate_configuration',
@@ -683,10 +716,14 @@ class SubordinateConfigContext(OSContextGenerator):

                 sub_config = sub_config[self.config_file]
                 for k, v in sub_config.iteritems():
-                    ctxt[k] = v
+                    if k == 'sections':
+                        for section, config_dict in v.iteritems():
+                            log("adding section '%s'" % (section))
+                            ctxt[k][section] = config_dict
+                    else:
+                        ctxt[k] = v

-        if not ctxt:
-            ctxt['sections'] = {}
+        log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)

         return ctxt
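
To make the new 'sections' handling concrete, here is a rough sketch of what a subordinate might publish and what the context then yields; the service name, config file and option values are illustrative, and the constructor arguments are inferred from the surrounding docstring rather than quoted from it:

import json

from charmhelpers.contrib.openstack.context import SubordinateConfigContext

# JSON blob a subordinate could set as 'subordinate_configuration' on the
# relation: keyed by principle service, then config file, then 'sections'.
payload = json.dumps({
    'glance': {
        '/etc/glance/glance-api.conf': {
            'sections': {
                'DEFAULT': [('key1', 'value1')],
            },
        },
    },
})

sub_ctxt = SubordinateConfigContext(service='glance',
                                    config_file='/etc/glance/glance-api.conf',
                                    interface='glance-plugin')
# Within a hook, sub_ctxt() would then return roughly
# {'sections': {'DEFAULT': [['key1', 'value1']]}}.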

View File

@@ -128,6 +128,20 @@ def neutron_plugins():
             'server_packages': ['neutron-server',
                                 'neutron-plugin-vmware'],
             'server_services': ['neutron-server']
         },
+        'n1kv': {
+            'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
+            'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [['neutron-plugin-cisco']],
+            'server_packages': ['neutron-server',
+                                'neutron-plugin-cisco'],
+            'server_services': ['neutron-server']
+        }
     }
     if release >= 'icehouse':
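
Elsewhere in these helpers the new entry is consumed through neutron_plugin_attribute, roughly as below; the 'neutron' network-manager value is an assumption for illustration:

from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

# Looks up fields of the 'n1kv' entry added above.
driver = neutron_plugin_attribute('n1kv', 'driver', 'neutron')
config_file = neutron_plugin_attribute('n1kv', 'config', 'neutron')
packages = neutron_plugin_attribute('n1kv', 'packages', 'neutron')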

View File

@@ -30,17 +30,17 @@ def get_loader(templates_dir, os_release):
     loading dir.

     A charm may also ship a templates dir with this module
-    and it will be appended to the bottom of the search list, eg:
-    hooks/charmhelpers/contrib/openstack/templates.
-
-    :param templates_dir: str: Base template directory containing release
-        sub-directories.
-    :param os_release : str: OpenStack release codename to construct template
-        loader.
-    :returns : jinja2.ChoiceLoader constructed with a list of
-        jinja2.FilesystemLoaders, ordered in descending
-        order by OpenStack release.
+    and it will be appended to the bottom of the search list, eg::
+
+        hooks/charmhelpers/contrib/openstack/templates
+
+    :param templates_dir (str): Base template directory containing release
+        sub-directories.
+    :param os_release (str): OpenStack release codename to construct template
+        loader.
+    :returns: jinja2.ChoiceLoader constructed with a list of
+              jinja2.FilesystemLoaders, ordered in descending
+              order by OpenStack release.
     """
     tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
                  for rel in OPENSTACK_CODENAMES.itervalues()]
@@ -111,7 +111,8 @@ class OSConfigRenderer(object):
     and ease the burden of managing config templates across multiple OpenStack
     releases.

-    Basic usage:
+    Basic usage::
+
         # import some common context generates from charmhelpers
         from charmhelpers.contrib.openstack import context
@@ -131,21 +132,19 @@ class OSConfigRenderer(object):
         # write out all registered configs
         configs.write_all()

-    Details:
+    **OpenStack Releases and template loading**

-    OpenStack Releases and template loading
-    ---------------------------------------
     When the object is instantiated, it is associated with a specific OS
     release. This dictates how the template loader will be constructed.

     The constructed loader attempts to load the template from several places
     in the following order:
-        - from the most recent OS release-specific template dir (if one exists)
-        - the base templates_dir
-        - a template directory shipped in the charm with this helper file.
+    - from the most recent OS release-specific template dir (if one exists)
+    - the base templates_dir
+    - a template directory shipped in the charm with this helper file.

-    For the example above, '/tmp/templates' contains the following structure:
+    For the example above, '/tmp/templates' contains the following structure::

         /tmp/templates/nova.conf
         /tmp/templates/api-paste.ini
         /tmp/templates/grizzly/api-paste.ini
@@ -169,8 +168,8 @@ class OSConfigRenderer(object):
     $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
     us to ship common templates (haproxy, apache) with the helpers.

-    Context generators
-    ---------------------------------------
+    **Context generators**
+
     Context generators are used to generate template contexts during hook
     execution. Doing so may require inspecting service relations, charm
     config, etc. When registered, a config file is associated with a list
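
Pulling the docstring fragments above together, typical charm-side usage looks roughly like this; the paths, release and chosen contexts are illustrative:

from charmhelpers.contrib.openstack import context, templating

configs = templating.OSConfigRenderer(templates_dir='/tmp/templates',
                                      openstack_release='folsom')

configs.register('/etc/nova/nova.conf', [context.SharedDBContext(),
                                         context.AMQPContext()])
configs.register('/etc/nova/api-paste.ini',
                 [context.IdentityServiceContext()])

configs.write('/etc/nova/nova.conf')  # render a single registered config
configs.write_all()                   # or write out everything registered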

View File

@@ -3,7 +3,6 @@
 # Common python helper functions used for OpenStack charms.

 from collections import OrderedDict
-import apt_pkg as apt
 import subprocess
 import os
 import socket
@@ -85,6 +84,8 @@ def get_os_codename_install_source(src):
     '''Derive OpenStack release codename from a given installation source.'''
     ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
     rel = ''
+    if src is None:
+        return rel
     if src in ['distro', 'distro-proposed']:
         try:
             rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@@ -132,6 +133,7 @@ def get_os_version_codename(codename):

 def get_os_codename_package(package, fatal=True):
     '''Derive OpenStack release codename from an installed package.'''
+    import apt_pkg as apt
     apt.init()

     # Tell apt to build an in-memory cache to prevent race conditions (if
@@ -189,7 +191,7 @@ def get_os_version_package(pkg, fatal=True):
     for version, cname in vers_map.iteritems():
         if cname == codename:
             return version
-    #e = "Could not determine OpenStack version for package: %s" % pkg
+    # e = "Could not determine OpenStack version for package: %s" % pkg
     # error_out(e)
@@ -325,6 +327,7 @@ def openstack_upgrade_available(package):

     """
+    import apt_pkg as apt
     src = config('openstack-origin')
     cur_vers = get_os_version_package(package)
     available_vers = get_os_version_install_source(src)
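
The new src is None guard simply short-circuits to an empty codename; otherwise the mapping behaves roughly as below, with return values assumed from the standard release tables and shown for a precise host:

from charmhelpers.contrib.openstack.utils import (
    get_os_codename_install_source,
)

get_os_codename_install_source(None)                    # '' - the new guard
get_os_codename_install_source('distro')                # 'essex' on precise
get_os_codename_install_source('cloud:precise-havana')  # 'havana'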

View File

@@ -303,7 +303,7 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                         blk_device, fstype, system_services=[]):
     """
     NOTE: This function must only be called from a single service unit for
-    the same rbd_img otherwise data loss will occur.
+          the same rbd_img otherwise data loss will occur.

     Ensures given pool and RBD image exists, is mapped to a block device,
     and the device is formatted and mounted at the given mount_point.

View File

@@ -37,6 +37,7 @@ def zap_disk(block_device):
     check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])

+
 def is_device_mounted(device):
     '''Given a device path, return True if that device is mounted, and False
     if it isn't.

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'

import os


class Fstab(file):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`
    """

    class Entry(object):
        """Entry class represents a non-comment line on the `/etc/fstab` file
        """
        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem

            if not options:
                options = "defaults"

            self.options = options
            self.d = d
            self.p = p

        def __eq__(self, o):
            return str(self) == str(o)

        def __str__(self):
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        file.__init__(self, self._path, 'r+')

    def _hydrate_entry(self, line):
        # NOTE: use split with no arguments to split on any
        #       whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        self.seek(0)
        for line in self.readlines():
            try:
                if not line.startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                pass

    def get_entry_by_attr(self, attr, value):
        for entry in self.entries:
            e_attr = getattr(entry, attr)
            if e_attr == value:
                return entry
        return None

    def add_entry(self, entry):
        if self.get_entry_by_attr('device', entry.device):
            return False

        self.write(str(entry) + '\n')
        self.truncate()
        return entry

    def remove_entry(self, entry):
        self.seek(0)

        lines = self.readlines()

        found = False
        for index, line in enumerate(lines):
            if not line.startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        lines.remove(line)

        self.seek(0)
        self.write(''.join(lines))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        return cls(path=path).add_entry(Fstab.Entry(device,
                                                    mountpoint, filesystem,
                                                    options=options))
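
A quick sketch of how the two classmethods are intended to be used; the device and mountpoint values are illustrative, and note that the class builds on Python 2's built-in file type:

from charmhelpers.core.fstab import Fstab

# Persist a mount in /etc/fstab; returns the new Entry, or False if an
# entry for that device already exists.
entry = Fstab.add('/dev/vdb', '/mnt/data', 'ext4', options='defaults,noatime')

# Later, remove it again by mountpoint.
Fstab.remove_by_mountpoint('/mnt/data')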

View File

@@ -25,7 +25,7 @@ cache = {}
 def cached(func):
     """Cache return values for multiple executions of func + args

-    For example:
+    For example::

         @cached
         def unit_get(attribute):
@@ -445,18 +445,19 @@ class UnregisteredHookError(Exception):
 class Hooks(object):
     """A convenient handler for hook functions.

-    Example:
+    Example::

         hooks = Hooks()

         # register a hook, taking its name from the function name
         @hooks.hook()
         def install():
-            ...
+            pass  # your code here

         # register a hook, providing a custom hook name
         @hooks.hook("config-changed")
         def config_changed():
-            ...
+            pass  # your code here

         if __name__ == "__main__":
             # execute a hook based on the name the program is called by

View File

@@ -12,11 +12,11 @@ import random
 import string
 import subprocess
 import hashlib
-import apt_pkg

 from collections import OrderedDict

 from hookenv import log
+from fstab import Fstab


 def service_start(service_name):
@@ -35,7 +35,8 @@ def service_restart(service_name):


 def service_reload(service_name, restart_on_failure=False):
-    """Reload a system service, optionally falling back to restart if reload fails"""
+    """Reload a system service, optionally falling back to restart if
+    reload fails"""
     service_result = service('reload', service_name)
     if not service_result and restart_on_failure:
         service_result = service('restart', service_name)
@@ -144,7 +145,19 @@ def write_file(path, content, owner='root', group='root', perms=0444):
         target.write(content)


-def mount(device, mountpoint, options=None, persist=False):
+def fstab_remove(mp):
+    """Remove the given mountpoint entry from /etc/fstab
+    """
+    return Fstab.remove_by_mountpoint(mp)
+
+
+def fstab_add(dev, mp, fs, options=None):
+    """Adds the given device entry to the /etc/fstab file
+    """
+    return Fstab.add(dev, mp, fs, options=options)
+
+
+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
     """Mount a filesystem at a particular mountpoint"""
     cmd_args = ['mount']
     if options is not None:
@@ -155,9 +168,9 @@ def mount(device, mountpoint, options=None, persist=False):
     except subprocess.CalledProcessError, e:
         log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
         return False
+
     if persist:
-        # TODO: update fstab
-        pass
+        return fstab_add(device, mountpoint, filesystem, options=options)
     return True
@@ -169,9 +182,9 @@ def umount(mountpoint, persist=False):
     except subprocess.CalledProcessError, e:
         log('Error unmounting {}\n{}'.format(mountpoint, e.output))
         return False
+
     if persist:
-        # TODO: update fstab
-        pass
+        return fstab_remove(mountpoint)
     return True
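
With the Fstab helpers wired in, persist=True now records the mount in /etc/fstab instead of being a no-op; a small sketch in which the device and mountpoint are illustrative:

from charmhelpers.core.host import mount, umount

# Mount and write an /etc/fstab entry; filesystem defaults to "ext3".
mount('/dev/vdb', '/mnt/data', options='noatime', persist=True,
      filesystem='ext4')

# Unmount and drop the corresponding /etc/fstab entry again.
umount('/mnt/data', persist=True)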
@@ -198,13 +211,13 @@ def file_hash(path):
 def restart_on_change(restart_map, stopstart=False):
     """Restart services based on configuration files changing

-    This function is used a decorator, for example
+    This function is used a decorator, for example::

         @restart_on_change({
             '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
             })
         def ceph_client_changed():
-            ...
+            pass  # your code here

     In this example, the cinder-api and cinder-volume services
     would be restarted if /etc/ceph/ceph.conf is changed by the
@@ -300,10 +313,13 @@ def get_nic_hwaddr(nic):
 def cmp_pkgrevno(package, revno, pkgcache=None):
     '''Compare supplied revno with the revno of the installed package

-    1 => Installed revno is greater than supplied arg
-    0 => Installed revno is the same as supplied arg
-    -1 => Installed revno is less than supplied arg
+    *  1 => Installed revno is greater than supplied arg
+    *  0 => Installed revno is the same as supplied arg
+    * -1 => Installed revno is less than supplied arg

     '''
+    import apt_pkg
     if not pkgcache:
         apt_pkg.init()
         pkgcache = apt_pkg.Cache()
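
cmp_pkgrevno is typically used to gate behaviour on the installed package version, along these lines; the package name, version string and helper call are illustrative:

from charmhelpers.core.host import cmp_pkgrevno

# 1, 0 or -1 depending on how the installed ceph version compares to 0.56.
if cmp_pkgrevno('ceph', '0.56') >= 0:
    configure_newer_ceph_features()  # hypothetical charm-specific helper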

View File

@@ -13,7 +13,6 @@ from charmhelpers.core.hookenv import (
     config,
     log,
 )
-import apt_pkg
 import os
@@ -117,6 +116,7 @@ class BaseFetchHandler(object):

 def filter_installed_packages(packages):
     """Returns a list of packages that require installation"""
+    import apt_pkg
     apt_pkg.init()

     # Tell apt to build an in-memory cache to prevent race conditions (if
@@ -235,31 +235,39 @@ def configure_sources(update=False,
                       sources_var='install_sources',
                       keys_var='install_keys'):
     """
-    Configure multiple sources from charm configuration
+    Configure multiple sources from charm configuration.

     The lists are encoded as yaml fragments in the configuration.
+    The fragment needs to be included as a string.

     Example config:
-        install_sources:
+        install_sources: |
           - "ppa:foo"
           - "http://example.com/repo precise main"
-        install_keys:
+        install_keys: |
           - null
           - "a1b2c3d4"

     Note that 'null' (a.k.a. None) should not be quoted.
     """
-    sources = safe_load(config(sources_var))
-    keys = config(keys_var)
-    if keys is not None:
-        keys = safe_load(keys)
-    if isinstance(sources, basestring) and (
-            keys is None or isinstance(keys, basestring)):
-        add_source(sources, keys)
+    sources = safe_load((config(sources_var) or '').strip()) or []
+    keys = safe_load((config(keys_var) or '').strip()) or None
+
+    if isinstance(sources, basestring):
+        sources = [sources]
+
+    if keys is None:
+        for source in sources:
+            add_source(source, None)
     else:
-        if not len(sources) == len(keys):
-            msg = 'Install sources and keys lists are different lengths'
-            raise SourceConfigError(msg)
-        for src_num in range(len(sources)):
-            add_source(sources[src_num], keys[src_num])
+        if isinstance(keys, basestring):
+            keys = [keys]
+
+        if len(sources) != len(keys):
+            raise SourceConfigError(
+                'Install sources and keys lists are different lengths')
+        for source, key in zip(sources, keys):
+            add_source(source, key)
+
     if update:
         apt_update(fatal=True)
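
In charm-config terms the reworked parser accepts either a single string or a YAML list for each option; a sketch of how the pieces line up, with illustrative repository and key values:

from charmhelpers.fetch import configure_sources

# Given charm config such as:
#   install_sources: |
#     - "ppa:foo"
#     - "http://example.com/repo precise main"
#   install_keys: |
#     - null
#     - "a1b2c3d4"
# the parser yields sources == ['ppa:foo', 'http://example.com/repo precise main']
# and keys == [None, 'a1b2c3d4'], calls add_source() on each pair, and
# finally runs apt_update() because update=True.
configure_sources(update=True)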

View File

@@ -39,7 +39,8 @@ class BzrUrlFetchHandler(BaseFetchHandler):
     def install(self, source):
         url_parts = self.parse_url(source)
         branch_name = url_parts.path.strip("/").split("/")[-1]
-        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
+        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+                                branch_name)
         if not os.path.exists(dest_dir):
             mkdir(dest_dir, perms=0755)
         try: