Fix alphanumeric comparisons for openstack and ubuntu releases

- sync charmhelpers with fix-alpha helpers
- fix up code where the alpha comparisons are done
- fix tests which assumed mocks would just work on os_release()

Change-Id: I9c0dae8a37ded6536dc2b0c53e06647f380c48b0
Related-Bug: #1659575
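
For context, the problem being fixed: release code names only happen to sort correctly as plain strings until the alphabet wraps past 'z'. The sketch below is illustrative only and is not part of the commit ('artful' is used as a hypothetical post-z-wrap Ubuntu release); the real ordered tuples and comparator classes are added in the charmhelpers diffs further down.

# Illustrative sketch of the bug and the index-based fix; not charm-helpers code.
UBUNTU_ORDER = ('trusty', 'xenial', 'yakkety', 'zesty', 'artful')

# Plain string comparison goes wrong once names wrap past 'z':
print('artful' >= 'zesty')        # False, although 17.10 is newer than 17.04

def newer_or_equal(a, b, order=UBUNTU_ORDER):
    # Compare by position in the known release order instead of alphabetically.
    return order.index(a) >= order.index(b)

print(newer_or_equal('artful', 'zesty'))   # True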
Alex Kavanagh 2017-04-03 13:52:53 +01:00
parent 1a2c2c74a4
commit 442ac04b4b
55 changed files with 6219 additions and 109 deletions

View File

@ -1,6 +1,13 @@
branch: lp:charm-helpers
destination: tests/charmhelpers
include:
- fetch
- core
- contrib.amulet
- contrib.openstack.amulet
- core
- contrib.openstack.utils
- contrib.openstack.exceptions
- contrib.network.ip
- contrib.storage|inc=*
- contrib.python|inc=*
- osplatform

View File

@ -373,7 +373,7 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
croncmd = (
'/usr/local/lib/nagios/plugins/check_exit_status.pl '
'-s /etc/init.d/%s status' % svc
'-e -s /etc/init.d/%s status' % svc
)
cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
f = open(cronpath, 'w')

View File

@ -26,6 +26,7 @@ from charmhelpers.contrib.hardening.audits.file import (
DirectoryPermissionAudit,
NoReadWriteForOther,
TemplatedFile,
DeletedFile
)
from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
@ -52,13 +53,13 @@ def get_audits():
'mods-available/alias.conf'),
context,
TEMPLATES_DIR,
mode=0o0755,
mode=0o0640,
user='root',
service_actions=[{'service': 'apache2',
'actions': ['restart']}]),
TemplatedFile(os.path.join(settings['common']['apache_dir'],
'conf-enabled/hardening.conf'),
'conf-enabled/99-hardening.conf'),
context,
TEMPLATES_DIR,
mode=0o0640,
@ -69,11 +70,13 @@ def get_audits():
DirectoryPermissionAudit(settings['common']['apache_dir'],
user='root',
group='root',
mode=0o640),
mode=0o0750),
DisabledModuleAudit(settings['hardening']['modules_to_disable']),
NoReadWriteForOther(settings['common']['apache_dir']),
DeletedFile(['/var/www/html/index.html'])
]
return audits
@ -94,5 +97,4 @@ class ApacheConfContext(object):
ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
out).group(1)
ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
ctxt['traceenable'] = settings['hardening']['traceenable']
return ctxt

View File

@ -15,4 +15,18 @@
</LimitExcept>
</Location>
<Directory />
Options -Indexes -FollowSymLinks
AllowOverride None
</Directory>
<Directory /var/www/>
Options -Indexes -FollowSymLinks
AllowOverride None
</Directory>
TraceEnable {{ traceenable }}
ServerTokens {{ servertokens }}
SSLHonorCipherOrder {{ honor_cipher_order }}
SSLCipherSuite {{ cipher_suite }}

View File

@ -49,13 +49,6 @@ class BaseAudit(object): # NO-QA
# Invoke the callback if there is one.
if hasattr(self.unless, '__call__'):
results = self.unless()
if results:
return False
else:
return True
return not self.unless()
if self.unless:
return False
else:
return True
return not self.unless

View File

@ -11,3 +11,6 @@ hardening:
traceenable: 'off'
allowed_http_methods: "GET POST"
modules_to_disable: [ cgi, cgid ]
servertokens: 'Prod'
honor_cipher_order: 'on'
cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES'

View File

@ -7,3 +7,6 @@ common:
hardening:
allowed_http_methods:
modules_to_disable:
servertokens:
honor_cipher_order:
cipher_suite:

View File

@ -58,6 +58,7 @@ security:
rsync
kernel_enable_module_loading: True # (type:boolean)
kernel_enable_core_dump: False # (type:boolean)
ssh_tmout: 300
sysctl:
kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128

View File

@ -34,6 +34,7 @@ security:
packages_list:
kernel_enable_module_loading:
kernel_enable_core_dump:
ssh_tmout:
sysctl:
kernel_secure_sysrq:
kernel_enable_sysrq:

View File

@ -25,7 +25,6 @@ def get_audits():
audits = []
settings = utils.get_settings('os')
# If core dumps are not enabled, then don't allow core dumps to be
# created as they may contain sensitive information.
if not settings['security']['kernel_enable_core_dump']:
@ -33,11 +32,18 @@ def get_audits():
ProfileContext(),
template_dir=TEMPLATES_DIR,
mode=0o0755, user='root', group='root'))
if settings['security']['ssh_tmout']:
audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh',
ProfileContext(),
template_dir=TEMPLATES_DIR,
mode=0o0644, user='root', group='root'))
return audits
class ProfileContext(object):
def __call__(self):
ctxt = {}
settings = utils.get_settings('os')
ctxt = {'ssh_tmout':
settings['security']['ssh_tmout']}
return ctxt

View File

@ -0,0 +1,5 @@
TMOUT={{ tmout }}
readonly TMOUT
export TMOUT
readonly HISTFILE

View File

@ -27,7 +27,10 @@ from charmhelpers.fetch import (
apt_install,
apt_update,
)
from charmhelpers.core.host import lsb_release
from charmhelpers.core.host import (
lsb_release,
CompareHostReleases,
)
from charmhelpers.contrib.hardening.audits.file import (
TemplatedFile,
FileContentAudit,
@ -68,7 +71,8 @@ class SSHConfigContext(object):
'weak': default + ',hmac-sha1'}
# Use newer ciphers on Ubuntu Trusty and above
if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
_release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(_release) >= 'trusty':
log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
macs = macs_66
@ -96,7 +100,8 @@ class SSHConfigContext(object):
'weak': weak}
# Use newer kex on Ubuntu Trusty and above
if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
_release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(_release) >= 'trusty':
log('Detected Ubuntu 14.04 or newer, using new key exchange '
'algorithms', level=DEBUG)
kex = kex_66
@ -119,7 +124,8 @@ class SSHConfigContext(object):
'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
# Use newer ciphers on ubuntu Trusty and above
if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
_release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(_release) >= 'trusty':
log('Detected Ubuntu 14.04 or newer, using new ciphers',
level=DEBUG)
cipher = ciphers_66
@ -291,7 +297,8 @@ class SSHConfigFileContentAudit(FileContentAudit):
self.fail_cases = []
settings = utils.get_settings('ssh')
if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
_release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(_release) >= 'trusty':
if not settings['server']['weak_hmac']:
self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
else:
@ -364,7 +371,8 @@ class SSHDConfigFileContentAudit(FileContentAudit):
self.fail_cases = []
settings = utils.get_settings('ssh')
if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
_release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(_release) >= 'trusty':
if not settings['server']['weak_hmac']:
self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
else:

View File

@ -31,6 +31,7 @@ from charmhelpers.core.hookenv import (
from charmhelpers.core.host import (
lsb_release,
CompareHostReleases,
)
try:
@ -67,6 +68,24 @@ def no_ip_found_error_out(network):
raise ValueError(errmsg)
def _get_ipv6_network_from_address(address):
"""Get an netaddr.IPNetwork for the given IPv6 address
:param address: a dict as returned by netifaces.ifaddresses
:returns netaddr.IPNetwork: None if the address is a link local or loopback
address
"""
if address['addr'].startswith('fe80') or address['addr'] == "::1":
return None
prefix = address['netmask'].split("/")
if len(prefix) > 1:
netmask = prefix[1]
else:
netmask = address['netmask']
return netaddr.IPNetwork("%s/%s" % (address['addr'],
netmask))
def get_address_in_network(network, fallback=None, fatal=False):
"""Get an IPv4 or IPv6 address within the network from the host.
@ -92,19 +111,17 @@ def get_address_in_network(network, fallback=None, fatal=False):
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
if network.version == 4 and netifaces.AF_INET in addresses:
addr = addresses[netifaces.AF_INET][0]['addr']
netmask = addresses[netifaces.AF_INET][0]['netmask']
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
if cidr in network:
return str(cidr.ip)
for addr in addresses[netifaces.AF_INET]:
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
addr['netmask']))
if cidr in network:
return str(cidr.ip)
if network.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'):
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
addr['netmask']))
if cidr in network:
return str(cidr.ip)
cidr = _get_ipv6_network_from_address(addr)
if cidr and cidr in network:
return str(cidr.ip)
if fallback is not None:
return fallback
@ -180,18 +197,18 @@ def _get_for_address(address, key):
if address.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
if not addr['addr'].startswith('fe80'):
network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
addr['netmask']))
cidr = network.cidr
if address in cidr:
if key == 'iface':
return iface
elif key == 'netmask' and cidr:
return str(cidr).split('/')[1]
else:
return addr[key]
network = _get_ipv6_network_from_address(addr)
if not network:
continue
cidr = network.cidr
if address in cidr:
if key == 'iface':
return iface
elif key == 'netmask' and cidr:
return str(cidr).split('/')[1]
else:
return addr[key]
return None
@ -521,7 +538,8 @@ def port_has_listener(address, port):
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty":
release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(release) < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu "
"versions less than Trusty 14.04")

View File

@ -40,6 +40,7 @@ from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
from charmhelpers.core.decorators import retry_on_exception
from charmhelpers.core.host import CompareHostReleases
DEBUG = logging.DEBUG
ERROR = logging.ERROR
@ -1255,7 +1256,7 @@ class OpenStackAmuletUtils(AmuletUtils):
contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
fatal=True)
ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
if ubuntu_release <= 'trusty':
if CompareHostReleases(ubuntu_release) <= 'trusty':
memcache_listen_addr = 'ip6-localhost'
else:
memcache_listen_addr = '::1'

View File

@ -59,6 +59,8 @@ from charmhelpers.core.host import (
write_file,
pwgen,
lsb_release,
CompareHostReleases,
is_container,
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
@ -155,7 +157,8 @@ class OSContextGenerator(object):
if self.missing_data:
self.complete = False
log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
log('Missing required data: %s' % ' '.join(self.missing_data),
level=INFO)
else:
self.complete = True
return self.complete
@ -213,8 +216,9 @@ class SharedDBContext(OSContextGenerator):
hostname_key = "{}_hostname".format(self.relation_prefix)
else:
hostname_key = "hostname"
access_hostname = get_address_in_network(access_network,
unit_get('private-address'))
access_hostname = get_address_in_network(
access_network,
unit_get('private-address'))
set_hostname = relation_get(attribute=hostname_key,
unit=local_unit())
if set_hostname != access_hostname:
@ -308,7 +312,10 @@ def db_ssl(rdata, ctxt, ssl_dir):
class IdentityServiceContext(OSContextGenerator):
def __init__(self, service=None, service_user=None, rel_name='identity-service'):
def __init__(self,
service=None,
service_user=None,
rel_name='identity-service'):
self.service = service
self.service_user = service_user
self.rel_name = rel_name
@ -457,19 +464,17 @@ class AMQPContext(OSContextGenerator):
host = format_ipv6_addr(host) or host
rabbitmq_hosts.append(host)
ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
rabbitmq_hosts = sorted(rabbitmq_hosts)
ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
transport_hosts = rabbitmq_hosts
if transport_hosts:
transport_url_hosts = ''
for host in transport_hosts:
if transport_url_hosts:
format_string = ",{}:{}@{}:{}"
else:
format_string = "{}:{}@{}:{}"
transport_url_hosts += format_string.format(
ctxt['rabbitmq_user'], ctxt['rabbitmq_password'],
host, rabbitmq_port)
transport_url_hosts = ','.join([
"{}:{}@{}:{}".format(ctxt['rabbitmq_user'],
ctxt['rabbitmq_password'],
host_,
rabbitmq_port)
for host_ in transport_hosts])
ctxt['transport_url'] = "rabbit://{}/{}".format(
transport_url_hosts, vhost)
@ -1217,6 +1222,10 @@ class BindHostContext(OSContextGenerator):
return {'bind_host': '0.0.0.0'}
MAX_DEFAULT_WORKERS = 4
DEFAULT_MULTIPLIER = 2
class WorkerConfigContext(OSContextGenerator):
@property
@ -1228,10 +1237,19 @@ class WorkerConfigContext(OSContextGenerator):
return psutil.NUM_CPUS
def __call__(self):
multiplier = config('worker-multiplier') or 0
multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER
count = int(self.num_cpus * multiplier)
if multiplier > 0 and count == 0:
count = 1
if config('worker-multiplier') is None and is_container():
# NOTE(jamespage): Limit unconfigured worker-multiplier
# to MAX_DEFAULT_WORKERS to avoid insane
# worker configuration in LXD containers
# on large servers
# Reference: https://pad.lv/1665270
count = min(count, MAX_DEFAULT_WORKERS)
ctxt = {"workers": count}
return ctxt
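
A small standalone sketch of the worker-count logic above (the function name and values are illustrative, not charm-helpers code): an unset worker-multiplier now defaults to 2 and, inside a container, the resulting count is capped at MAX_DEFAULT_WORKERS.

# Illustrative reimplementation of the logic above; not the charm-helpers code.
MAX_DEFAULT_WORKERS = 4
DEFAULT_MULTIPLIER = 2

def worker_count(num_cpus, configured_multiplier=None, in_container=False):
    multiplier = configured_multiplier or DEFAULT_MULTIPLIER
    count = int(num_cpus * multiplier)
    if multiplier > 0 and count == 0:
        count = 1
    # An unconfigured multiplier in a container is capped (see LP: #1665270).
    if configured_multiplier is None and in_container:
        count = min(count, MAX_DEFAULT_WORKERS)
    return count

print(worker_count(32, in_container=True))        # 4 rather than 64
print(worker_count(32, configured_multiplier=1))  # 32: explicit setting wins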
@ -1601,7 +1619,8 @@ class MemcacheContext(OSContextGenerator):
if ctxt['use_memcache']:
# Trusty version of memcached does not support ::1 as a listen
# address so use host file entry instead
if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty':
release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(release) > 'trusty':
ctxt['memcache_server'] = '::1'
else:
ctxt['memcache_server'] = 'ip6-localhost'

View File

@ -23,7 +23,10 @@ from charmhelpers.core.hookenv import (
ERROR,
)
from charmhelpers.contrib.openstack.utils import os_release
from charmhelpers.contrib.openstack.utils import (
os_release,
CompareOpenStackReleases,
)
def headers_package():
@ -198,7 +201,8 @@ def neutron_plugins():
},
'plumgrid': {
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
'driver': ('neutron.plugins.plumgrid.plumgrid_plugin'
'.plumgrid_plugin.NeutronPluginPLUMgridV2'),
'contexts': [
context.SharedDBContext(user=config('database-user'),
database=config('database'),
@ -225,7 +229,7 @@ def neutron_plugins():
'server_services': ['neutron-server']
}
}
if release >= 'icehouse':
if CompareOpenStackReleases(release) >= 'icehouse':
# NOTE: patch in ml2 plugin for icehouse onwards
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
@ -233,10 +237,10 @@ def neutron_plugins():
'neutron-plugin-ml2']
# NOTE: patch in vmware renames nvp->nsx for icehouse onwards
plugins['nvp'] = plugins['nsx']
if release >= 'kilo':
if CompareOpenStackReleases(release) >= 'kilo':
plugins['midonet']['driver'] = (
'neutron.plugins.midonet.plugin.MidonetPluginV2')
if release >= 'liberty':
if CompareOpenStackReleases(release) >= 'liberty':
plugins['midonet']['driver'] = (
'midonet.neutron.plugin_v1.MidonetPluginV2')
plugins['midonet']['server_packages'].remove(
@ -244,10 +248,11 @@ def neutron_plugins():
plugins['midonet']['server_packages'].append(
'python-networking-midonet')
plugins['plumgrid']['driver'] = (
'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
'networking_plumgrid.neutron.plugins'
'.plugin.NeutronPluginPLUMgridV2')
plugins['plumgrid']['server_packages'].remove(
'neutron-plugin-plumgrid')
if release >= 'mitaka':
if CompareOpenStackReleases(release) >= 'mitaka':
plugins['nsx']['server_packages'].remove('neutron-plugin-vmware')
plugins['nsx']['server_packages'].append('python-vmware-nsx')
plugins['nsx']['config'] = '/etc/neutron/nsx.ini'

View File

@ -5,6 +5,8 @@ global
user haproxy
group haproxy
spread-checks 0
stats socket /var/run/haproxy/admin.sock mode 600 level admin
stats timeout 2m
defaults
log global
@ -58,6 +60,15 @@ frontend tcp-in_{{ service }}
{% for frontend in frontends -%}
backend {{ service }}_{{ frontend }}
balance leastconn
{% if backend_options -%}
{% if backend_options[service] -%}
{% for option in backend_options[service] -%}
{% for key, value in option.items() -%}
{{ key }} {{ value }}
{% endfor -%}
{% endfor -%}
{% endif -%}
{% endif -%}
{% for unit, address in frontends[frontend]['backends'].items() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}
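
To illustrate the data shape the new backend_options block expects (a dict keyed by service name whose values are lists of single-entry dicts), here is a minimal Jinja2 rendering sketch; the option names are invented for the example and the fragment is simplified from the template above.

# Minimal sketch of how the backend_options fragment renders; not part of the commit.
from jinja2 import Template

fragment = Template(
    "{% for option in backend_options[service] %}"
    "{% for key, value in option.items() %}"
    "    {{ key }} {{ value }}\n"
    "{% endfor %}{% endfor %}"
)
print(fragment.render(
    service='glance',
    backend_options={'glance': [{'timeout server': '90000'},
                                {'timeout client': '90000'}]}))
# prints one "key value" line per option, e.g. "timeout server 90000"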

View File

@ -33,9 +33,7 @@ import yaml
from charmhelpers.contrib.network import ip
from charmhelpers.core import (
unitdata,
)
from charmhelpers.core import unitdata
from charmhelpers.core.hookenv import (
action_fail,
@ -55,6 +53,8 @@ from charmhelpers.core.hookenv import (
application_version_set,
)
from charmhelpers.core.strutils import BasicStringComparator
from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group,
is_lvm_physical_volume,
@ -97,6 +97,22 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
OPENSTACK_RELEASES = (
'diablo',
'essex',
'folsom',
'grizzly',
'havana',
'icehouse',
'juno',
'kilo',
'liberty',
'mitaka',
'newton',
'ocata',
'pike',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
@ -238,6 +254,17 @@ GIT_DEFAULT_BRANCHES = {
DEFAULT_LOOPBACK_SIZE = '5G'
class CompareOpenStackReleases(BasicStringComparator):
"""Provide comparisons of OpenStack releases.
Use in the form of
if CompareOpenStackReleases(release) > 'mitaka':
# do something with mitaka
"""
_list = OPENSTACK_RELEASES
def error_out(msg):
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
sys.exit(1)
@ -1066,7 +1093,8 @@ def git_generate_systemd_init_files(templates_dir):
shutil.copyfile(init_in_source, init_source)
with open(init_source, 'a') as outfile:
template = '/usr/share/openstack-pkg-tools/init-script-template'
template = ('/usr/share/openstack-pkg-tools/'
'init-script-template')
with open(template) as infile:
outfile.write('\n\n{}'.format(infile.read()))
@ -1971,9 +1999,7 @@ def enable_memcache(source=None, release=None, package=None):
if not _release:
_release = get_os_codename_install_source(source)
# TODO: this should be changed to a numeric comparison using a known list
# of releases and comparing by index.
return _release >= 'mitaka'
return CompareOpenStackReleases(_release) >= 'mitaka'
def token_cache_pkgs(source=None, release=None):

View File

@ -987,18 +987,20 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
service_start(svc)
def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
def ensure_ceph_keyring(service, user=None, group=None,
relation='ceph', key=None):
"""Ensures a ceph keyring is created for a named service and optionally
ensures user and group ownership.
Returns False if no ceph key is available in relation state.
@returns boolean: Flag to indicate whether a key was successfully written
to disk based on either relation data or a supplied key
"""
key = None
for rid in relation_ids(relation):
for unit in related_units(rid):
key = relation_get('key', rid=rid, unit=unit)
if key:
break
if not key:
for rid in relation_ids(relation):
for unit in related_units(rid):
key = relation_get('key', rid=rid, unit=unit)
if key:
break
if not key:
return False

View File

@ -45,6 +45,7 @@ if __platform__ == "ubuntu":
add_new_group,
lsb_release,
cmp_pkgrevno,
CompareHostReleases,
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import (
@ -52,6 +53,7 @@ elif __platform__ == "centos":
add_new_group,
lsb_release,
cmp_pkgrevno,
CompareHostReleases,
) # flake8: noqa -- ignore F401 for this import
UPDATEDB_PATH = '/etc/updatedb.conf'

View File

@ -2,6 +2,22 @@ import subprocess
import yum
import os
from charmhelpers.core.strutils import BasicStringComparator
class CompareHostReleases(BasicStringComparator):
"""Provide comparisons of Host releases.
Use in the form of
if CompareHostReleases(release) > 'trusty':
# do something with mitaka
"""
def __init__(self, item):
raise NotImplementedError(
"CompareHostReleases() is not implemented for CentOS")
def service_available(service_name):
# """Determine whether a system service is available."""

View File

@ -1,5 +1,37 @@
import subprocess
from charmhelpers.core.strutils import BasicStringComparator
UBUNTU_RELEASES = (
'lucid',
'maverick',
'natty',
'oneiric',
'precise',
'quantal',
'raring',
'saucy',
'trusty',
'utopic',
'vivid',
'wily',
'xenial',
'yakkety',
'zesty',
)
class CompareHostReleases(BasicStringComparator):
"""Provide comparisons of Ubuntu releases.
Use in the form of
if CompareHostReleases(release) > 'trusty':
# do something with mitaka
"""
_list = UBUNTU_RELEASES
def service_available(service_name):
"""Determine whether a system service is available"""

View File

@ -68,3 +68,56 @@ def bytes_from_string(value):
msg = "Unable to interpret string value '%s' as bytes" % (value)
raise ValueError(msg)
return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
class BasicStringComparator(object):
"""Provides a class that will compare strings from an iterator type object.
Used to provide > and < comparisons on strings that may not necessarily be
alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
z-wrap.
"""
_list = None
def __init__(self, item):
if self._list is None:
raise Exception("Must define the _list in the class definition!")
try:
self.index = self._list.index(item)
except Exception:
raise KeyError("Item '{}' is not in list '{}'"
.format(item, self._list))
def __eq__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index == self._list.index(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index < self._list.index(other)
def __ge__(self, other):
return not self.__lt__(other)
def __gt__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index > self._list.index(other)
def __le__(self, other):
return not self.__gt__(other)
def __str__(self):
"""Always give back the item at the index so it can be used in
comparisons like:
s_mitaka = CompareOpenStack('mitaka')
s_newton = CompareOpenstack('newton')
assert s_newton > s_mitaka
@returns: <string>
"""
return self._list[self.index]
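
A usage sketch of the comparator pattern, assuming the charmhelpers from this commit is importable; the release list here is abbreviated purely for illustration (the shipped subclasses are CompareOpenStackReleases and CompareHostReleases elsewhere in this commit).

# Standalone illustration of subclassing the comparator; not one of the shipped classes.
from charmhelpers.core.strutils import BasicStringComparator

class CompareDemoReleases(BasicStringComparator):
    _list = ('mitaka', 'newton', 'ocata', 'pike')

assert CompareDemoReleases('ocata') > 'newton'
assert CompareDemoReleases('ocata') <= 'pike'
assert str(CompareDemoReleases('pike')) == 'pike'
try:
    CompareDemoReleases('queens')          # not in _list
except KeyError:
    print("unknown release names raise KeyError")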

View File

@ -14,6 +14,7 @@ from charmhelpers.core.host import (
service_restart,
write_file,
init_is_systemd,
CompareHostReleases,
)
from charmhelpers.core.hookenv import (
charm_dir,
@ -57,6 +58,7 @@ from charmhelpers.contrib.openstack.utils import (
reset_os_release,
resume_unit,
os_application_version_set,
CompareOpenStackReleases,
)
from charmhelpers.contrib.openstack.neutron import (
@ -272,27 +274,27 @@ def get_packages():
'''Return a list of packages for install based on the configured plugin'''
plugin = config('plugin')
packages = deepcopy(GATEWAY_PKGS[plugin])
source = os_release('neutron-common')
cmp_os_source = CompareOpenStackReleases(os_release('neutron-common'))
cmp_host_release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
if plugin == OVS:
if (source >= 'icehouse' and
lsb_release()['DISTRIB_CODENAME'] < 'utopic'):
if cmp_os_source >= 'icehouse' and cmp_host_release < 'utopic':
# NOTE(jamespage) neutron-vpn-agent supercedes l3-agent for
# icehouse but openswan was removed in utopic.
packages.remove('neutron-l3-agent')
packages.append('neutron-vpn-agent')
packages.append('openswan')
if source >= 'liberty':
if cmp_os_source >= 'liberty':
# Switch out mysql driver
packages.remove('python-mysqldb')
packages.append('python-pymysql')
if source >= 'mitaka':
if cmp_os_source >= 'mitaka':
# Switch out to actual ovs agent package
packages.remove('neutron-plugin-openvswitch-agent')
packages.append('neutron-openvswitch-agent')
if source >= 'kilo':
if cmp_os_source >= 'kilo':
packages.append('python-neutron-fwaas')
if plugin in (OVS, OVS_ODL):
if source >= 'newton':
if cmp_os_source >= 'newton':
# LBaaS v1 dropped in newton
packages.remove('neutron-lbaas-agent')
packages.append('neutron-lbaasv2-agent')
@ -622,19 +624,20 @@ def resolve_config_files(plugin, release):
'''
config_files = deepcopy(CONFIG_FILES)
drop_config = []
cmp_os_release = CompareOpenStackReleases(release)
if plugin == OVS:
# NOTE: deal with switch to ML2 plugin for >= icehouse
drop_config = [NEUTRON_OVS_AGENT_CONF]
if release >= 'mitaka':
if cmp_os_release >= 'mitaka':
# ml2 -> ovs_agent
drop_config = [NEUTRON_ML2_PLUGIN_CONF]
# Use MAAS1.9 for MTU and external port config on xenial and above
if lsb_release()['DISTRIB_CODENAME'] >= 'xenial':
if CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'xenial':
drop_config.extend([EXT_PORT_CONF, PHY_NIC_MTU_CONF])
# Rename to lbaasv2 in newton
if os_release('neutron-common') < 'newton':
if cmp_os_release < 'newton':
drop_config.extend([NEUTRON_LBAASV2_AA_PROFILE_PATH])
else:
drop_config.extend([NEUTRON_LBAAS_AA_PROFILE_PATH])
@ -703,7 +706,8 @@ def restart_map():
svcs.remove('neutron-vpn-agent')
if 'neutron-vpn-agent' in svcs and 'neutron-l3-agent' in svcs:
svcs.remove('neutron-l3-agent')
if release >= 'newton' and 'neutron-lbaas-agent' in svcs:
if (CompareOpenStackReleases(release) >= 'newton' and
'neutron-lbaas-agent' in svcs):
svcs.remove('neutron-lbaas-agent')
svcs.add('neutron-lbaasv2-agent')
if svcs:
@ -1095,8 +1099,10 @@ def git_post_install(projects_yaml):
'/etc/cron.d/neutron-lbaas-agent-netns-cleanup', {}, perms=0o755)
bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
cmp_host_release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
cmp_os_release = CompareOpenStackReleases(os_release('neutron-common'))
# Use systemd init units/scripts from ubuntu wily onward
if lsb_release()['DISTRIB_RELEASE'] >= '15.10':
if cmp_host_release >= 'wily':
templates_dir = os.path.join(charm_dir(), 'templates/git')
daemons = ['neutron-dhcp-agent', 'neutron-l3-agent',
'neutron-lbaasv2-agent',
@ -1106,7 +1112,7 @@ def git_post_install(projects_yaml):
'neutron-ovs-cleanup', 'neutron-server',
'neutron-sriov-nic-agent', 'neutron-vpn-agent',
'nova-api-metadata']
if os_release('neutron-common') <= 'mitaka':
if cmp_os_release <= 'mitaka':
daemons.append('neutron-lbaas-agent')
for daemon in daemons:
neutron_context = {
@ -1116,7 +1122,7 @@ def git_post_install(projects_yaml):
if daemon == 'neutron-sriov-nic-agent':
filename = 'neutron-sriov-agent'
elif daemon == 'neutron-openvswitch-agent':
if os_release('neutron-common') < 'mitaka':
if cmp_os_release < 'mitaka':
filename = 'neutron-plugin-openvswitch-agent'
template_file = 'git/{}.init.in.template'.format(filename)
init_in_file = '{}.init.in'.format(filename)
@ -1127,7 +1133,7 @@ def git_post_install(projects_yaml):
for daemon in daemons:
filename = daemon
if daemon == 'neutron-openvswitch-agent':
if os_release('neutron-common') < 'mitaka':
if cmp_os_release < 'mitaka':
filename = 'neutron-plugin-openvswitch-agent'
service('enable', filename)
else:
@ -1393,7 +1399,7 @@ def git_post_install(projects_yaml):
'/etc/init/neutron-plugin-openflow-agent.conf',
neutron_plugin_openflow_context, perms=0o644,
templates_dir=templates_dir)
if os_release('neutron-common') < 'mitaka':
if cmp_os_release < 'mitaka':
render('git.upstart',
'/etc/init/neutron-plugin-openvswitch-agent.conf',
neutron_plugin_openvswitch_context, perms=0o644,

View File

@ -17,6 +17,8 @@ from charmhelpers.contrib.openstack.amulet.utils import (
# ERROR
)
from charmhelpers.contrib.openstack.utils import CompareOpenStackReleases
# Use DEBUG to turn on debug logging
u = OpenStackAmuletUtils(DEBUG)
@ -251,7 +253,8 @@ class NeutronGatewayBasicDeployment(OpenStackAmuletDeployment):
'nova-scheduler',
'nova-conductor']
if self._get_openstack_release_string() >= 'liberty':
_os_release = self._get_openstack_release_string()
if CompareOpenStackReleases(_os_release) >= 'liberty':
nova_cc_services.remove('nova-api-ec2')
nova_cc_services.remove('nova-objectstore')

View File

@ -0,0 +1,13 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,574 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import re
import subprocess
import six
import socket
from functools import partial
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import (
config,
log,
network_get_primary_address,
unit_get,
WARNING,
)
from charmhelpers.core.host import (
lsb_release,
CompareHostReleases,
)
try:
import netifaces
except ImportError:
apt_update(fatal=True)
if six.PY2:
apt_install('python-netifaces', fatal=True)
else:
apt_install('python3-netifaces', fatal=True)
import netifaces
try:
import netaddr
except ImportError:
apt_update(fatal=True)
if six.PY2:
apt_install('python-netaddr', fatal=True)
else:
apt_install('python3-netaddr', fatal=True)
import netaddr
def _validate_cidr(network):
try:
netaddr.IPNetwork(network)
except (netaddr.core.AddrFormatError, ValueError):
raise ValueError("Network (%s) is not in CIDR presentation format" %
network)
def no_ip_found_error_out(network):
errmsg = ("No IP address found in network(s): %s" % network)
raise ValueError(errmsg)
def _get_ipv6_network_from_address(address):
"""Get an netaddr.IPNetwork for the given IPv6 address
:param address: a dict as returned by netifaces.ifaddresses
:returns netaddr.IPNetwork: None if the address is a link local or loopback
address
"""
if address['addr'].startswith('fe80') or address['addr'] == "::1":
return None
prefix = address['netmask'].split("/")
if len(prefix) > 1:
netmask = prefix[1]
else:
netmask = address['netmask']
return netaddr.IPNetwork("%s/%s" % (address['addr'],
netmask))
def get_address_in_network(network, fallback=None, fatal=False):
"""Get an IPv4 or IPv6 address within the network from the host.
:param network (str): CIDR presentation format. For example,
'192.168.1.0/24'. Supports multiple networks as a space-delimited list.
:param fallback (str): If no address is found, return fallback.
:param fatal (boolean): If no address is found, fallback is not
set and fatal is True then exit(1).
"""
if network is None:
if fallback is not None:
return fallback
if fatal:
no_ip_found_error_out(network)
else:
return None
networks = network.split() or [network]
for network in networks:
_validate_cidr(network)
network = netaddr.IPNetwork(network)
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
if network.version == 4 and netifaces.AF_INET in addresses:
for addr in addresses[netifaces.AF_INET]:
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
addr['netmask']))
if cidr in network:
return str(cidr.ip)
if network.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
cidr = _get_ipv6_network_from_address(addr)
if cidr and cidr in network:
return str(cidr.ip)
if fallback is not None:
return fallback
if fatal:
no_ip_found_error_out(network)
return None
def is_ipv6(address):
"""Determine whether provided address is IPv6 or not."""
try:
address = netaddr.IPAddress(address)
except netaddr.AddrFormatError:
# probably a hostname - so not an address at all!
return False
return address.version == 6
def is_address_in_network(network, address):
"""
Determine whether the provided address is within a network range.
:param network (str): CIDR presentation format. For example,
'192.168.1.0/24'.
:param address: An individual IPv4 or IPv6 address without a net
mask or subnet prefix. For example, '192.168.1.1'.
:returns boolean: Flag indicating whether address is in network.
"""
try:
network = netaddr.IPNetwork(network)
except (netaddr.core.AddrFormatError, ValueError):
raise ValueError("Network (%s) is not in CIDR presentation format" %
network)
try:
address = netaddr.IPAddress(address)
except (netaddr.core.AddrFormatError, ValueError):
raise ValueError("Address (%s) is not in correct presentation format" %
address)
if address in network:
return True
else:
return False
def _get_for_address(address, key):
"""Retrieve an attribute of or the physical interface that
the IP address provided could be bound to.
:param address (str): An individual IPv4 or IPv6 address without a net
mask or subnet prefix. For example, '192.168.1.1'.
:param key: 'iface' for the physical interface name or an attribute
of the configured interface, for example 'netmask'.
:returns str: Requested attribute or None if address is not bindable.
"""
address = netaddr.IPAddress(address)
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
if address.version == 4 and netifaces.AF_INET in addresses:
addr = addresses[netifaces.AF_INET][0]['addr']
netmask = addresses[netifaces.AF_INET][0]['netmask']
network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
cidr = network.cidr
if address in cidr:
if key == 'iface':
return iface
else:
return addresses[netifaces.AF_INET][0][key]
if address.version == 6 and netifaces.AF_INET6 in addresses:
for addr in addresses[netifaces.AF_INET6]:
network = _get_ipv6_network_from_address(addr)
if not network:
continue
cidr = network.cidr
if address in cidr:
if key == 'iface':
return iface
elif key == 'netmask' and cidr:
return str(cidr).split('/')[1]
else:
return addr[key]
return None
get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask')
def resolve_network_cidr(ip_address):
'''
Resolves the full address cidr of an ip_address based on
configured network interfaces
'''
netmask = get_netmask_for_address(ip_address)
return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
def format_ipv6_addr(address):
"""If address is IPv6, wrap it in '[]' otherwise return None.
This is required by most configuration files when specifying IPv6
addresses.
"""
if is_ipv6(address):
return "[%s]" % address
return None
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
fatal=True, exc_list=None):
"""Return the assigned IP address for a given interface, if any.
:param iface: network interface on which address(es) are expected to
be found.
:param inet_type: inet address family
:param inc_aliases: include alias interfaces in search
:param fatal: if True, raise exception if address not found
:param exc_list: list of addresses to ignore
:return: list of ip addresses
"""
# Extract nic if passed /dev/ethX
if '/' in iface:
iface = iface.split('/')[-1]
if not exc_list:
exc_list = []
try:
inet_num = getattr(netifaces, inet_type)
except AttributeError:
raise Exception("Unknown inet type '%s'" % str(inet_type))
interfaces = netifaces.interfaces()
if inc_aliases:
ifaces = []
for _iface in interfaces:
if iface == _iface or _iface.split(':')[0] == iface:
ifaces.append(_iface)
if fatal and not ifaces:
raise Exception("Invalid interface '%s'" % iface)
ifaces.sort()
else:
if iface not in interfaces:
if fatal:
raise Exception("Interface '%s' not found " % (iface))
else:
return []
else:
ifaces = [iface]
addresses = []
for netiface in ifaces:
net_info = netifaces.ifaddresses(netiface)
if inet_num in net_info:
for entry in net_info[inet_num]:
if 'addr' in entry and entry['addr'] not in exc_list:
addresses.append(entry['addr'])
if fatal and not addresses:
raise Exception("Interface '%s' doesn't have any %s addresses." %
(iface, inet_type))
return sorted(addresses)
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
def get_iface_from_addr(addr):
"""Work out on which interface the provided address is configured."""
for iface in netifaces.interfaces():
addresses = netifaces.ifaddresses(iface)
for inet_type in addresses:
for _addr in addresses[inet_type]:
_addr = _addr['addr']
# link local
ll_key = re.compile("(.+)%.*")
raw = re.match(ll_key, _addr)
if raw:
_addr = raw.group(1)
if _addr == addr:
log("Address '%s' is configured on iface '%s'" %
(addr, iface))
return iface
msg = "Unable to infer net iface on which '%s' is configured" % (addr)
raise Exception(msg)
def sniff_iface(f):
"""Ensure decorated function is called with a value for iface.
If no iface provided, inject net iface inferred from unit private address.
"""
def iface_sniffer(*args, **kwargs):
if not kwargs.get('iface', None):
kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
return f(*args, **kwargs)
return iface_sniffer
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
dynamic_only=True):
"""Get assigned IPv6 address for a given interface.
Returns list of addresses found. If no address found, returns empty list.
If iface is None, we infer the current primary interface by doing a reverse
lookup on the unit private-address.
We currently only support scope global IPv6 addresses i.e. non-temporary
addresses. If no global IPv6 address is found, return the first one found
in the ipv6 address list.
:param iface: network interface on which ipv6 address(es) are expected to
be found.
:param inc_aliases: include alias interfaces in search
:param fatal: if True, raise exception if address not found
:param exc_list: list of addresses to ignore
:param dynamic_only: only recognise dynamic addresses
:return: list of ipv6 addresses
"""
addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
inc_aliases=inc_aliases, fatal=fatal,
exc_list=exc_list)
if addresses:
global_addrs = []
for addr in addresses:
key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
m = re.match(key_scope_link_local, addr)
if m:
eui_64_mac = m.group(1)
iface = m.group(2)
else:
global_addrs.append(addr)
if global_addrs:
# Make sure any found global addresses are not temporary
cmd = ['ip', 'addr', 'show', iface]
out = subprocess.check_output(cmd).decode('UTF-8')
if dynamic_only:
key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
else:
key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
addrs = []
for line in out.split('\n'):
line = line.strip()
m = re.match(key, line)
if m and 'temporary' not in line:
# Return the first valid address we find
for addr in global_addrs:
if m.group(1) == addr:
if not dynamic_only or \
m.group(1).endswith(eui_64_mac):
addrs.append(addr)
if addrs:
return addrs
if fatal:
raise Exception("Interface '%s' does not have a scope global "
"non-temporary ipv6 address." % iface)
return []
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
"""Return a list of bridges on the system."""
b_regex = "%s/*/bridge" % vnic_dir
return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
"""Return a list of nics comprising a given bridge on the system."""
brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
return [x.split('/')[-1] for x in glob.glob(brif_regex)]
def is_bridge_member(nic):
"""Check if a given nic is a member of a bridge."""
for bridge in get_bridges():
if nic in get_bridge_nics(bridge):
return True
return False
def is_ip(address):
"""
Returns True if address is a valid IP address.
"""
try:
# Test to see if already an IPv4/IPv6 address
address = netaddr.IPAddress(address)
return True
except (netaddr.AddrFormatError, ValueError):
return False
def ns_query(address):
try:
import dns.resolver
except ImportError:
if six.PY2:
apt_install('python-dnspython', fatal=True)
else:
apt_install('python3-dnspython', fatal=True)
import dns.resolver
if isinstance(address, dns.name.Name):
rtype = 'PTR'
elif isinstance(address, six.string_types):
rtype = 'A'
else:
return None
try:
answers = dns.resolver.query(address, rtype)
except dns.resolver.NXDOMAIN:
return None
if answers:
return str(answers[0])
return None
def get_host_ip(hostname, fallback=None):
"""
Resolves the IP for a given hostname, or returns
the input if it is already an IP.
"""
if is_ip(hostname):
return hostname
ip_addr = ns_query(hostname)
if not ip_addr:
try:
ip_addr = socket.gethostbyname(hostname)
except:
log("Failed to resolve hostname '%s'" % (hostname),
level=WARNING)
return fallback
return ip_addr
def get_hostname(address, fqdn=True):
"""
Resolves hostname for given IP, or returns the input
if it is already a hostname.
"""
if is_ip(address):
try:
import dns.reversename
except ImportError:
if six.PY2:
apt_install("python-dnspython", fatal=True)
else:
apt_install("python3-dnspython", fatal=True)
import dns.reversename
rev = dns.reversename.from_address(address)
result = ns_query(rev)
if not result:
try:
result = socket.gethostbyaddr(address)[0]
except:
return None
else:
result = address
if fqdn:
# strip trailing .
if result.endswith('.'):
return result[:-1]
else:
return result
else:
return result.split('.')[0]
def port_has_listener(address, port):
"""
Returns True if the address:port is open and being listened to,
else False.
@param address: an IP address or hostname
@param port: integer port
Note calls 'zc' via a subprocess shell
"""
cmd = ['nc', '-z', address, str(port)]
result = subprocess.call(cmd)
return not(bool(result))
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(release) < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu "
"versions less than Trusty 14.04")
def get_relation_ip(interface, config_override=None):
"""Return this unit's IP for the given relation.
Allow for an arbitrary interface to use with network-get to select an IP.
Handle all address selection options including configuration parameter
override and IPv6.
Usage: get_relation_ip('amqp', config_override='access-network')
@param interface: string name of the relation.
@param config_override: string name of the config option for network
override. Supports legacy network override configuration parameters.
@raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
@returns IPv6 or IPv4 address
"""
fallback = get_host_ip(unit_get('private-address'))
if config('prefer-ipv6'):
assert_charm_supports_ipv6()
return get_ipv6_addr()[0]
elif config_override and config(config_override):
return get_address_in_network(config(config_override),
fallback)
else:
try:
return network_get_primary_address(interface)
except NotImplementedError:
return fallback

View File

@ -40,6 +40,7 @@ from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
from charmhelpers.core.decorators import retry_on_exception
from charmhelpers.core.host import CompareHostReleases
DEBUG = logging.DEBUG
ERROR = logging.ERROR
@ -1255,7 +1256,7 @@ class OpenStackAmuletUtils(AmuletUtils):
contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
fatal=True)
ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
if ubuntu_release <= 'trusty':
if CompareHostReleases(ubuntu_release) <= 'trusty':
memcache_listen_addr = 'ip6-localhost'
else:
memcache_listen_addr = '::1'

View File

@ -0,0 +1,21 @@
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class OSContextError(Exception):
"""Raised when an error occurs during context generation.
This exception is principally used in contrib.openstack.context
"""
pass

File diff suppressed because it is too large

View File

@ -0,0 +1,13 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,54 @@
#!/usr/bin/env python
# coding: utf-8
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import atexit
import sys
from charmhelpers.contrib.python.rpdb import Rpdb
from charmhelpers.core.hookenv import (
open_port,
close_port,
ERROR,
log
)
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
DEFAULT_ADDR = "0.0.0.0"
DEFAULT_PORT = 4444
def _error(message):
log(message, level=ERROR)
def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
"""
Set a trace point using the remote debugger
"""
atexit.register(close_port, port)
try:
log("Starting a remote python debugger session on %s:%s" % (addr,
port))
open_port(port)
debugger = Rpdb(addr=addr, port=port)
debugger.set_trace(sys._getframe().f_back)
except:
_error("Cannot start a remote debug session on %s:%s" % (addr,
port))

View File

@ -0,0 +1,154 @@
#!/usr/bin/env python
# coding: utf-8
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import subprocess
import sys
from charmhelpers.fetch import apt_install, apt_update
from charmhelpers.core.hookenv import charm_dir, log
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
def pip_execute(*args, **kwargs):
"""Overriden pip_execute() to stop sys.path being changed.
The act of importing main from the pip module seems to cause add wheels
from the /usr/share/python-wheels which are installed by various tools.
This function ensures that sys.path remains the same after the call is
executed.
"""
try:
_path = sys.path
try:
from pip import main as _pip_execute
except ImportError:
apt_update()
if six.PY2:
apt_install('python-pip')
else:
apt_install('python3-pip')
from pip import main as _pip_execute
_pip_execute(*args, **kwargs)
finally:
sys.path = _path
def parse_options(given, available):
"""Given a set of options, check if available"""
for key, value in sorted(given.items()):
if not value:
continue
if key in available:
yield "--{0}={1}".format(key, value)
def pip_install_requirements(requirements, constraints=None, **options):
"""Install a requirements file.
:param constraints: Path to pip constraints file.
http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
"""
command = ["install"]
available_options = ('proxy', 'src', 'log', )
for option in parse_options(options, available_options):
command.append(option)
command.append("-r {0}".format(requirements))
if constraints:
command.append("-c {0}".format(constraints))
log("Installing from file: {} with constraints {} "
"and options: {}".format(requirements, constraints, command))
else:
log("Installing from file: {} with options: {}".format(requirements,
command))
pip_execute(command)
def pip_install(package, fatal=False, upgrade=False, venv=None,
constraints=None, **options):
"""Install a python package"""
if venv:
venv_python = os.path.join(venv, 'bin/pip')
command = [venv_python, "install"]
else:
command = ["install"]
available_options = ('proxy', 'src', 'log', 'index-url', )
for option in parse_options(options, available_options):
command.append(option)
if upgrade:
command.append('--upgrade')
if constraints:
command.extend(['-c', constraints])
if isinstance(package, list):
command.extend(package)
else:
command.append(package)
log("Installing {} package with options: {}".format(package,
command))
if venv:
subprocess.check_call(command)
else:
pip_execute(command)
def pip_uninstall(package, **options):
"""Uninstall a python package"""
command = ["uninstall", "-q", "-y"]
available_options = ('proxy', 'log', )
for option in parse_options(options, available_options):
command.append(option)
if isinstance(package, list):
command.extend(package)
else:
command.append(package)
log("Uninstalling {} package with options: {}".format(package,
command))
pip_execute(command)
def pip_list():
"""Returns the list of current python installed packages
"""
return pip_execute(["list"])
def pip_create_virtualenv(path=None):
"""Create an isolated Python environment."""
if six.PY2:
apt_install('python-virtualenv')
else:
apt_install('python3-virtualenv')
if path:
venv_path = path
else:
venv_path = os.path.join(charm_dir(), 'venv')
if not os.path.exists(venv_path):
subprocess.check_call(['virtualenv', venv_path])

View File

@ -0,0 +1,56 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Remote Python Debugger (pdb wrapper)."""
import pdb
import socket
import sys
__author__ = "Bertrand Janin <b@janin.com>"
__version__ = "0.1.3"
class Rpdb(pdb.Pdb):
def __init__(self, addr="127.0.0.1", port=4444):
"""Initialize the socket and initialize pdb."""
# Backup stdin and stdout before replacing them by the socket handle
self.old_stdout = sys.stdout
self.old_stdin = sys.stdin
# Open a 'reusable' socket to let the webapp reload on the same port
self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
self.skt.bind((addr, port))
self.skt.listen(1)
(clientsocket, address) = self.skt.accept()
handle = clientsocket.makefile('rw')
pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
sys.stdout = sys.stdin = handle
def shutdown(self):
"""Revert stdin and stdout, close the socket."""
sys.stdout = self.old_stdout
sys.stdin = self.old_stdin
self.skt.close()
self.set_continue()
def do_continue(self, arg):
"""Stop all operation on ``continue``."""
self.shutdown()
return 1
do_EOF = do_quit = do_exit = do_c = do_cont = do_continue

View File

@ -0,0 +1,32 @@
#!/usr/bin/env python
# coding: utf-8
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
def current_version():
"""Current system python version"""
return sys.version_info
def current_version_string():
"""Current system python version as string major.minor.micro"""
return "{0}.{1}.{2}".format(sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro)

View File

@ -0,0 +1,13 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,13 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

File diff suppressed because it is too large

View File

@ -0,0 +1,86 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from subprocess import (
check_call,
check_output,
)
import six
##################################################
# loopback device helpers.
##################################################
def loopback_devices():
'''
Parse through 'losetup -a' output to determine currently mapped
loopback devices. Output is expected to look like:
/dev/loop0: [0807]:961814 (/tmp/my.img)
:returns: dict: a dict mapping {loopback_dev: backing_file}
'''
loopbacks = {}
cmd = ['losetup', '-a']
devs = [d.strip().split(' ') for d in
check_output(cmd).splitlines() if d != '']
for dev, _, f in devs:
loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
return loopbacks
def create_loopback(file_path):
'''
Create a loopback device for a given backing file.
:returns: str: Full path to new loopback device (eg, /dev/loop0)
'''
file_path = os.path.abspath(file_path)
check_call(['losetup', '--find', file_path])
for d, f in six.iteritems(loopback_devices()):
if f == file_path:
return d
def ensure_loopback_device(path, size):
'''
Ensure a loopback device exists for a given backing file path and size.
If it a loopback device is not mapped to file, a new one will be created.
TODO: Confirm size of found loopback device.
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
'''
for d, f in six.iteritems(loopback_devices()):
if f == path:
return d
if not os.path.exists(path):
cmd = ['truncate', '--size', size, path]
check_call(cmd)
return create_loopback(path)
def is_mapped_loopback_device(device):
"""
Checks if a given device name is an existing/mapped loopback device.
:param device: str: Full path to the device (eg, /dev/loop1).
:returns: str: Path to the backing file if is a loopback device
empty string otherwise
"""
return loopback_devices().get(device, "")
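A minimal usage sketch of these loopback helpers, assuming the usual charm-helpers layout (contrib.storage.linux.loopback) and an illustrative backing file and size; the calls need root and a working losetup:

from charmhelpers.contrib.storage.linux.loopback import (
    ensure_loopback_device,
    is_mapped_loopback_device,
)

device = ensure_loopback_device('/srv/loop.img', '2G')   # hypothetical path/size
backing = is_mapped_loopback_device(device)              # -> '/srv/loop.img'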

View File

@ -0,0 +1,103 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import (
CalledProcessError,
check_call,
check_output,
Popen,
PIPE,
)
##################################################
# LVM helpers.
##################################################
def deactivate_lvm_volume_group(block_device):
'''
    Deactivate any volume group associated with an LVM physical volume.
:param block_device: str: Full path to LVM physical volume
'''
vg = list_lvm_volume_group(block_device)
if vg:
cmd = ['vgchange', '-an', vg]
check_call(cmd)
def is_lvm_physical_volume(block_device):
'''
Determine whether a block device is initialized as an LVM PV.
:param block_device: str: Full path of block device to inspect.
:returns: boolean: True if block device is a PV, False if not.
'''
try:
check_output(['pvdisplay', block_device])
return True
except CalledProcessError:
return False
def remove_lvm_physical_volume(block_device):
'''
Remove LVM PV signatures from a given block device.
:param block_device: str: Full path of block device to scrub.
'''
p = Popen(['pvremove', '-ff', block_device],
stdin=PIPE)
p.communicate(input='y\n')
def list_lvm_volume_group(block_device):
'''
List LVM volume group associated with a given block device.
Assumes block device is a valid LVM PV.
:param block_device: str: Full path of block device to inspect.
:returns: str: Name of volume group associated with block device or None
'''
vg = None
pvd = check_output(['pvdisplay', block_device]).splitlines()
for l in pvd:
l = l.decode('UTF-8')
if l.strip().startswith('VG Name'):
vg = ' '.join(l.strip().split()[2:])
return vg
def create_lvm_physical_volume(block_device):
'''
Initialize a block device as an LVM physical volume.
:param block_device: str: Full path of block device to initialize.
'''
check_call(['pvcreate', block_device])
def create_lvm_volume_group(volume_group, block_device):
'''
Create an LVM volume group backed by a given block device.
Assumes block device has already been initialized as an LVM PV.
:param volume_group: str: Name of volume group to create.
:block_device: str: Full path of PV-initialized block device.
'''
check_call(['vgcreate', volume_group, block_device])
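For illustration only, a typical PV/VG lifecycle with the helpers above might look like the sketch below; the device path and volume group name are assumptions and the commands require root:

from charmhelpers.contrib.storage.linux.lvm import (
    create_lvm_physical_volume,
    create_lvm_volume_group,
    deactivate_lvm_volume_group,
    is_lvm_physical_volume,
    remove_lvm_physical_volume,
)

block_device = '/dev/vdb'   # hypothetical spare disk
if not is_lvm_physical_volume(block_device):
    create_lvm_physical_volume(block_device)
    create_lvm_volume_group('cinder-volumes', block_device)
# later, before re-purposing the disk:
deactivate_lvm_volume_group(block_device)
remove_lvm_physical_volume(block_device)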

View File

@ -0,0 +1,69 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from stat import S_ISBLK
from subprocess import (
check_call,
check_output,
call
)
def is_block_device(path):
'''
Confirm device at path is a valid block device node.
:returns: boolean: True if path is a block device, False if not.
'''
if not os.path.exists(path):
return False
return S_ISBLK(os.stat(path).st_mode)
def zap_disk(block_device):
'''
Clear a block device of partition table. Relies on sgdisk, which is
    installed as part of the 'gdisk' package in Ubuntu.
:param block_device: str: Full path of block device to clean.
'''
# https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
# sometimes sgdisk exits non-zero; this is OK, dd will clean up
call(['sgdisk', '--zap-all', '--', block_device])
call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
dev_end = check_output(['blockdev', '--getsz',
block_device]).decode('UTF-8')
gpt_end = int(dev_end.split()[0]) - 100
check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
'bs=1M', 'count=1'])
check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
def is_device_mounted(device):
'''Given a device path, return True if that device is mounted, and False
if it isn't.
:param device: str: Full path of the device to check.
:returns: boolean: True if the path represents a mounted device, False if
it doesn't.
'''
try:
out = check_output(['lsblk', '-P', device]).decode('UTF-8')
except:
return False
return bool(re.search(r'MOUNTPOINT=".+"', out))
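A short sketch of how a charm might guard a destructive call with these helpers; the device path is illustrative and the import path assumes the usual charm-helpers layout:

from charmhelpers.contrib.storage.linux.utils import (
    is_block_device,
    is_device_mounted,
    zap_disk,
)

dev = '/dev/vdb'   # hypothetical device
if is_block_device(dev) and not is_device_mounted(dev):
    zap_disk(dev)  # destroys any partition table on the device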

View File

@ -45,6 +45,7 @@ if __platform__ == "ubuntu":
add_new_group,
lsb_release,
cmp_pkgrevno,
CompareHostReleases,
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import (
@ -52,6 +53,7 @@ elif __platform__ == "centos":
add_new_group,
lsb_release,
cmp_pkgrevno,
CompareHostReleases,
) # flake8: noqa -- ignore F401 for this import
UPDATEDB_PATH = '/etc/updatedb.conf'

View File

@ -2,6 +2,22 @@ import subprocess
import yum
import os
from charmhelpers.core.strutils import BasicStringComparator
class CompareHostReleases(BasicStringComparator):
"""Provide comparisons of Host releases.
Use in the form of
if CompareHostReleases(release) > 'trusty':
        # do something only relevant for releases newer than trusty
"""
def __init__(self, item):
raise NotImplementedError(
"CompareHostReleases() is not implemented for CentOS")
def service_available(service_name):
# """Determine whether a system service is available."""

View File

@ -1,5 +1,37 @@
import subprocess
from charmhelpers.core.strutils import BasicStringComparator
UBUNTU_RELEASES = (
'lucid',
'maverick',
'natty',
'oneiric',
'precise',
'quantal',
'raring',
'saucy',
'trusty',
'utopic',
'vivid',
'wily',
'xenial',
'yakkety',
'zesty',
)
class CompareHostReleases(BasicStringComparator):
"""Provide comparisons of Ubuntu releases.
Use in the form of
if CompareHostReleases(release) > 'trusty':
        # do something only relevant for releases newer than trusty
"""
_list = UBUNTU_RELEASES
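The comparator is re-exported through charmhelpers.core.host (as the hunk above that imports from host_factory suggests), so charm code can replace raw string comparisons with something like the following sketch; the feature gate is purely illustrative:

from charmhelpers.core.host import lsb_release, CompareHostReleases

release = lsb_release()['DISTRIB_CODENAME']      # e.g. 'xenial'
if CompareHostReleases(release) >= 'xenial':
    pass  # enable behaviour only available from xenial onwards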
def service_available(service_name):
"""Determine whether a system service is available"""

View File

@ -68,3 +68,56 @@ def bytes_from_string(value):
msg = "Unable to interpret string value '%s' as bytes" % (value)
raise ValueError(msg)
return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
class BasicStringComparator(object):
"""Provides a class that will compare strings from an iterator type object.
Used to provide > and < comparisons on strings that may not necessarily be
alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
z-wrap.
"""
_list = None
def __init__(self, item):
if self._list is None:
raise Exception("Must define the _list in the class definition!")
try:
self.index = self._list.index(item)
except Exception:
raise KeyError("Item '{}' is not in list '{}'"
.format(item, self._list))
def __eq__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index == self._list.index(other)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index < self._list.index(other)
def __ge__(self, other):
return not self.__lt__(other)
def __gt__(self, other):
assert isinstance(other, str) or isinstance(other, self.__class__)
return self.index > self._list.index(other)
def __le__(self, other):
return not self.__gt__(other)
def __str__(self):
"""Always give back the item at the index so it can be used in
comparisons like:
s_mitaka = CompareOpenStack('mitaka')
        s_newton = CompareOpenStack('newton')
assert s_newton > s_mitaka
@returns: <string>
"""
return self._list[self.index]
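This base class is what the commit uses to fix the alphanumeric comparisons: once release codenames wrap past 'z', plain string comparison no longer matches release order, whereas the index-based comparator does. A hedged illustration (the post-'zesty' codename is hypothetical and would first need to be appended to the release list):

# naive string comparison gets this wrong once the alphabet wraps:
assert not ('artful' > 'zesty')   # 'artful' sorts before 'zesty' as a string
# with the index-based comparator (assuming 'artful' is added to _list):
#     CompareHostReleases('artful') > 'zesty'   -> True

The same class also backs the OpenStack release comparator mentioned in the __str__ docstring above.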

View File

@ -0,0 +1,197 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from charmhelpers.osplatform import get_platform
from yaml import safe_load
from charmhelpers.core.hookenv import (
config,
log,
)
import six
if six.PY3:
from urllib.parse import urlparse, urlunparse
else:
from urlparse import urlparse, urlunparse
# The order of this list is very important. Handlers should be listed in from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
'charmhelpers.fetch.giturl.GitUrlFetchHandler',
)
class SourceConfigError(Exception):
pass
class UnhandledSource(Exception):
pass
class AptLockError(Exception):
pass
class BaseFetchHandler(object):
"""Base class for FetchHandler implementations in fetch plugins"""
def can_handle(self, source):
"""Returns True if the source can be handled. Otherwise returns
a string explaining why it cannot"""
return "Wrong source type"
def install(self, source):
"""Try to download and unpack the source. Return the path to the
unpacked files or raise UnhandledSource."""
raise UnhandledSource("Wrong source type {}".format(source))
def parse_url(self, url):
return urlparse(url)
def base_url(self, url):
"""Return url without querystring or fragment"""
parts = list(self.parse_url(url))
parts[4:] = ['' for i in parts[4:]]
return urlunparse(parts)
__platform__ = get_platform()
module = "charmhelpers.fetch.%s" % __platform__
fetch = importlib.import_module(module)
filter_installed_packages = fetch.filter_installed_packages
install = fetch.install
upgrade = fetch.upgrade
update = fetch.update
purge = fetch.purge
add_source = fetch.add_source
if __platform__ == "ubuntu":
apt_cache = fetch.apt_cache
apt_install = fetch.install
apt_update = fetch.update
apt_upgrade = fetch.upgrade
apt_purge = fetch.purge
apt_mark = fetch.apt_mark
apt_hold = fetch.apt_hold
apt_unhold = fetch.apt_unhold
get_upstream_version = fetch.get_upstream_version
elif __platform__ == "centos":
yum_search = fetch.yum_search
def configure_sources(update=False,
sources_var='install_sources',
keys_var='install_keys'):
"""Configure multiple sources from charm configuration.
The lists are encoded as yaml fragments in the configuration.
The fragment needs to be included as a string. Sources and their
corresponding keys are of the types supported by add_source().
Example config:
install_sources: |
- "ppa:foo"
- "http://example.com/repo precise main"
install_keys: |
- null
- "a1b2c3d4"
Note that 'null' (a.k.a. None) should not be quoted.
"""
sources = safe_load((config(sources_var) or '').strip()) or []
keys = safe_load((config(keys_var) or '').strip()) or None
if isinstance(sources, six.string_types):
sources = [sources]
if keys is None:
for source in sources:
add_source(source, None)
else:
if isinstance(keys, six.string_types):
keys = [keys]
if len(sources) != len(keys):
raise SourceConfigError(
'Install sources and keys lists are different lengths')
for source, key in zip(sources, keys):
add_source(source, key)
if update:
fetch.update(fatal=True)
def install_remote(source, *args, **kwargs):
"""Install a file tree from a remote source.
The specified source should be a url of the form:
scheme://[host]/path[#[option=value][&...]]
Schemes supported are based on this modules submodules.
Options supported are submodule-specific.
Additional arguments are passed through to the submodule.
For example::
dest = install_remote('http://example.com/archive.tgz',
checksum='deadbeef',
hash_type='sha1')
This will download `archive.tgz`, validate it using SHA1 and, if
the file is ok, extract it and return the directory in which it
was extracted. If the checksum fails, it will raise
:class:`charmhelpers.core.host.ChecksumError`.
"""
# We ONLY check for True here because can_handle may return a string
# explaining why it can't handle a given source.
handlers = [h for h in plugins() if h.can_handle(source) is True]
for handler in handlers:
try:
return handler.install(source, *args, **kwargs)
except UnhandledSource as e:
log('Install source attempt unsuccessful: {}'.format(e),
level='WARNING')
raise UnhandledSource("No handler found for source {}".format(source))
def install_from_config(config_var_name):
"""Install a file from config."""
charm_config = config()
source = charm_config[config_var_name]
return install_remote(source)
def plugins(fetch_handlers=None):
if not fetch_handlers:
fetch_handlers = FETCH_HANDLERS
plugin_list = []
for handler_name in fetch_handlers:
package, classname = handler_name.rsplit('.', 1)
try:
handler_class = getattr(
importlib.import_module(package),
classname)
plugin_list.append(handler_class())
except NotImplementedError:
# Skip missing plugins so that they can be ommitted from
# installation if desired
log("FetchHandler {} not found, skipping plugin".format(
handler_name))
return plugin_list

View File

@ -0,0 +1,165 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import hashlib
import re
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource
)
from charmhelpers.payload.archive import (
get_archive_handler,
extract,
)
from charmhelpers.core.host import mkdir, check_hash
import six
if six.PY3:
from urllib.request import (
build_opener, install_opener, urlopen, urlretrieve,
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
)
from urllib.parse import urlparse, urlunparse, parse_qs
from urllib.error import URLError
else:
from urllib import urlretrieve
from urllib2 import (
build_opener, install_opener, urlopen,
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
URLError
)
from urlparse import urlparse, urlunparse, parse_qs
def splituser(host):
'''urllib.splituser(), but six's support of this seems broken'''
_userprog = re.compile('^(.*)@(.*)$')
match = _userprog.match(host)
if match:
return match.group(1, 2)
return None, host
def splitpasswd(user):
'''urllib.splitpasswd(), but six's support of this is missing'''
_passwdprog = re.compile('^([^:]*):(.*)$', re.S)
match = _passwdprog.match(user)
if match:
return match.group(1, 2)
return user, None
class ArchiveUrlFetchHandler(BaseFetchHandler):
"""
Handler to download archive files from arbitrary URLs.
Can fetch from http, https, ftp, and file URLs.
Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
Installs the contents of the archive in $CHARM_DIR/fetched/.
"""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
# XXX: Why is this returning a boolean and a string? It's
# doomed to fail since "bool(can_handle('foo://'))" will be True.
return "Wrong source type"
if get_archive_handler(self.base_url(source)):
return True
return False
def download(self, source, dest):
"""
Download an archive file.
:param str source: URL pointing to an archive file.
:param str dest: Local path location to download archive file to.
"""
        # propagate all exceptions
# URLError, OSError, etc
proto, netloc, path, params, query, fragment = urlparse(source)
if proto in ('http', 'https'):
auth, barehost = splituser(netloc)
if auth is not None:
source = urlunparse((proto, barehost, path, params, query, fragment))
username, password = splitpasswd(auth)
passman = HTTPPasswordMgrWithDefaultRealm()
# Realm is set to None in add_password to force the username and password
# to be used whatever the realm
passman.add_password(None, source, username, password)
authhandler = HTTPBasicAuthHandler(passman)
opener = build_opener(authhandler)
install_opener(opener)
response = urlopen(source)
try:
with open(dest, 'wb') as dest_file:
dest_file.write(response.read())
except Exception as e:
if os.path.isfile(dest):
os.unlink(dest)
raise e
# Mandatory file validation via Sha1 or MD5 hashing.
def download_and_validate(self, url, hashsum, validate="sha1"):
tempfile, headers = urlretrieve(url)
check_hash(tempfile, hashsum, validate)
return tempfile
def install(self, source, dest=None, checksum=None, hash_type='sha1'):
"""
Download and install an archive file, with optional checksum validation.
The checksum can also be given on the `source` URL's fragment.
For example::
handler.install('http://example.com/file.tgz#sha1=deadbeef')
:param str source: URL pointing to an archive file.
:param str dest: Local destination path to install to. If not given,
installs to `$CHARM_DIR/archives/archive_file_name`.
:param str checksum: If given, validate the archive file after download.
:param str hash_type: Algorithm used to generate `checksum`.
            Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
"""
url_parts = self.parse_url(source)
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0o755)
dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
try:
self.download(source, dld_file)
except URLError as e:
raise UnhandledSource(e.reason)
except OSError as e:
raise UnhandledSource(e.strerror)
options = parse_qs(url_parts.fragment)
for key, value in options.items():
if not six.PY3:
algorithms = hashlib.algorithms
else:
algorithms = hashlib.algorithms_available
if key in algorithms:
if len(value) != 1:
raise TypeError(
"Expected 1 hash value, not %d" % len(value))
expected = value[0]
check_hash(dld_file, expected, key)
if checksum:
check_hash(dld_file, checksum, hash_type)
return extract(dld_file, dest)

View File

@ -0,0 +1,76 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from subprocess import check_call
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
filter_installed_packages,
install,
)
from charmhelpers.core.host import mkdir
if filter_installed_packages(['bzr']) != []:
install(['bzr'])
if filter_installed_packages(['bzr']) != []:
raise NotImplementedError('Unable to install bzr')
class BzrUrlFetchHandler(BaseFetchHandler):
"""Handler for bazaar branches via generic and lp URLs."""
def can_handle(self, source):
url_parts = self.parse_url(source)
if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
return False
elif not url_parts.scheme:
return os.path.exists(os.path.join(source, '.bzr'))
else:
return True
def branch(self, source, dest, revno=None):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
cmd_opts = []
if revno:
cmd_opts += ['-r', str(revno)]
if os.path.exists(dest):
cmd = ['bzr', 'pull']
cmd += cmd_opts
cmd += ['--overwrite', '-d', dest, source]
else:
cmd = ['bzr', 'branch']
cmd += cmd_opts
cmd += [source, dest]
check_call(cmd)
def install(self, source, dest=None, revno=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
if dest:
dest_dir = os.path.join(dest, branch_name)
else:
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
if dest and not os.path.exists(dest):
mkdir(dest, perms=0o755)
try:
self.branch(source, dest_dir, revno)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir

View File

@ -0,0 +1,171 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
import time
import six
import yum
from tempfile import NamedTemporaryFile
from charmhelpers.core.hookenv import log
YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM.
YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between YUM lock checks.
YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
def filter_installed_packages(packages):
"""Return a list of packages that require installation."""
yb = yum.YumBase()
package_list = yb.doPackageLists()
temp_cache = {p.base_package_name: 1 for p in package_list['installed']}
_pkgs = [p for p in packages if not temp_cache.get(p, False)]
return _pkgs
def install(packages, options=None, fatal=False):
"""Install one or more packages."""
cmd = ['yum', '--assumeyes']
if options is not None:
cmd.extend(options)
cmd.append('install')
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
_run_yum_command(cmd, fatal)
def upgrade(options=None, fatal=False, dist=False):
"""Upgrade all packages."""
cmd = ['yum', '--assumeyes']
if options is not None:
cmd.extend(options)
cmd.append('upgrade')
log("Upgrading with options: {}".format(options))
_run_yum_command(cmd, fatal)
def update(fatal=False):
"""Update local yum cache."""
cmd = ['yum', '--assumeyes', 'update']
log("Update with fatal: {}".format(fatal))
_run_yum_command(cmd, fatal)
def purge(packages, fatal=False):
"""Purge one or more packages."""
cmd = ['yum', '--assumeyes', 'remove']
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
log("Purging {}".format(packages))
_run_yum_command(cmd, fatal)
def yum_search(packages):
"""Search for a package."""
output = {}
cmd = ['yum', 'search']
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
log("Searching for {}".format(packages))
result = subprocess.check_output(cmd)
for package in list(packages):
output[package] = package in result
return output
def add_source(source, key=None):
"""Add a package source to this system.
@param source: a URL with a rpm package
@param key: A key to be added to the system's keyring and used
to verify the signatures on packages. Ideally, this should be an
ASCII format GPG public key including the block headers. A GPG key
id may also be used, but be aware that only insecure protocols are
available to retrieve the actual public key from a public keyserver
placing your Juju environment at risk.
"""
if source is None:
log('Source is not present. Skipping')
return
if source.startswith('http'):
directory = '/etc/yum.repos.d/'
for filename in os.listdir(directory):
with open(directory + filename, 'r') as rpm_file:
if source in rpm_file.read():
break
else:
log("Add source: {!r}".format(source))
# write in the charms.repo
with open(directory + 'Charms.repo', 'a') as rpm_file:
rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
rpm_file.write('name=%s\n' % source[7:])
rpm_file.write('baseurl=%s\n\n' % source)
else:
log("Unknown source: {!r}".format(source))
if key:
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
with NamedTemporaryFile('w+') as key_file:
key_file.write(key)
key_file.flush()
key_file.seek(0)
                subprocess.check_call(['rpm', '--import', key_file.name])
else:
subprocess.check_call(['rpm', '--import', key])
def _run_yum_command(cmd, fatal=False):
"""Run an YUM command.
Checks the output and retry if the fatal flag is set to True.
:param: cmd: str: The yum command to run.
:param: fatal: bool: Whether the command's output should be checked and
retried.
"""
env = os.environ.copy()
if fatal:
retry_count = 0
result = None
# If the command is considered "fatal", we need to retry if the yum
# lock was not acquired.
while result is None or result == YUM_NO_LOCK:
try:
result = subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError as e:
retry_count = retry_count + 1
if retry_count > YUM_NO_LOCK_RETRY_COUNT:
raise
result = e.returncode
log("Couldn't acquire YUM lock. Will retry in {} seconds."
"".format(YUM_NO_LOCK_RETRY_DELAY))
time.sleep(YUM_NO_LOCK_RETRY_DELAY)
else:
subprocess.call(cmd, env=env)

View File

@ -0,0 +1,69 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from subprocess import check_call, CalledProcessError
from charmhelpers.fetch import (
BaseFetchHandler,
UnhandledSource,
filter_installed_packages,
install,
)
if filter_installed_packages(['git']) != []:
install(['git'])
if filter_installed_packages(['git']) != []:
raise NotImplementedError('Unable to install git')
class GitUrlFetchHandler(BaseFetchHandler):
"""Handler for git branches via generic and github URLs."""
def can_handle(self, source):
url_parts = self.parse_url(source)
# TODO (mattyw) no support for ssh git@ yet
if url_parts.scheme not in ('http', 'https', 'git', ''):
return False
elif not url_parts.scheme:
return os.path.exists(os.path.join(source, '.git'))
else:
return True
def clone(self, source, dest, branch="master", depth=None):
if not self.can_handle(source):
raise UnhandledSource("Cannot handle {}".format(source))
if os.path.exists(dest):
cmd = ['git', '-C', dest, 'pull', source, branch]
else:
cmd = ['git', 'clone', source, dest, '--branch', branch]
if depth:
cmd.extend(['--depth', depth])
check_call(cmd)
def install(self, source, branch="master", dest=None, depth=None):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
if dest:
dest_dir = os.path.join(dest, branch_name)
else:
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
branch_name)
try:
self.clone(source, dest_dir, branch, depth)
except CalledProcessError as e:
raise UnhandledSource(e)
except OSError as e:
raise UnhandledSource(e.strerror)
return dest_dir

View File

@ -0,0 +1,122 @@
# Copyright 2014-2017 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Charm helpers snap for classic charms.
If writing reactive charms, use the snap layer:
https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
"""
import subprocess
from os import environ
from time import sleep
from charmhelpers.core.hookenv import log
__author__ = 'Joseph Borg <joseph.borg@canonical.com>'
SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved).
SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks.
SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
class CouldNotAcquireLockException(Exception):
pass
def _snap_exec(commands):
"""
Execute snap commands.
:param commands: List commands
:return: Integer exit code
"""
assert type(commands) == list
retry_count = 0
return_code = None
while return_code is None or return_code == SNAP_NO_LOCK:
try:
return_code = subprocess.check_call(['snap'] + commands, env=environ)
except subprocess.CalledProcessError as e:
            retry_count += 1
if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
                raise CouldNotAcquireLockException('Could not acquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT)
return_code = e.returncode
log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN')
sleep(SNAP_NO_LOCK_RETRY_DELAY)
return return_code
def snap_install(packages, *flags):
"""
Install a snap package.
:param packages: String or List String package name
:param flags: List String flags to pass to install command
:return: Integer return code from snap
"""
if type(packages) is not list:
packages = [packages]
flags = list(flags)
message = 'Installing snap(s) "%s"' % ', '.join(packages)
if flags:
message += ' with option(s) "%s"' % ', '.join(flags)
log(message, level='INFO')
return _snap_exec(['install'] + flags + packages)
def snap_remove(packages, *flags):
"""
Remove a snap package.
:param packages: String or List String package name
:param flags: List String flags to pass to remove command
:return: Integer return code from snap
"""
if type(packages) is not list:
packages = [packages]
flags = list(flags)
message = 'Removing snap(s) "%s"' % ', '.join(packages)
if flags:
message += ' with options "%s"' % ', '.join(flags)
log(message, level='INFO')
return _snap_exec(['remove'] + flags + packages)
def snap_refresh(packages, *flags):
"""
Refresh / Update snap package.
:param packages: String or List String package name
:param flags: List String flags to pass to refresh command
:return: Integer return code from snap
"""
if type(packages) is not list:
packages = [packages]
flags = list(flags)
message = 'Refreshing snap(s) "%s"' % ', '.join(packages)
if flags:
message += ' with options "%s"' % ', '.join(flags)
log(message, level='INFO')
return _snap_exec(['refresh'] + flags + packages)
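A brief usage sketch; the snap name and channel flag are illustrative and flags are passed through to the snap CLI unchanged:

from charmhelpers.fetch.snap import snap_install, snap_refresh

snap_install('core', '--channel=stable')   # hypothetical snap and flag
snap_refresh(['core'])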

View File

@ -0,0 +1,364 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import time
import subprocess
from tempfile import NamedTemporaryFile
from charmhelpers.core.host import (
lsb_release
)
from charmhelpers.core.hookenv import log
from charmhelpers.fetch import SourceConfigError
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""
CLOUD_ARCHIVE_POCKETS = {
# Folsom
'folsom': 'precise-updates/folsom',
'precise-folsom': 'precise-updates/folsom',
'precise-folsom/updates': 'precise-updates/folsom',
'precise-updates/folsom': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'precise-folsom/proposed': 'precise-proposed/folsom',
'precise-proposed/folsom': 'precise-proposed/folsom',
# Grizzly
'grizzly': 'precise-updates/grizzly',
'precise-grizzly': 'precise-updates/grizzly',
'precise-grizzly/updates': 'precise-updates/grizzly',
'precise-updates/grizzly': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'precise-grizzly/proposed': 'precise-proposed/grizzly',
'precise-proposed/grizzly': 'precise-proposed/grizzly',
# Havana
'havana': 'precise-updates/havana',
'precise-havana': 'precise-updates/havana',
'precise-havana/updates': 'precise-updates/havana',
'precise-updates/havana': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'precise-havana/proposed': 'precise-proposed/havana',
'precise-proposed/havana': 'precise-proposed/havana',
# Icehouse
'icehouse': 'precise-updates/icehouse',
'precise-icehouse': 'precise-updates/icehouse',
'precise-icehouse/updates': 'precise-updates/icehouse',
'precise-updates/icehouse': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
'precise-icehouse/proposed': 'precise-proposed/icehouse',
'precise-proposed/icehouse': 'precise-proposed/icehouse',
# Juno
'juno': 'trusty-updates/juno',
'trusty-juno': 'trusty-updates/juno',
'trusty-juno/updates': 'trusty-updates/juno',
'trusty-updates/juno': 'trusty-updates/juno',
'juno/proposed': 'trusty-proposed/juno',
'trusty-juno/proposed': 'trusty-proposed/juno',
'trusty-proposed/juno': 'trusty-proposed/juno',
# Kilo
'kilo': 'trusty-updates/kilo',
'trusty-kilo': 'trusty-updates/kilo',
'trusty-kilo/updates': 'trusty-updates/kilo',
'trusty-updates/kilo': 'trusty-updates/kilo',
'kilo/proposed': 'trusty-proposed/kilo',
'trusty-kilo/proposed': 'trusty-proposed/kilo',
'trusty-proposed/kilo': 'trusty-proposed/kilo',
# Liberty
'liberty': 'trusty-updates/liberty',
'trusty-liberty': 'trusty-updates/liberty',
'trusty-liberty/updates': 'trusty-updates/liberty',
'trusty-updates/liberty': 'trusty-updates/liberty',
'liberty/proposed': 'trusty-proposed/liberty',
'trusty-liberty/proposed': 'trusty-proposed/liberty',
'trusty-proposed/liberty': 'trusty-proposed/liberty',
# Mitaka
'mitaka': 'trusty-updates/mitaka',
'trusty-mitaka': 'trusty-updates/mitaka',
'trusty-mitaka/updates': 'trusty-updates/mitaka',
'trusty-updates/mitaka': 'trusty-updates/mitaka',
'mitaka/proposed': 'trusty-proposed/mitaka',
'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
# Newton
'newton': 'xenial-updates/newton',
'xenial-newton': 'xenial-updates/newton',
'xenial-newton/updates': 'xenial-updates/newton',
'xenial-updates/newton': 'xenial-updates/newton',
'newton/proposed': 'xenial-proposed/newton',
'xenial-newton/proposed': 'xenial-proposed/newton',
'xenial-proposed/newton': 'xenial-proposed/newton',
# Ocata
'ocata': 'xenial-updates/ocata',
'xenial-ocata': 'xenial-updates/ocata',
'xenial-ocata/updates': 'xenial-updates/ocata',
'xenial-updates/ocata': 'xenial-updates/ocata',
'ocata/proposed': 'xenial-proposed/ocata',
'xenial-ocata/proposed': 'xenial-proposed/ocata',
    'xenial-proposed/ocata': 'xenial-proposed/ocata',
}
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries.
CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times.
def filter_installed_packages(packages):
"""Return a list of packages that require installation."""
cache = apt_cache()
_pkgs = []
for package in packages:
try:
p = cache[package]
p.current_ver or _pkgs.append(package)
except KeyError:
log('Package {} has no installation candidate.'.format(package),
level='WARNING')
_pkgs.append(package)
return _pkgs
def apt_cache(in_memory=True, progress=None):
"""Build and return an apt cache."""
from apt import apt_pkg
apt_pkg.init()
if in_memory:
apt_pkg.config.set("Dir::Cache::pkgcache", "")
apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
return apt_pkg.Cache(progress)
def install(packages, options=None, fatal=False):
"""Install one or more packages."""
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
_run_apt_command(cmd, fatal)
def upgrade(options=None, fatal=False, dist=False):
"""Upgrade all packages."""
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
if dist:
cmd.append('dist-upgrade')
else:
cmd.append('upgrade')
log("Upgrading with options: {}".format(options))
_run_apt_command(cmd, fatal)
def update(fatal=False):
"""Update local apt cache."""
cmd = ['apt-get', 'update']
_run_apt_command(cmd, fatal)
def purge(packages, fatal=False):
"""Purge one or more packages."""
cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
log("Purging {}".format(packages))
_run_apt_command(cmd, fatal)
def apt_mark(packages, mark, fatal=False):
"""Flag one or more packages using apt-mark."""
log("Marking {} as {}".format(packages, mark))
cmd = ['apt-mark', mark]
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
if fatal:
subprocess.check_call(cmd, universal_newlines=True)
else:
subprocess.call(cmd, universal_newlines=True)
def apt_hold(packages, fatal=False):
return apt_mark(packages, 'hold', fatal=fatal)
def apt_unhold(packages, fatal=False):
return apt_mark(packages, 'unhold', fatal=fatal)
def add_source(source, key=None):
"""Add a package source to this system.
@param source: a URL or sources.list entry, as supported by
add-apt-repository(1). Examples::
ppa:charmers/example
deb https://stub:key@private.example.com/ubuntu trusty main
In addition:
        'proposed' may be used to enable the standard 'proposed'
pocket for the release.
'cloud:' may be used to activate official cloud archive pockets,
such as 'cloud:icehouse'
'distro' may be used as a noop
@param key: A key to be added to the system's APT keyring and used
to verify the signatures on packages. Ideally, this should be an
ASCII format GPG public key including the block headers. A GPG key
id may also be used, but be aware that only insecure protocols are
available to retrieve the actual public key from a public keyserver
placing your Juju environment at risk. ppa and cloud archive keys
    are securely added automatically, so should not be provided.
"""
if source is None:
log('Source is not present. Skipping')
return
if (source.startswith('ppa:') or
source.startswith('http') or
source.startswith('deb ') or
source.startswith('cloud-archive:')):
cmd = ['add-apt-repository', '--yes', source]
_run_with_retries(cmd)
elif source.startswith('cloud:'):
install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True)
pocket = source.split(':')[-1]
if pocket not in CLOUD_ARCHIVE_POCKETS:
raise SourceConfigError(
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
elif source == 'proposed':
release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
elif source == 'distro':
pass
else:
log("Unknown source: {!r}".format(source))
if key:
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
with NamedTemporaryFile('w+') as key_file:
key_file.write(key)
key_file.flush()
key_file.seek(0)
subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
else:
# Note that hkp: is in no way a secure protocol. Using a
# GPG key id is pointless from a security POV unless you
# absolutely trust your network and DNS.
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'hkp://keyserver.ubuntu.com:80', '--recv',
key])
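For reference, a few hedged examples of the source strings add_source() accepts; the cloud pocket must be a key of CLOUD_ARCHIVE_POCKETS and the PPA name is illustrative:

from charmhelpers.fetch import add_source

add_source('distro')                # no-op
add_source('proposed')              # enable the <series>-proposed pocket
add_source('cloud:xenial-ocata')    # Ubuntu Cloud Archive pocket
add_source('ppa:charmers/example')  # hypothetical PPA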
def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
retry_message="", cmd_env=None):
"""Run a command and retry until success or max_retries is reached.
:param: cmd: str: The apt command to run.
:param: max_retries: int: The number of retries to attempt on a fatal
command. Defaults to CMD_RETRY_COUNT.
:param: retry_exitcodes: tuple: Optional additional exit codes to retry.
Defaults to retry on exit code 1.
:param: retry_message: str: Optional log prefix emitted during retries.
:param: cmd_env: dict: Environment variables to add to the command run.
"""
env = os.environ.copy()
if cmd_env:
env.update(cmd_env)
if not retry_message:
retry_message = "Failed executing '{}'".format(" ".join(cmd))
retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)
retry_count = 0
result = None
retry_results = (None,) + retry_exitcodes
while result in retry_results:
try:
result = subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError as e:
retry_count = retry_count + 1
if retry_count > max_retries:
raise
result = e.returncode
log(retry_message)
time.sleep(CMD_RETRY_DELAY)
def _run_apt_command(cmd, fatal=False):
"""Run an apt command with optional retries.
:param: fatal: bool: Whether the command's output should be checked and
retried.
"""
# Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.
cmd_env = {
'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
if fatal:
_run_with_retries(
cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
retry_message="Couldn't acquire DPKG lock")
else:
env = os.environ.copy()
env.update(cmd_env)
subprocess.call(cmd, env=env)
def get_upstream_version(package):
"""Determine upstream version based on installed package
@returns None (if not installed) or the upstream version
"""
import apt_pkg
cache = apt_cache()
try:
pkg = cache[package]
except:
# the package is unknown to the current apt cache.
return None
if not pkg.current_ver:
# package is known, but no version is currently installed.
return None
return apt_pkg.upstream_version(pkg.current_ver.ver_str)

View File

@ -0,0 +1,25 @@
import platform
def get_platform():
"""Return the current OS platform.
For example: if current os platform is Ubuntu then a string "ubuntu"
will be returned (which is the name of the module).
This string is used to decide which platform module should be imported.
"""
# linux_distribution is deprecated and will be removed in Python 3.7
    # Warnings *not* disabled, as we certainly need to fix this.
tuple_platform = platform.linux_distribution()
current_platform = tuple_platform[0]
if "Ubuntu" in current_platform:
return "ubuntu"
elif "CentOS" in current_platform:
return "centos"
elif "debian" in current_platform:
# Stock Python does not detect Ubuntu and instead returns debian.
# Or at least it does in some build environments like Travis CI
return "ubuntu"
else:
raise RuntimeError("This module is not supported on {}."
.format(current_platform))

View File

@ -14,7 +14,7 @@ install_command =
pip install --allow-unverified python-apt {opts} {packages}
commands = ostestr {posargs}
whitelist_externals = juju
passenv = HOME TERM AMULET_* CS_API_URL
passenv = HOME TERM AMULET_* CS_API_*
[testenv:py27]
basepython = python2.7

View File

@ -209,6 +209,7 @@ class TestNeutronUtils(CharmTestCase):
git_requested.return_value = False
self.config.return_value = 'ovs'
self.get_os_codename_install_source.return_value = 'juno'
self.os_release.return_value = 'juno'
self.assertTrue('keepalived' in neutron_utils.get_packages())
@patch('charmhelpers.contrib.openstack.context.config')
@ -296,6 +297,7 @@ class TestNeutronUtils(CharmTestCase):
self.test_config.set('openstack-origin', 'cloud:precise-havana')
self.test_config.set('plugin', 'ovs')
self.get_os_codename_install_source.return_value = 'havana'
self.os_release.return_value = 'havana'
configs = neutron_utils.register_configs()
neutron_utils.do_openstack_upgrade(configs)
self.assertTrue(self.log.called)
@ -314,6 +316,7 @@ class TestNeutronUtils(CharmTestCase):
@patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer')
def test_register_configs_ovs(self, mock_renderer):
self.config.return_value = 'ovs'
self.os_release.return_value = 'diablo'
self.is_relation_made.return_value = False
configs = neutron_utils.register_configs()
confs = [neutron_utils.NEUTRON_DHCP_AGENT_CONF,
@ -332,6 +335,7 @@ class TestNeutronUtils(CharmTestCase):
self.test_config.set('plugin', 'ovs-odl')
self.is_relation_made.return_value = False
self.get_os_codename_install_source.return_value = 'icehouse'
self.os_release.return_value = 'icehouse'
configs = neutron_utils.register_configs()
confs = [neutron_utils.NEUTRON_DHCP_AGENT_CONF,
neutron_utils.NEUTRON_METADATA_AGENT_CONF,
@ -346,6 +350,7 @@ class TestNeutronUtils(CharmTestCase):
def test_register_configs_amqp_nova(self, mock_renderer):
self.config.return_value = 'ovs'
self.is_relation_made.return_value = True
self.os_release.return_value = 'diablo'
configs = neutron_utils.register_configs()
confs = [neutron_utils.NEUTRON_DHCP_AGENT_CONF,
neutron_utils.NEUTRON_METADATA_AGENT_CONF,
@ -482,6 +487,7 @@ class TestNeutronUtils(CharmTestCase):
self.config.return_value = 'ovs'
# No VPN agent after trusty
mock_get_packages.return_value = ['neutron-l3-agent']
self.os_release.return_value = 'diablo'
rmap = neutron_utils.restart_map()
for services in rmap.itervalues():
self.assertFalse('neutron-vpn-agent' in services)
@ -567,6 +573,7 @@ class TestNeutronUtils(CharmTestCase):
@patch('charmhelpers.contrib.openstack.templating.OSConfigRenderer')
def test_register_configs_nsx(self, mock_renderer):
self.config.return_value = 'nsx'
self.os_release.return_value = 'diablo'
configs = neutron_utils.register_configs()
confs = [neutron_utils.NEUTRON_DHCP_AGENT_CONF,
neutron_utils.NEUTRON_METADATA_AGENT_CONF,
@ -577,6 +584,7 @@ class TestNeutronUtils(CharmTestCase):
def test_stop_services_ovs(self):
self.config.return_value = 'ovs'
self.os_release.return_value = 'diablo'
neutron_utils.stop_services()
calls = [call('neutron-dhcp-agent'),
call('neutron-plugin-openvswitch-agent'),
@ -592,6 +600,7 @@ class TestNeutronUtils(CharmTestCase):
def test_register_configs_pre_install(self, mock_renderer):
self.config.return_value = 'ovs'
self.is_relation_made.return_value = False
self.os_release.return_value = 'diablo'
configs = neutron_utils.register_configs()
confs = [neutron_utils.NOVA_CONF,
neutron_utils.NEUTRON_CONF,
@ -1059,7 +1068,8 @@ class TestNeutronAgentReallocation(CharmTestCase):
symlink, exists, join, remove):
projects_yaml = openstack_origin_git
join.return_value = 'joined-string'
self.lsb_release.return_value = {'DISTRIB_RELEASE': '15.04'}
self.lsb_release.return_value = {'DISTRIB_RELEASE': '15.04',
'DISTRIB_CODENAME': 'vivid'}
self.os_release.return_value = 'liberty'
neutron_utils.git_post_install(projects_yaml)
expected = [
@ -1384,7 +1394,8 @@ class TestNeutronAgentReallocation(CharmTestCase):
symlink, exists, join, remove, listdir):
projects_yaml = openstack_origin_git
join.return_value = 'joined-string'
self.lsb_release.return_value = {'DISTRIB_RELEASE': '15.10'}
self.lsb_release.return_value = {'DISTRIB_RELEASE': '15.10',
'DISTRIB_CODENAME': 'wily'}
self.os_release.return_value = 'newton'
neutron_utils.git_post_install(projects_yaml)
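The pattern behind these test changes: now that the charm feeds os_release() output into a release comparator, the mocked call has to return a real codename string, because a bare Mock would fail the comparator's list lookup with a KeyError. A minimal sketch of the idea, assuming a CharmTestCase that patches os_release:

# inside a test method of a CharmTestCase with 'os_release' in TO_PATCH:
self.os_release.return_value = 'mitaka'   # must be a valid codename, not a Mock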